# -- File: transformer/model/embedder.py (repo: ViktorStagge/transformer, license: MIT) --
from typing import Optional
from keras.layers import Input, \
Embedding, \
Add, \
Dropout
from keras.models import Model
from transformer.model.layers import PositionalEncoding
class Embedder(Model):
def __init__(self,
sequence_length: int,
vocab_size: int = 16384,
d_model: int = 128,
dropout: float = 0.1,
batch_size: Optional[int] = None,
name: str = 'Embedder',
use_positional_encoding: bool = True,
use_mask: bool = True,
**kwargs):
x = Input(batch_shape=(batch_size, sequence_length),
name='x')
x_output = Input(batch_shape=(batch_size, sequence_length),
name='x_output')
# Embedding
embedding_layer = Embedding(input_dim=vocab_size,
output_dim=d_model,
embeddings_initializer='uniform',
mask_zero=use_mask,
name='word_embedding')
encoder_embedding = embedding_layer(x)
decoder_embedding = embedding_layer(x_output)
        # Positional Encoding
if use_positional_encoding:
pe_layer = PositionalEncoding(batch_size=batch_size,
verbose=True,
name='positional_encoding')
encoder_pos = pe_layer(encoder_embedding)
decoder_pos = pe_layer(decoder_embedding)
encoder_embedding = Add(name='encoder_total_embedding')([encoder_embedding, encoder_pos])
decoder_embedding = Add(name='decoder_total_embedding')([decoder_embedding, decoder_pos])
# Dropout
h_L0 = Dropout(rate=dropout, name='h_L0')(encoder_embedding)
h_output = Dropout(rate=dropout, name='h_output')(decoder_embedding)
inputs = [x, x_output]
outputs = [h_L0, h_output]
super().__init__(inputs=inputs,
outputs=outputs,
name=name,
**kwargs)
self.embedding_layer = [embedding_layer]
def get_config(self):
config = super().get_config()
return config
@classmethod
def from_config(cls, config, custom_objects=None):
model = super().from_config(config=config, custom_objects=custom_objects)
return model
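

# Hedged usage sketch (not part of the original module): build an Embedder and
# call it on two (batch, sequence_length) integer token tensors. The shapes and
# hyperparameters below are illustrative assumptions, not values from the repo.
def _example_build_embedder(sequence_length=128):
    """Return an Embedder whose call on [x, x_output] yields the dropped-out
    encoder and decoder embeddings (h_L0, h_output)."""
    return Embedder(sequence_length=sequence_length, vocab_size=16384, d_model=128)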


# -- File: windows/winproxy/apis/advapi32.py (repo: 1orenz0/PythonForWindows, license: BSD-3-Clause) --
import ctypes
import windows.generated_def as gdef
from ..apiproxy import ApiProxy, NeededParameter
from ..error import fail_on_zero, succeed_on_zero, result_is_error_code
class Advapi32Proxy(ApiProxy):
APIDLL = "advapi32"
default_error_check = staticmethod(fail_on_zero)
# Process
@Advapi32Proxy()
def CreateProcessAsUserA(hToken, lpApplicationName, lpCommandLine=None, lpProcessAttributes=None, lpThreadAttributes=None, bInheritHandles=False,
dwCreationFlags=0, lpEnvironment=None, lpCurrentDirectory=None, lpStartupInfo=None, lpProcessInformation=None):
if lpStartupInfo is None:
StartupInfo = gdef.STARTUPINFOA()
StartupInfo.cb = ctypes.sizeof(StartupInfo)
StartupInfo.dwFlags = 0
# StartupInfo.wShowWindow = gdef.SW_HIDE
lpStartupInfo = ctypes.byref(StartupInfo)
if lpProcessInformation is None:
lpProcessInformation = ctypes.byref(gdef.PROCESS_INFORMATION())
    return CreateProcessAsUserA.ctypes_function(hToken, lpApplicationName, lpCommandLine, lpProcessAttributes, lpThreadAttributes, bInheritHandles, dwCreationFlags, lpEnvironment, lpCurrentDirectory, lpStartupInfo, lpProcessInformation)
@Advapi32Proxy()
def CreateProcessAsUserW(hToken, lpApplicationName, lpCommandLine, lpProcessAttributes, lpThreadAttributes, bInheritHandles, dwCreationFlags, lpEnvironment, lpCurrentDirectory, lpStartupInfo, lpProcessInformation):
return CreateProcessAsUserW.ctypes_function(hToken, lpApplicationName, lpCommandLine, lpProcessAttributes, lpThreadAttributes, bInheritHandles, dwCreationFlags, lpEnvironment, lpCurrentDirectory, lpStartupInfo, lpProcessInformation)
# Token
@Advapi32Proxy()
def OpenProcessToken(ProcessHandle=None, DesiredAccess=NeededParameter, TokenHandle=NeededParameter):
"""If ProcessHandle is None: take the current process"""
if ProcessHandle is None:
        # Pseudo-handle for the current process, as returned by kernel32!GetCurrentProcess
        ProcessHandle = ctypes.windll.kernel32.GetCurrentProcess()
return OpenProcessToken.ctypes_function(ProcessHandle, DesiredAccess, TokenHandle)
@Advapi32Proxy()
def OpenThreadToken(ThreadHandle, DesiredAccess, OpenAsSelf, TokenHandle):
return OpenThreadToken.ctypes_function(ThreadHandle, DesiredAccess, OpenAsSelf, TokenHandle)
@Advapi32Proxy()
def SetThreadToken(Thread, Token):
    if isinstance(Thread, int):  # accept raw handle values (Python 3's int subsumes the former long)
Thread = gdef.HANDLE(Thread)
return SetThreadToken.ctypes_function(Thread, Token)
@Advapi32Proxy()
def DuplicateToken(ExistingTokenHandle, ImpersonationLevel, DuplicateTokenHandle):
return DuplicateToken.ctypes_function(ExistingTokenHandle, ImpersonationLevel, DuplicateTokenHandle)
@Advapi32Proxy()
def DuplicateTokenEx(hExistingToken, dwDesiredAccess, lpTokenAttributes, ImpersonationLevel, TokenType, phNewToken):
return DuplicateTokenEx.ctypes_function(hExistingToken, dwDesiredAccess, lpTokenAttributes, ImpersonationLevel, TokenType, phNewToken)
@Advapi32Proxy()
def GetTokenInformation(TokenHandle=NeededParameter, TokenInformationClass=NeededParameter, TokenInformation=None, TokenInformationLength=0, ReturnLength=None):
if ReturnLength is None:
ReturnLength = ctypes.byref(gdef.DWORD())
return GetTokenInformation.ctypes_function(TokenHandle, TokenInformationClass, TokenInformation, TokenInformationLength, ReturnLength)
@Advapi32Proxy()
def SetTokenInformation(TokenHandle, TokenInformationClass, TokenInformation, TokenInformationLength):
return SetTokenInformation.ctypes_function(TokenHandle, TokenInformationClass, TokenInformation, TokenInformationLength)
# Token - Privilege
@Advapi32Proxy()
def LookupPrivilegeValueA(lpSystemName=None, lpName=NeededParameter, lpLuid=NeededParameter):
return LookupPrivilegeValueA.ctypes_function(lpSystemName, lpName, lpLuid)
@Advapi32Proxy()
def LookupPrivilegeValueW(lpSystemName=None, lpName=NeededParameter, lpLuid=NeededParameter):
return LookupPrivilegeValueW.ctypes_function(lpSystemName, lpName, lpLuid)
@Advapi32Proxy()
def LookupPrivilegeNameA(lpSystemName, lpLuid, lpName, cchName):
return LookupPrivilegeNameA.ctypes_function(lpSystemName, lpLuid, lpName, cchName)
@Advapi32Proxy()
def LookupPrivilegeNameW(lpSystemName, lpLuid, lpName, cchName):
return LookupPrivilegeNameW.ctypes_function(lpSystemName, lpLuid, lpName, cchName)
@Advapi32Proxy()
def AdjustTokenPrivileges(TokenHandle, DisableAllPrivileges=False, NewState=NeededParameter, BufferLength=None, PreviousState=None, ReturnLength=None):
if BufferLength is None:
BufferLength = ctypes.sizeof(NewState)
return AdjustTokenPrivileges.ctypes_function(TokenHandle, DisableAllPrivileges, NewState, BufferLength, PreviousState, ReturnLength)
# Sid
@Advapi32Proxy()
def LookupAccountSidA(lpSystemName, lpSid, lpName, cchName, lpReferencedDomainName, cchReferencedDomainName, peUse):
return LookupAccountSidA.ctypes_function(lpSystemName, lpSid, lpName, cchName, lpReferencedDomainName, cchReferencedDomainName, peUse)
@Advapi32Proxy()
def LookupAccountSidW(lpSystemName, lpSid, lpName, cchName, lpReferencedDomainName, cchReferencedDomainName, peUse):
return LookupAccountSidW.ctypes_function(lpSystemName, lpSid, lpName, cchName, lpReferencedDomainName, cchReferencedDomainName, peUse)
@Advapi32Proxy()
def CreateWellKnownSid(WellKnownSidType, DomainSid=None, pSid=None, cbSid=NeededParameter):
return CreateWellKnownSid.ctypes_function(WellKnownSidType, DomainSid, pSid, cbSid)
@Advapi32Proxy()
def GetLengthSid(pSid):
return GetLengthSid.ctypes_function(pSid)
@Advapi32Proxy()
def EqualSid(pSid1, pSid2):
return EqualSid.ctypes_function(pSid1, pSid2)
@Advapi32Proxy()
def GetSidSubAuthority(pSid, nSubAuthority):
return GetSidSubAuthority.ctypes_function(pSid, nSubAuthority)
@Advapi32Proxy()
def GetSidSubAuthorityCount(pSid):
return GetSidSubAuthorityCount.ctypes_function(pSid)
@Advapi32Proxy()
def ConvertStringSidToSidA(StringSid, Sid):
return ConvertStringSidToSidA.ctypes_function(StringSid, Sid)
@Advapi32Proxy()
def ConvertStringSidToSidW(StringSid, Sid):
return ConvertStringSidToSidW.ctypes_function(StringSid, Sid)
@Advapi32Proxy()
def ConvertSidToStringSidA(Sid, StringSid):
return ConvertSidToStringSidA.ctypes_function(Sid, StringSid)
@Advapi32Proxy()
def ConvertSidToStringSidW(Sid, StringSid):
return ConvertSidToStringSidW.ctypes_function(Sid, StringSid)
@Advapi32Proxy()
def CopySid(nDestinationSidLength, pDestinationSid, pSourceSid):
return CopySid.ctypes_function(nDestinationSidLength, pDestinationSid, pSourceSid)
# Security descriptor
@Advapi32Proxy(error_check=result_is_error_code)
def GetNamedSecurityInfoA(pObjectName, ObjectType, SecurityInfo, ppsidOwner=None, ppsidGroup=None, ppDacl=None, ppSacl=None, ppSecurityDescriptor=None):
return GetNamedSecurityInfoA.ctypes_function(pObjectName, ObjectType, SecurityInfo, ppsidOwner, ppsidGroup, ppDacl, ppSacl, ppSecurityDescriptor)
@Advapi32Proxy(error_check=result_is_error_code)
def GetNamedSecurityInfoW(pObjectName, ObjectType, SecurityInfo, ppsidOwner=None, ppsidGroup=None, ppDacl=None, ppSacl=None, ppSecurityDescriptor=None):
return GetNamedSecurityInfoW.ctypes_function(pObjectName, ObjectType, SecurityInfo, ppsidOwner, ppsidGroup, ppDacl, ppSacl, ppSecurityDescriptor)
@Advapi32Proxy(error_check=succeed_on_zero)
def GetSecurityInfo(handle, ObjectType, SecurityInfo, ppsidOwner=None, ppsidGroup=None, ppDacl=None, ppSacl=None, ppSecurityDescriptor=None):
return GetSecurityInfo.ctypes_function(handle, ObjectType, SecurityInfo, ppsidOwner, ppsidGroup, ppDacl, ppSacl, ppSecurityDescriptor)
@Advapi32Proxy()
def IsValidSecurityDescriptor(pSecurityDescriptor):
return IsValidSecurityDescriptor.ctypes_function(pSecurityDescriptor)
@Advapi32Proxy()
def ConvertStringSecurityDescriptorToSecurityDescriptorA(StringSecurityDescriptor, StringSDRevision, SecurityDescriptor, SecurityDescriptorSize):
return ConvertStringSecurityDescriptorToSecurityDescriptorA.ctypes_function(StringSecurityDescriptor, StringSDRevision, SecurityDescriptor, SecurityDescriptorSize)
@Advapi32Proxy()
def ConvertStringSecurityDescriptorToSecurityDescriptorW(StringSecurityDescriptor, StringSDRevision, SecurityDescriptor, SecurityDescriptorSize):
return ConvertStringSecurityDescriptorToSecurityDescriptorW.ctypes_function(StringSecurityDescriptor, StringSDRevision, SecurityDescriptor, SecurityDescriptorSize)
@Advapi32Proxy()
def ConvertSecurityDescriptorToStringSecurityDescriptorA(SecurityDescriptor, RequestedStringSDRevision, SecurityInformation, StringSecurityDescriptor, StringSecurityDescriptorLen):
return ConvertSecurityDescriptorToStringSecurityDescriptorA.ctypes_function(SecurityDescriptor, RequestedStringSDRevision, SecurityInformation, StringSecurityDescriptor, StringSecurityDescriptorLen)
@Advapi32Proxy()
def ConvertSecurityDescriptorToStringSecurityDescriptorW(SecurityDescriptor, RequestedStringSDRevision, SecurityInformation, StringSecurityDescriptor, StringSecurityDescriptorLen):
return ConvertSecurityDescriptorToStringSecurityDescriptorW.ctypes_function(SecurityDescriptor, RequestedStringSDRevision, SecurityInformation, StringSecurityDescriptor, StringSecurityDescriptorLen)
@Advapi32Proxy()
def GetSecurityDescriptorDacl(pSecurityDescriptor, lpbDaclPresent, pDacl, lpbDaclDefaulted):
return GetSecurityDescriptorDacl.ctypes_function(pSecurityDescriptor, lpbDaclPresent, pDacl, lpbDaclDefaulted)
@Advapi32Proxy()
def GetSecurityDescriptorLength(pSecurityDescriptor):
return GetSecurityDescriptorLength.ctypes_function(pSecurityDescriptor)
@Advapi32Proxy()
def GetSecurityDescriptorControl(pSecurityDescriptor, pControl, lpdwRevision):
return GetSecurityDescriptorControl.ctypes_function(pSecurityDescriptor, pControl, lpdwRevision)
@Advapi32Proxy()
def GetSecurityDescriptorOwner(pSecurityDescriptor, pOwner, lpbOwnerDefaulted):
return GetSecurityDescriptorOwner.ctypes_function(pSecurityDescriptor, pOwner, lpbOwnerDefaulted)
@Advapi32Proxy()
def GetSecurityDescriptorGroup(pSecurityDescriptor, pGroup, lpbGroupDefaulted):
return GetSecurityDescriptorGroup.ctypes_function(pSecurityDescriptor, pGroup, lpbGroupDefaulted)
@Advapi32Proxy()
def GetSecurityDescriptorSacl(pSecurityDescriptor, lpbSaclPresent, pSacl, lpbSaclDefaulted):
return GetSecurityDescriptorSacl.ctypes_function(pSecurityDescriptor, lpbSaclPresent, pSacl, lpbSaclDefaulted)
# ACE - ACL
@Advapi32Proxy()
def GetAclInformation(pAcl, pAclInformation, nAclInformationLength, dwAclInformationClass):
return GetAclInformation.ctypes_function(pAcl, pAclInformation, nAclInformationLength, dwAclInformationClass)
@Advapi32Proxy()
def GetAce(pAcl, dwAceIndex, pAce):
return GetAce.ctypes_function(pAcl, dwAceIndex, pAce)
# Registry
@Advapi32Proxy(error_check=succeed_on_zero)
def RegOpenKeyExA(hKey, lpSubKey, ulOptions, samDesired, phkResult):
return RegOpenKeyExA.ctypes_function(hKey, lpSubKey, ulOptions, samDesired, phkResult)
@Advapi32Proxy(error_check=succeed_on_zero)
def RegOpenKeyExW(hKey, lpSubKey, ulOptions, samDesired, phkResult):
return RegOpenKeyExW.ctypes_function(hKey, lpSubKey, ulOptions, samDesired, phkResult)
@Advapi32Proxy(error_check=succeed_on_zero)
def RegGetValueA(hkey, lpSubKey, lpValue, dwFlags, pdwType, pvData, pcbData):
return RegGetValueA.ctypes_function(hkey, lpSubKey, lpValue, dwFlags, pdwType, pvData, pcbData)
@Advapi32Proxy(error_check=succeed_on_zero)
def RegGetValueW(hkey, lpSubKey=None, lpValue=NeededParameter, dwFlags=0, pdwType=None, pvData=None, pcbData=None):
return RegGetValueW.ctypes_function(hkey, lpSubKey, lpValue, dwFlags, pdwType, pvData, pcbData)
@Advapi32Proxy(error_check=succeed_on_zero)
def RegQueryValueExA(hKey, lpValueName, lpReserved, lpType, lpData, lpcbData):
return RegQueryValueExA.ctypes_function(hKey, lpValueName, lpReserved, lpType, lpData, lpcbData)
@Advapi32Proxy(error_check=succeed_on_zero)
def RegQueryValueExW(hKey, lpValueName, lpReserved, lpType, lpData, lpcbData):
    return RegQueryValueExW.ctypes_function(hKey, lpValueName, lpReserved, lpType, lpData, lpcbData)
@Advapi32Proxy(error_check=succeed_on_zero)
def RegCloseKey(hKey):
return RegCloseKey.ctypes_function(hKey)
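

# Hedged usage sketch (not part of the original module): read a REG_SZ value
# under HKEY_LOCAL_MACHINE with the registry proxies above. The constant and
# type names used here (HKEY_LOCAL_MACHINE, KEY_READ, RRF_RT_REG_SZ, HKEY,
# DWORD) are assumed to be present in the generated definitions.
def _example_read_hklm_string(subkey, value_name):
    hkey = gdef.HKEY()
    RegOpenKeyExW(gdef.HKEY_LOCAL_MACHINE, subkey, 0, gdef.KEY_READ, ctypes.byref(hkey))
    try:
        buf = ctypes.create_unicode_buffer(256)
        size = gdef.DWORD(ctypes.sizeof(buf))  # pcbData is counted in bytes
        RegGetValueW(hkey, None, value_name, gdef.RRF_RT_REG_SZ, None, buf, ctypes.byref(size))
        return buf.value
    finally:
        RegCloseKey(hkey)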
# Service
@Advapi32Proxy()
def OpenSCManagerA(lpMachineName=None, lpDatabaseName=None, dwDesiredAccess=gdef.SC_MANAGER_ALL_ACCESS):
return OpenSCManagerA.ctypes_function(lpMachineName, lpDatabaseName, dwDesiredAccess)
@Advapi32Proxy()
def OpenSCManagerW(lpMachineName=None, lpDatabaseName=None, dwDesiredAccess=gdef.SC_MANAGER_ALL_ACCESS):
return OpenSCManagerW.ctypes_function(lpMachineName, lpDatabaseName, dwDesiredAccess)
@Advapi32Proxy()
def EnumServicesStatusExA(hSCManager, InfoLevel, dwServiceType, dwServiceState, lpServices, cbBufSize, pcbBytesNeeded, lpServicesReturned, lpResumeHandle, pszGroupName):
return EnumServicesStatusExA.ctypes_function(hSCManager, InfoLevel, dwServiceType, dwServiceState, lpServices, cbBufSize, pcbBytesNeeded, lpServicesReturned, lpResumeHandle, pszGroupName)
@Advapi32Proxy()
def EnumServicesStatusExW(hSCManager, InfoLevel, dwServiceType, dwServiceState, lpServices, cbBufSize, pcbBytesNeeded, lpServicesReturned, lpResumeHandle, pszGroupName):
return EnumServicesStatusExW.ctypes_function(hSCManager, InfoLevel, dwServiceType, dwServiceState, lpServices, cbBufSize, pcbBytesNeeded, lpServicesReturned, lpResumeHandle, pszGroupName)
@Advapi32Proxy()
def StartServiceA(hService, dwNumServiceArgs, lpServiceArgVectors):
return StartServiceA.ctypes_function(hService, dwNumServiceArgs, lpServiceArgVectors)
@Advapi32Proxy()
def StartServiceW(hService, dwNumServiceArgs, lpServiceArgVectors):
return StartServiceW.ctypes_function(hService, dwNumServiceArgs, lpServiceArgVectors)
@Advapi32Proxy()
def OpenServiceA(hSCManager, lpServiceName, dwDesiredAccess):
return OpenServiceA.ctypes_function(hSCManager, lpServiceName, dwDesiredAccess)
@Advapi32Proxy()
def OpenServiceW(hSCManager, lpServiceName, dwDesiredAccess):
return OpenServiceW.ctypes_function(hSCManager, lpServiceName, dwDesiredAccess)
@Advapi32Proxy()
def CloseServiceHandle(hSCObject):
return CloseServiceHandle.ctypes_function(hSCObject)
# Event log
@Advapi32Proxy()
def OpenEventLogA(lpUNCServerName=None, lpSourceName=NeededParameter):
return OpenEventLogA.ctypes_function(lpUNCServerName, lpSourceName)
@Advapi32Proxy()
def OpenEventLogW(lpUNCServerName=None, lpSourceName=NeededParameter):
return OpenEventLogW.ctypes_function(lpUNCServerName, lpSourceName)
@Advapi32Proxy()
def OpenBackupEventLogA(lpUNCServerName=None, lpSourceName=NeededParameter):
return OpenBackupEventLogA.ctypes_function(lpUNCServerName, lpSourceName)
@Advapi32Proxy()
def OpenBackupEventLogW(lpUNCServerName=None, lpSourceName=NeededParameter):
return OpenBackupEventLogW.ctypes_function(lpUNCServerName, lpSourceName)
@Advapi32Proxy()
def ReadEventLogA(hEventLog, dwReadFlags, dwRecordOffset, lpBuffer, nNumberOfBytesToRead, pnBytesRead, pnMinNumberOfBytesNeeded):
return ReadEventLogA.ctypes_function(hEventLog, dwReadFlags, dwRecordOffset, lpBuffer, nNumberOfBytesToRead, pnBytesRead, pnMinNumberOfBytesNeeded)
@Advapi32Proxy()
def ReadEventLogW(hEventLog, dwReadFlags, dwRecordOffset, lpBuffer, nNumberOfBytesToRead, pnBytesRead, pnMinNumberOfBytesNeeded):
return ReadEventLogW.ctypes_function(hEventLog, dwReadFlags, dwRecordOffset, lpBuffer, nNumberOfBytesToRead, pnBytesRead, pnMinNumberOfBytesNeeded)
@Advapi32Proxy()
def GetEventLogInformation(hEventLog, dwInfoLevel, lpBuffer, cbBufSize, pcbBytesNeeded):
return GetEventLogInformation.ctypes_function(hEventLog, dwInfoLevel, lpBuffer, cbBufSize, pcbBytesNeeded)
@Advapi32Proxy()
def GetNumberOfEventLogRecords(hEventLog, NumberOfRecords):
return GetNumberOfEventLogRecords.ctypes_function(hEventLog, NumberOfRecords)
@Advapi32Proxy()
def CloseEventLog(hEventLog):
return CloseEventLog.ctypes_function(hEventLog)
# Crypto
## Crypto key
@Advapi32Proxy()
def CryptGenKey(hProv, Algid, dwFlags, phKey):
return CryptGenKey.ctypes_function(hProv, Algid, dwFlags, phKey)
@Advapi32Proxy()
def CryptDestroyKey(hKey):
return CryptDestroyKey.ctypes_function(hKey)
@Advapi32Proxy()
def CryptExportKey(hKey, hExpKey, dwBlobType, dwFlags, pbData, pdwDataLen):
return CryptExportKey.ctypes_function(hKey, hExpKey, dwBlobType, dwFlags, pbData, pdwDataLen)
## crypt context
@Advapi32Proxy()
def CryptAcquireContextA(phProv, pszContainer, pszProvider, dwProvType, dwFlags):
return CryptAcquireContextA.ctypes_function(phProv, pszContainer, pszProvider, dwProvType, dwFlags)
@Advapi32Proxy()
def CryptAcquireContextW(phProv, pszContainer, pszProvider, dwProvType, dwFlags):
return CryptAcquireContextW.ctypes_function(phProv, pszContainer, pszProvider, dwProvType, dwFlags)
@Advapi32Proxy()
def CryptReleaseContext(hProv, dwFlags):
return CryptReleaseContext.ctypes_function(hProv, dwFlags)
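

# Hedged usage sketch (not part of the original module): open the current
# process token for querying. TOKEN_QUERY is a standard Windows access-right
# constant and is assumed to be present in the generated definitions.
def _example_open_current_process_token():
    token = gdef.HANDLE()
    OpenProcessToken(DesiredAccess=gdef.TOKEN_QUERY, TokenHandle=ctypes.byref(token))
    return token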


# -- File: docs/tutorial/python/flask/app.py (repo: mrpotes/go-raml, license: BSD-2-Clause) --
# DO NOT EDIT THIS FILE. This file will be overwritten when re-running go-raml.
from flask import Flask, send_from_directory, send_file
from users_api import users_api
import os
dir_path = os.path.dirname(os.path.realpath(__file__))
app = Flask(__name__)
app.register_blueprint(users_api)
@app.route('/apidocs/<path:path>')
def send_js(path):
return send_from_directory(dir_path + '/' + 'apidocs', path)
@app.route('/', methods=['GET'])
def home():
return send_file(dir_path + '/index.html')
if __name__ == "__main__":
app.run(debug=True)
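
# Usage note (derived from the routes above): `python app.py` starts the Flask
# development server; GET / serves index.html and GET /apidocs/<path> serves
# the generated API documentation files.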


# -- File: src/pretix/base/services/cart.py (repo: MaxRink/pretix, licenses: ECL-2.0, Apache-2.0) --
from collections import Counter, defaultdict, namedtuple
from datetime import timedelta
from decimal import Decimal
from typing import List, Optional
from celery.exceptions import MaxRetriesExceededError
from django.db import transaction
from django.db.models import Q
from django.dispatch import receiver
from django.utils.timezone import now
from django.utils.translation import pgettext_lazy, ugettext as _
from pretix.base.i18n import LazyLocaleException, language
from pretix.base.models import (
CartPosition, Event, InvoiceAddress, Item, ItemVariation, Voucher,
)
from pretix.base.models.event import SubEvent
from pretix.base.models.orders import OrderFee
from pretix.base.models.tax import TAXED_ZERO, TaxedPrice, TaxRule
# "async" became a reserved keyword in Python 3.7, so the legacy module name is
# imported via importlib to keep this file parseable on newer interpreters.
import importlib
ProfiledTask = importlib.import_module('pretix.base.services.async').ProfiledTask
from pretix.base.services.locking import LockTimeoutException
from pretix.base.services.pricing import get_price
from pretix.base.templatetags.rich_text import rich_text
from pretix.celery_app import app
from pretix.presale.signals import (
checkout_confirm_messages, fee_calculation_for_cart,
)
class CartError(LazyLocaleException):
pass
error_messages = {
'busy': _('We were not able to process your request completely as the '
'server was too busy. Please try again.'),
'empty': _('You did not select any products.'),
'unknown_position': _('Unknown cart position.'),
'subevent_required': pgettext_lazy('subevent', 'No date was specified.'),
'not_for_sale': _('You selected a product which is not available for sale.'),
'unavailable': _('Some of the products you selected are no longer available. '
'Please see below for details.'),
'in_part': _('Some of the products you selected are no longer available in '
'the quantity you selected. Please see below for details.'),
'max_items': _("You cannot select more than %s items per order."),
'max_items_per_product': _("You cannot select more than %(max)s items of the product %(product)s."),
'min_items_per_product': _("You need to select at least %(min)s items of the product %(product)s."),
'min_items_per_product_removed': _("We removed %(product)s from your cart as you can not buy less than "
"%(min)s items of it."),
'not_started': _('The presale period for this event has not yet started.'),
'ended': _('The presale period for this event has ended.'),
'some_subevent_not_started': _('The presale period for this event has not yet started. The affected positions '
'have been removed from your cart.'),
'some_subevent_ended': _('The presale period for one of the events in your cart has ended. The affected '
'positions have been removed from your cart.'),
    'price_too_high': _('The entered price is too high.'),
'voucher_invalid': _('This voucher code is not known in our database.'),
'voucher_redeemed': _('This voucher code has already been used the maximum number of times allowed.'),
'voucher_redeemed_partial': _('This voucher code can only be redeemed %d more times.'),
'voucher_double': _('You already used this voucher code. Remove the associated line from your '
'cart if you want to use it for a different product.'),
'voucher_expired': _('This voucher is expired.'),
'voucher_invalid_item': _('This voucher is not valid for this product.'),
'voucher_item_not_available': _(
'Your voucher is valid for a product that is currently not for sale.'),
'voucher_invalid_subevent': pgettext_lazy('subevent', 'This voucher is not valid for this event date.'),
'voucher_required': _('You need a valid voucher code to order this product.'),
'inactive_subevent': pgettext_lazy('subevent', 'The selected event date is not active.'),
'addon_invalid_base': _('You can not select an add-on for the selected product.'),
'addon_duplicate_item': _('You can not select two variations of the same add-on product.'),
'addon_max_count': _('You can select at most %(max)s add-ons from the category %(cat)s for the product %(base)s.'),
'addon_min_count': _('You need to select at least %(min)s add-ons from the category %(cat)s for the '
'product %(base)s.'),
    'addon_only': _('One of the products you selected can only be bought as an add-on to another product.'),
}
class CartManager:
AddOperation = namedtuple('AddOperation', ('count', 'item', 'variation', 'price', 'voucher', 'quotas',
'addon_to', 'subevent', 'includes_tax'))
RemoveOperation = namedtuple('RemoveOperation', ('position',))
ExtendOperation = namedtuple('ExtendOperation', ('position', 'count', 'item', 'variation', 'price', 'voucher',
'quotas', 'subevent'))
order = {
RemoveOperation: 10,
ExtendOperation: 20,
AddOperation: 30
}
def __init__(self, event: Event, cart_id: str, invoice_address: InvoiceAddress=None):
self.event = event
self.cart_id = cart_id
self.now_dt = now()
self._operations = []
self._quota_diff = Counter()
self._voucher_use_diff = Counter()
self._items_cache = {}
self._subevents_cache = {}
self._variations_cache = {}
self._expiry = None
self.invoice_address = invoice_address
@property
def positions(self):
return CartPosition.objects.filter(
Q(cart_id=self.cart_id) & Q(event=self.event)
).select_related('item', 'subevent')
def _calculate_expiry(self):
self._expiry = self.now_dt + timedelta(minutes=self.event.settings.get('reservation_time', as_type=int))
def _check_presale_dates(self):
if self.event.presale_start and self.now_dt < self.event.presale_start:
raise CartError(error_messages['not_started'])
if self.event.presale_has_ended:
raise CartError(error_messages['ended'])
def _extend_expiry_of_valid_existing_positions(self):
# Extend this user's cart session to ensure all items in the cart expire at the same time
# We can extend the reservation of items which are not yet expired without risk
self.positions.filter(expires__gt=self.now_dt).update(expires=self._expiry)
def _delete_out_of_timeframe(self):
err = None
for cp in self.positions:
if cp.subevent and cp.subevent.presale_start and self.now_dt < cp.subevent.presale_start:
err = error_messages['some_subevent_not_started']
cp.delete()
if cp.subevent and cp.subevent.presale_end and self.now_dt > cp.subevent.presale_end:
err = error_messages['some_subevent_ended']
cp.delete()
return err
def _update_subevents_cache(self, se_ids: List[int]):
self._subevents_cache.update({
i.pk: i
            for i in self.event.subevents.filter(id__in=[i for i in se_ids if i and i not in self._subevents_cache])
})
def _update_items_cache(self, item_ids: List[int], variation_ids: List[int]):
self._items_cache.update({
i.pk: i
for i in self.event.items.select_related('category').prefetch_related(
'addons', 'addons__addon_category', 'quotas'
).filter(
id__in=[i for i in item_ids if i and i not in self._items_cache]
)
})
self._variations_cache.update({
v.pk: v
for v in ItemVariation.objects.filter(item__event=self.event).prefetch_related(
'quotas'
).select_related('item', 'item__event').filter(
id__in=[i for i in variation_ids if i and i not in self._variations_cache]
)
})
def _check_max_cart_size(self):
cartsize = self.positions.filter(addon_to__isnull=True).count()
cartsize += sum([op.count for op in self._operations if isinstance(op, self.AddOperation) and not op.addon_to])
cartsize -= len([1 for op in self._operations if isinstance(op, self.RemoveOperation) if
not op.position.addon_to_id])
if cartsize > int(self.event.settings.max_items_per_order):
# TODO: i18n plurals
raise CartError(_(error_messages['max_items']) % (self.event.settings.max_items_per_order,))
def _check_item_constraints(self, op):
if isinstance(op, self.AddOperation) or isinstance(op, self.ExtendOperation):
if op.item.require_voucher and op.voucher is None:
raise CartError(error_messages['voucher_required'])
if op.item.hide_without_voucher and (op.voucher is None or op.voucher.item is None or op.voucher.item.pk != op.item.pk):
raise CartError(error_messages['voucher_required'])
if not op.item.is_available() or (op.variation and not op.variation.active):
raise CartError(error_messages['unavailable'])
if op.voucher and not op.voucher.applies_to(op.item, op.variation):
raise CartError(error_messages['voucher_invalid_item'])
if op.voucher and op.voucher.subevent_id and op.voucher.subevent_id != op.subevent.pk:
raise CartError(error_messages['voucher_invalid_subevent'])
if op.subevent and not op.subevent.active:
raise CartError(error_messages['inactive_subevent'])
if op.subevent and op.subevent.presale_start and self.now_dt < op.subevent.presale_start:
raise CartError(error_messages['not_started'])
if op.subevent and op.subevent.presale_has_ended:
raise CartError(error_messages['ended'])
if isinstance(op, self.AddOperation):
if op.item.category and op.item.category.is_addon and not op.addon_to:
raise CartError(error_messages['addon_only'])
if op.item.max_per_order or op.item.min_per_order:
new_total = (
len([1 for p in self.positions if p.item_id == op.item.pk]) +
sum([_op.count for _op in self._operations
if isinstance(_op, self.AddOperation) and _op.item == op.item]) +
op.count -
len([1 for _op in self._operations
if isinstance(_op, self.RemoveOperation) and _op.position.item_id == op.item.pk])
)
if op.item.max_per_order and new_total > op.item.max_per_order:
raise CartError(
_(error_messages['max_items_per_product']) % {
'max': op.item.max_per_order,
'product': op.item.name
}
)
if op.item.min_per_order and new_total < op.item.min_per_order:
raise CartError(
_(error_messages['min_items_per_product']) % {
'min': op.item.min_per_order,
'product': op.item.name
}
)
def _get_price(self, item: Item, variation: Optional[ItemVariation],
voucher: Optional[Voucher], custom_price: Optional[Decimal],
subevent: Optional[SubEvent], cp_is_net: bool=None):
return get_price(
item, variation, voucher, custom_price, subevent,
custom_price_is_net=cp_is_net if cp_is_net is not None else self.event.settings.display_net_prices,
invoice_address=self.invoice_address
)
def extend_expired_positions(self):
expired = self.positions.filter(expires__lte=self.now_dt).select_related(
'item', 'variation', 'voucher'
).prefetch_related('item__quotas', 'variation__quotas')
err = None
for cp in expired:
if not cp.includes_tax:
price = self._get_price(cp.item, cp.variation, cp.voucher, cp.price, cp.subevent,
cp_is_net=True)
price = TaxedPrice(net=price.net, gross=price.net, rate=0, tax=0, name='')
else:
price = self._get_price(cp.item, cp.variation, cp.voucher, cp.price, cp.subevent)
quotas = list(cp.quotas)
if not quotas:
self._operations.append(self.RemoveOperation(position=cp))
continue
err = error_messages['unavailable']
if not cp.voucher or (not cp.voucher.allow_ignore_quota and not cp.voucher.block_quota):
for quota in quotas:
self._quota_diff[quota] += 1
else:
quotas = []
op = self.ExtendOperation(
position=cp, item=cp.item, variation=cp.variation, voucher=cp.voucher, count=1,
price=price, quotas=quotas, subevent=cp.subevent
)
self._check_item_constraints(op)
if cp.voucher:
self._voucher_use_diff[cp.voucher] += 1
self._operations.append(op)
return err
def add_new_items(self, items: List[dict]):
# Fetch items from the database
self._update_items_cache([i['item'] for i in items], [i['variation'] for i in items])
self._update_subevents_cache([i['subevent'] for i in items if i.get('subevent')])
quota_diff = Counter()
voucher_use_diff = Counter()
operations = []
for i in items:
# Check whether the specified items are part of what we just fetched from the database
# If they are not, the user supplied item IDs which either do not exist or belong to
# a different event
if i['item'] not in self._items_cache or (i['variation'] and i['variation'] not in self._variations_cache):
raise CartError(error_messages['not_for_sale'])
if self.event.has_subevents:
if not i.get('subevent'):
raise CartError(error_messages['subevent_required'])
subevent = self._subevents_cache[int(i.get('subevent'))]
else:
subevent = None
item = self._items_cache[i['item']]
variation = self._variations_cache[i['variation']] if i['variation'] is not None else None
voucher = None
if i.get('voucher'):
try:
voucher = self.event.vouchers.get(code__iexact=i.get('voucher').strip())
except Voucher.DoesNotExist:
raise CartError(error_messages['voucher_invalid'])
else:
voucher_use_diff[voucher] += i['count']
# Fetch all quotas. If there are no quotas, this item is not allowed to be sold.
quotas = list(item.quotas.filter(subevent=subevent)
if variation is None else variation.quotas.filter(subevent=subevent))
if not quotas:
raise CartError(error_messages['unavailable'])
if not voucher or (not voucher.allow_ignore_quota and not voucher.block_quota):
for quota in quotas:
quota_diff[quota] += i['count']
else:
quotas = []
price = self._get_price(item, variation, voucher, i.get('price'), subevent)
op = self.AddOperation(
count=i['count'], item=item, variation=variation, price=price, voucher=voucher, quotas=quotas,
addon_to=False, subevent=subevent, includes_tax=bool(price.rate)
)
self._check_item_constraints(op)
operations.append(op)
self._quota_diff.update(quota_diff)
self._voucher_use_diff += voucher_use_diff
self._operations += operations
def remove_item(self, pos_id: int):
# TODO: We could calculate quotadiffs and voucherdiffs here, which would lead to more
# flexible usages (e.g. a RemoveOperation and an AddOperation in the same transaction
# could cancel each other out quota-wise). However, we are not taking this performance
# penalty for now as there is currently no outside interface that would allow building
# such a transaction.
try:
cp = self.positions.get(pk=pos_id)
except CartPosition.DoesNotExist:
raise CartError(error_messages['unknown_position'])
self._operations.append(self.RemoveOperation(position=cp))
def clear(self):
# TODO: We could calculate quotadiffs and voucherdiffs here, which would lead to more
# flexible usages (e.g. a RemoveOperation and an AddOperation in the same transaction
# could cancel each other out quota-wise). However, we are not taking this performance
# penalty for now as there is currently no outside interface that would allow building
# such a transaction.
for cp in self.positions.all():
self._operations.append(self.RemoveOperation(position=cp))
def set_addons(self, addons):
self._update_items_cache(
[a['item'] for a in addons],
[a['variation'] for a in addons],
)
# Prepare various containers to hold data later
current_addons = defaultdict(dict) # CartPos -> currently attached add-ons
input_addons = defaultdict(set) # CartPos -> add-ons according to input
selected_addons = defaultdict(set) # CartPos -> final desired set of add-ons
cpcache = {} # CartPos.pk -> CartPos
quota_diff = Counter() # Quota -> Number of usages
operations = []
available_categories = defaultdict(set) # CartPos -> Category IDs to choose from
price_included = defaultdict(dict) # CartPos -> CategoryID -> bool(price is included)
toplevel_cp = self.positions.filter(
addon_to__isnull=True
).prefetch_related(
'addons', 'item__addons', 'item__addons__addon_category'
).select_related('item', 'variation')
# Prefill some of the cache containers
for cp in toplevel_cp:
available_categories[cp.pk] = {iao.addon_category_id for iao in cp.item.addons.all()}
price_included[cp.pk] = {iao.addon_category_id: iao.price_included for iao in cp.item.addons.all()}
cpcache[cp.pk] = cp
current_addons[cp] = {
(a.item_id, a.variation_id): a
for a in cp.addons.all()
}
# Create operations, perform various checks
for a in addons:
# Check whether the specified items are part of what we just fetched from the database
# If they are not, the user supplied item IDs which either do not exist or belong to
# a different event
if a['item'] not in self._items_cache or (a['variation'] and a['variation'] not in self._variations_cache):
raise CartError(error_messages['not_for_sale'])
# Only attach addons to things that are actually in this user's cart
if a['addon_to'] not in cpcache:
raise CartError(error_messages['addon_invalid_base'])
cp = cpcache[a['addon_to']]
item = self._items_cache[a['item']]
variation = self._variations_cache[a['variation']] if a['variation'] is not None else None
if item.category_id not in available_categories[cp.pk]:
raise CartError(error_messages['addon_invalid_base'])
# Fetch all quotas. If there are no quotas, this item is not allowed to be sold.
quotas = list(item.quotas.filter(subevent=cp.subevent)
if variation is None else variation.quotas.filter(subevent=cp.subevent))
if not quotas:
raise CartError(error_messages['unavailable'])
            # Every item can be attached to every CartPosition at most once
if a['item'] in ([_a[0] for _a in input_addons[cp.id]]):
raise CartError(error_messages['addon_duplicate_item'])
input_addons[cp.id].add((a['item'], a['variation']))
selected_addons[cp.id, item.category_id].add((a['item'], a['variation']))
if (a['item'], a['variation']) not in current_addons[cp]:
# This add-on is new, add it to the cart
for quota in quotas:
quota_diff[quota] += 1
if price_included[cp.pk].get(item.category_id):
price = TAXED_ZERO
else:
price = self._get_price(item, variation, None, None, cp.subevent)
op = self.AddOperation(
count=1, item=item, variation=variation, price=price, voucher=None, quotas=quotas,
addon_to=cp, subevent=cp.subevent, includes_tax=bool(price.rate)
)
self._check_item_constraints(op)
operations.append(op)
# Check constraints on the add-on combinations
for cp in toplevel_cp:
item = cp.item
for iao in item.addons.all():
selected = selected_addons[cp.id, iao.addon_category_id]
if len(selected) > iao.max_count:
# TODO: Proper i18n
# TODO: Proper pluralization
raise CartError(
error_messages['addon_max_count'],
{
'base': str(item.name),
'max': iao.max_count,
'cat': str(iao.addon_category.name),
}
)
elif len(selected) < iao.min_count:
# TODO: Proper i18n
# TODO: Proper pluralization
raise CartError(
error_messages['addon_min_count'],
{
'base': str(item.name),
'min': iao.min_count,
'cat': str(iao.addon_category.name),
}
)
# Detect removed add-ons and create RemoveOperations
for cp, al in current_addons.items():
for k, v in al.items():
if k not in input_addons[cp.id]:
if v.expires > self.now_dt:
quotas = list(v.quotas)
for quota in quotas:
quota_diff[quota] -= 1
op = self.RemoveOperation(position=v)
operations.append(op)
self._quota_diff.update(quota_diff)
self._operations += operations
def _get_quota_availability(self):
quotas_ok = defaultdict(int)
for quota, count in self._quota_diff.items():
            if count <= 0:
                quotas_ok[quota] = 0
                continue
            avail = quota.availability(self.now_dt)
if avail[1] is not None and avail[1] < count:
quotas_ok[quota] = min(count, avail[1])
else:
quotas_ok[quota] = count
return quotas_ok
def _get_voucher_availability(self):
vouchers_ok = {}
for voucher, count in self._voucher_use_diff.items():
voucher.refresh_from_db()
if voucher.valid_until is not None and voucher.valid_until < self.now_dt:
raise CartError(error_messages['voucher_expired'])
redeemed_in_carts = CartPosition.objects.filter(
Q(voucher=voucher) & Q(event=self.event) &
Q(expires__gte=self.now_dt)
).exclude(pk__in=[
            op.position.pk for op in self._operations if isinstance(op, self.ExtendOperation)
])
v_avail = voucher.max_usages - voucher.redeemed - redeemed_in_carts.count()
vouchers_ok[voucher] = v_avail
return vouchers_ok
def _check_min_per_product(self):
per_product = Counter()
min_per_product = {}
for p in self.positions:
per_product[p.item_id] += 1
min_per_product[p.item.pk] = p.item.min_per_order
for op in self._operations:
if isinstance(op, self.AddOperation):
per_product[op.item.pk] += op.count
min_per_product[op.item.pk] = op.item.min_per_order
elif isinstance(op, self.RemoveOperation):
per_product[op.position.item_id] -= 1
min_per_product[op.position.item.pk] = op.position.item.min_per_order
err = None
for itemid, num in per_product.items():
min_p = min_per_product[itemid]
if min_p and num < min_p:
self._operations = [o for o in self._operations if not (
isinstance(o, self.AddOperation) and o.item.pk == itemid
)]
removals = [o.position.pk for o in self._operations if isinstance(o, self.RemoveOperation)]
for p in self.positions:
if p.item_id == itemid and p.pk not in removals:
self._operations.append(self.RemoveOperation(position=p))
err = _(error_messages['min_items_per_product_removed']) % {
'min': min_p,
'product': p.item.name
}
return err
def _perform_operations(self):
vouchers_ok = self._get_voucher_availability()
quotas_ok = self._get_quota_availability()
err = None
new_cart_positions = []
err = err or self._check_min_per_product()
self._operations.sort(key=lambda a: self.order[type(a)])
for op in self._operations:
if isinstance(op, self.RemoveOperation):
if op.position.expires > self.now_dt:
for q in op.position.quotas:
quotas_ok[q] += 1
op.position.delete()
elif isinstance(op, self.AddOperation) or isinstance(op, self.ExtendOperation):
# Create a CartPosition for as much items as we can
requested_count = quota_available_count = voucher_available_count = op.count
if op.quotas:
quota_available_count = min(requested_count, min(quotas_ok[q] for q in op.quotas))
if op.voucher:
voucher_available_count = min(voucher_available_count, vouchers_ok[op.voucher])
if quota_available_count < 1:
err = err or error_messages['unavailable']
elif quota_available_count < requested_count:
err = err or error_messages['in_part']
if voucher_available_count < 1:
err = err or error_messages['voucher_redeemed']
elif voucher_available_count < requested_count:
err = err or error_messages['voucher_redeemed_partial'] % voucher_available_count
available_count = min(quota_available_count, voucher_available_count)
for q in op.quotas:
quotas_ok[q] -= available_count
if op.voucher:
vouchers_ok[op.voucher] -= available_count
if isinstance(op, self.AddOperation):
for k in range(available_count):
new_cart_positions.append(CartPosition(
event=self.event, item=op.item, variation=op.variation,
price=op.price.gross, expires=self._expiry, cart_id=self.cart_id,
voucher=op.voucher, addon_to=op.addon_to if op.addon_to else None,
subevent=op.subevent, includes_tax=op.includes_tax
))
elif isinstance(op, self.ExtendOperation):
if available_count == 1:
op.position.expires = self._expiry
op.position.price = op.price.gross
op.position.save()
elif available_count == 0:
op.position.delete()
else:
raise AssertionError("ExtendOperation cannot affect more than one item")
CartPosition.objects.bulk_create(new_cart_positions)
return err
def commit(self):
self._check_presale_dates()
self._check_max_cart_size()
self._calculate_expiry()
with self.event.lock() as now_dt:
with transaction.atomic():
self.now_dt = now_dt
self._extend_expiry_of_valid_existing_positions()
err = self._delete_out_of_timeframe()
err = self.extend_expired_positions() or err
err = self._perform_operations() or err
if err:
raise CartError(err)
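

# Hedged usage sketch (not part of the original module): reserve one unit of an
# item for a guest cart through the CartManager API above. The dict keys mirror
# exactly what add_new_items() reads; subevent stays None for events that are
# not an event series.
def _example_add_single_item(event: Event, cart_id: str, item_id: int) -> None:
    cm = CartManager(event=event, cart_id=cart_id)
    cm.add_new_items([{
        'item': item_id, 'variation': None, 'count': 1,
        'price': None, 'voucher': None, 'subevent': None,
    }])
    cm.commit()  # locks the event, re-checks quotas and vouchers, persists positions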
def update_tax_rates(event: Event, cart_id: str, invoice_address: InvoiceAddress):
positions = CartPosition.objects.filter(
cart_id=cart_id, event=event
).select_related('item', 'item__tax_rule')
totaldiff = Decimal('0.00')
for pos in positions:
if not pos.item.tax_rule:
continue
charge_tax = pos.item.tax_rule.tax_applicable(invoice_address)
if pos.includes_tax and not charge_tax:
price = pos.item.tax(pos.price, base_price_is='gross').net
totaldiff += price - pos.price
pos.price = price
pos.includes_tax = False
pos.save(update_fields=['price', 'includes_tax'])
elif charge_tax and not pos.includes_tax:
price = pos.item.tax(pos.price, base_price_is='net').gross
totaldiff += price - pos.price
pos.price = price
pos.includes_tax = True
pos.save(update_fields=['price', 'includes_tax'])
return totaldiff
def get_fees(event, request, total, invoice_address, provider):
fees = []
if provider and total != 0:
provider = event.get_payment_providers().get(provider)
if provider:
payment_fee = provider.calculate_fee(total)
if payment_fee:
payment_fee_tax_rule = event.settings.tax_rate_default or TaxRule.zero()
if payment_fee_tax_rule.tax_applicable(invoice_address):
payment_fee_tax = payment_fee_tax_rule.tax(payment_fee, base_price_is='gross')
fees.append(OrderFee(
fee_type=OrderFee.FEE_TYPE_PAYMENT,
value=payment_fee,
tax_rate=payment_fee_tax.rate,
tax_value=payment_fee_tax.tax,
tax_rule=payment_fee_tax_rule
))
else:
fees.append(OrderFee(
fee_type=OrderFee.FEE_TYPE_PAYMENT,
value=payment_fee,
tax_rate=Decimal('0.00'),
tax_value=Decimal('0.00'),
tax_rule=payment_fee_tax_rule
))
for recv, resp in fee_calculation_for_cart.send(sender=event, request=request, invoice_address=invoice_address,
total=total):
fees += resp
return fees
@app.task(base=ProfiledTask, bind=True, max_retries=5, default_retry_delay=1, throws=(CartError,))
def add_items_to_cart(self, event: int, items: List[dict], cart_id: str=None, locale='en',
invoice_address: int=None) -> None:
"""
Adds a list of items to a user's cart.
:param event: The event ID in question
    :param items: A list of dicts with the keys item, variation, count, price, voucher, subevent
    :param cart_id: Session ID of a guest
    :raises CartError: On any error that occurred
"""
with language(locale):
event = Event.objects.get(id=event)
ia = False
if invoice_address:
try:
ia = InvoiceAddress.objects.get(pk=invoice_address)
except InvoiceAddress.DoesNotExist:
pass
try:
try:
cm = CartManager(event=event, cart_id=cart_id, invoice_address=ia)
cm.add_new_items(items)
cm.commit()
except LockTimeoutException:
self.retry()
except (MaxRetriesExceededError, LockTimeoutException):
raise CartError(error_messages['busy'])
@app.task(base=ProfiledTask, bind=True, max_retries=5, default_retry_delay=1, throws=(CartError,))
def remove_cart_position(self, event: int, position: int, cart_id: str=None, locale='en') -> None:
"""
    Removes a single position from a user's cart.

    :param event: The event ID in question
    :param position: A cart position ID
    :param cart_id: Session ID of a guest
"""
with language(locale):
event = Event.objects.get(id=event)
try:
try:
cm = CartManager(event=event, cart_id=cart_id)
cm.remove_item(position)
cm.commit()
except LockTimeoutException:
self.retry()
except (MaxRetriesExceededError, LockTimeoutException):
raise CartError(error_messages['busy'])
@app.task(base=ProfiledTask, bind=True, max_retries=5, default_retry_delay=1, throws=(CartError,))
def clear_cart(self, event: int, cart_id: str=None, locale='en') -> None:
"""
    Removes all items from a user's cart.

    :param event: The event ID in question
    :param cart_id: Session ID of a guest
"""
with language(locale):
event = Event.objects.get(id=event)
try:
try:
cm = CartManager(event=event, cart_id=cart_id)
cm.clear()
cm.commit()
except LockTimeoutException:
self.retry()
except (MaxRetriesExceededError, LockTimeoutException):
raise CartError(error_messages['busy'])
@app.task(base=ProfiledTask, bind=True, max_retries=5, default_retry_delay=1, throws=(CartError,))
def set_cart_addons(self, event: int, addons: List[dict], cart_id: str=None, locale='en',
invoice_address: int=None) -> None:
"""
    Sets the add-on products for the positions in a user's cart.

    :param event: The event ID in question
    :param addons: A list of dicts with the keys addon_to, item, variation
    :param cart_id: Session ID of a guest
"""
with language(locale):
event = Event.objects.get(id=event)
ia = False
if invoice_address:
try:
ia = InvoiceAddress.objects.get(pk=invoice_address)
except InvoiceAddress.DoesNotExist:
pass
try:
try:
cm = CartManager(event=event, cart_id=cart_id, invoice_address=ia)
cm.set_addons(addons)
cm.commit()
except LockTimeoutException:
self.retry()
except (MaxRetriesExceededError, LockTimeoutException):
raise CartError(error_messages['busy'])
@receiver(checkout_confirm_messages, dispatch_uid="cart_confirm_messages")
def confirm_messages(sender, *args, **kwargs):
if not sender.settings.confirm_text:
return {}
return {
'confirm_text': rich_text(str(sender.settings.confirm_text))
}


# -- File: var/spack/repos/builtin/packages/py-mysqldb1/package.py (repo: jeanbez/spack, licenses: ECL-2.0, Apache-2.0, MIT-0, MIT) --
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class PyMysqldb1(PythonPackage):
"""Legacy mysql bindings for python"""
homepage = "https://github.com/farcepest/MySQLdb1"
url = "https://github.com/farcepest/MySQLdb1/archive/MySQLdb-1.2.5.tar.gz"
version('1.2.5', sha256='905dd8be887ff596641ace5411fed17cfd08dd33699ea627d3fb44f8a922c2f0',
url="https://github.com/farcepest/MySQLdb1/archive/MySQLdb-1.2.5.tar.gz")
depends_on('mysql@:6')
depends_on('py-setuptools', type='build')
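
# Usage note: with this recipe on a Spack repo path, `spack install py-mysqldb1`
# builds the package; Spack resolves and builds the mysql@:6 dependency itself.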


# -- File: src/virtual-wan/azext_vwan/vendored_sdks/v2020_05_01/v2020_05_01/operations/_express_route_circuit_connections_operations.py
#    (repo: Mannan2812/azure-cli-extensions, license: MIT) --
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller, NoPolling
from msrestazure.polling.arm_polling import ARMPolling
from .. import models
class ExpressRouteCircuitConnectionsOperations(object):
"""ExpressRouteCircuitConnectionsOperations operations.
You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client API version. Constant value: "2020-05-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2020-05-01"
self.config = config
def _delete_initial(
self, resource_group_name, circuit_name, peering_name, connection_name, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.delete.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def delete(
self, resource_group_name, circuit_name, peering_name, connection_name, custom_headers=None, raw=False, polling=True, **operation_config):
"""Deletes the specified Express Route Circuit Connection from the
specified express route circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:param connection_name: The name of the express route circuit
connection.
:type connection_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns None or
ClientRawResponse<None> if raw==True
:rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
circuit_name=circuit_name,
peering_name=peering_name,
connection_name=connection_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/connections/{connectionName}'}
def get(
self, resource_group_name, circuit_name, peering_name, connection_name, custom_headers=None, raw=False, **operation_config):
"""Gets the specified Express Route Circuit Connection from the specified
express route circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:param connection_name: The name of the express route circuit
connection.
:type connection_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: ExpressRouteCircuitConnection or ClientRawResponse if
raw=true
:rtype:
~azure.mgmt.network.v2020_05_01.models.ExpressRouteCircuitConnection
or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.get.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ExpressRouteCircuitConnection', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/connections/{connectionName}'}
def _create_or_update_initial(
self, resource_group_name, circuit_name, peering_name, connection_name, express_route_circuit_connection_parameters, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.create_or_update.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(express_route_circuit_connection_parameters, 'ExpressRouteCircuitConnection')
# Construct and send request
request = self._client.put(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ExpressRouteCircuitConnection', response)
if response.status_code == 201:
deserialized = self._deserialize('ExpressRouteCircuitConnection', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create_or_update(
            self, resource_group_name, circuit_name, peering_name, connection_name, express_route_circuit_connection_parameters, custom_headers=None, raw=False, polling=True, **operation_config):
        """Creates or updates an Express Route Circuit Connection in the
        specified express route circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:param connection_name: The name of the express route circuit
connection.
:type connection_name: str
:param express_route_circuit_connection_parameters: Parameters
supplied to the create or update express route circuit connection
operation.
:type express_route_circuit_connection_parameters:
~azure.mgmt.network.v2020_05_01.models.ExpressRouteCircuitConnection
:param dict custom_headers: headers that will be added to the request
        :param bool raw: If True, the poller returns ClientRawResponse, the
         direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns
ExpressRouteCircuitConnection or
ClientRawResponse<ExpressRouteCircuitConnection> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2020_05_01.models.ExpressRouteCircuitConnection]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2020_05_01.models.ExpressRouteCircuitConnection]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
circuit_name=circuit_name,
peering_name=peering_name,
connection_name=connection_name,
express_route_circuit_connection_parameters=express_route_circuit_connection_parameters,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('ExpressRouteCircuitConnection', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/connections/{connectionName}'}
def list(
self, resource_group_name, circuit_name, peering_name, custom_headers=None, raw=False, **operation_config):
"""Gets all global reach connections associated with a private peering in
an express route circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of ExpressRouteCircuitConnection
:rtype:
~azure.mgmt.network.v2020_05_01.models.ExpressRouteCircuitConnectionPaged[~azure.mgmt.network.v2020_05_01.models.ExpressRouteCircuitConnection]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
def internal_paging(next_link=None):
request = prepare_request(next_link)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
header_dict = None
if raw:
header_dict = {}
deserialized = models.ExpressRouteCircuitConnectionPaged(internal_paging, self._deserialize.dependencies, header_dict)
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/connections'}
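if __name__ == '__main__':
    # Hedged usage sketch (editorial addition, not part of the generated SDK):
    # exercises the operations above through the msrest-based
    # NetworkManagementClient. Credentials and resource names are placeholders.
    from azure.common.credentials import ServicePrincipalCredentials
    from azure.mgmt.network import NetworkManagementClient
    credentials = ServicePrincipalCredentials(
        client_id='<client-id>', secret='<secret>', tenant='<tenant-id>')
    client = NetworkManagementClient(credentials, '<subscription-id>')
    ops = client.express_route_circuit_connections
    # create_or_update and delete are long-running: block on the LROPoller.
    conn = ops.create_or_update(
        'my-rg', 'my-circuit', 'AzurePrivatePeering', 'my-connection',
        express_route_circuit_connection_parameters={}).result()
    conn = ops.get('my-rg', 'my-circuit', 'AzurePrivatePeering', 'my-connection')
    for c in ops.list('my-rg', 'my-circuit', 'AzurePrivatePeering'):
        print(c.name)
    ops.delete('my-rg', 'my-circuit', 'AzurePrivatePeering', 'my-connection').wait()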
| 48.944304 | 224 | 0.681012 |
c95239e59f82976ab4acfe7e85bd943bf0840df8 | 751 | py | Python | tests/test_one_sample_server.py | dionresearch/hotelling | ac6ef7d3674f1e9af7511d3a1fb205ee40b321b7 | [
"MIT"
] | 12 | 2020-07-04T13:50:04.000Z | 2021-07-17T08:07:50.000Z | tests/test_one_sample_server.py | dionresearch/hotelling | ac6ef7d3674f1e9af7511d3a1fb205ee40b321b7 | [
"MIT"
] | 19 | 2021-01-15T10:07:24.000Z | 2021-07-30T13:34:13.000Z | tests/test_one_sample_server.py | dionresearch/hotelling | ac6ef7d3674f1e9af7511d3a1fb205ee40b321b7 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `hotelling` package."""
import numpy as np
import pytest
import pandas as pd
from hotelling.helpers import load_df
from hotelling.stats import hotelling_t2
try:
import distributed
def test_hotelling_test_csv_one_sample_server():
x = load_df('data/shoes.csv', server="localhost", index_col='Subject')
res = hotelling_t2(x, np.asarray([7, 8, 5, 7, 9]))
assert round(res[0], 4) == 52.6724 # T2
assert round(res[1], 4) == 8.7787 # F
assert round(res[2], 5) == 0.00016 # P-value
except ModuleNotFoundError:
@pytest.mark.skip(reason="distributed module is not available")
def test_hotelling_test_csv_one_sample_server():
pass | 28.884615 | 78 | 0.671105 |
2213f2fafff58ffcbebe481561f8c32178533125 | 613 | py | Python | utils/get_norm.py | feevos/ceecnet | 9dc76f8cd16d44b264cae8c5846eefb8fcf6162d | [
"BSD-3-Clause",
"MIT"
] | 45 | 2020-09-07T01:19:44.000Z | 2022-03-15T14:44:20.000Z | FracTAL_ResUNet/utils/get_norm.py | m0rp43us/decode | bed6f6b4173f49362a5113207155cc103e8fd139 | [
"BSD-3-Clause",
"MIT"
] | 10 | 2020-10-02T10:14:47.000Z | 2021-10-19T09:34:14.000Z | FracTAL_ResUNet/utils/get_norm.py | m0rp43us/decode | bed6f6b4173f49362a5113207155cc103e8fd139 | [
"BSD-3-Clause",
"MIT"
] | 14 | 2020-09-29T02:46:18.000Z | 2021-09-27T07:13:47.000Z | import mxnet as mx
from mxnet import gluon
def get_norm(name, axis=1, norm_groups=None):
    if name == 'BatchNorm':
        return gluon.nn.BatchNorm(axis=axis)
    elif name == 'InstanceNorm':
        return gluon.nn.InstanceNorm(axis=axis)
    elif name == 'LayerNorm':
        return gluon.nn.LayerNorm(axis=axis)
    elif name == 'GroupNorm' and norm_groups is not None:
        return gluon.nn.GroupNorm(num_groups=norm_groups)  # applied to channel axis
else:
raise NotImplementedError
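if __name__ == '__main__':
    # Hedged usage sketch (editorial addition): build a normalization layer by
    # name and run a dummy batch through it. Requires MXNet with the Gluon API.
    layer = get_norm('GroupNorm', axis=1, norm_groups=4)
    layer.initialize()
    out = layer(mx.nd.random.uniform(shape=(2, 8, 4, 4)))
    print(out.shape)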
| 40.866667 | 86 | 0.564437 |
0c8029590cc579ff8d4bb1913923fc2c84382cb1 | 3,182 | py | Python | resources/usr/lib/python2.7/dist-packages/numpy/oldnumeric/compat.py | edawson/parliament2 | 2632aa3484ef64c9539c4885026b705b737f6d1e | [
"Apache-2.0"
] | null | null | null | resources/usr/lib/python2.7/dist-packages/numpy/oldnumeric/compat.py | edawson/parliament2 | 2632aa3484ef64c9539c4885026b705b737f6d1e | [
"Apache-2.0"
] | null | null | null | resources/usr/lib/python2.7/dist-packages/numpy/oldnumeric/compat.py | edawson/parliament2 | 2632aa3484ef64c9539c4885026b705b737f6d1e | [
"Apache-2.0"
] | 1 | 2020-05-28T23:01:44.000Z | 2020-05-28T23:01:44.000Z | # Compatibility module containing deprecated names
__all__ = ['NewAxis',
'UFuncType', 'UfuncType', 'ArrayType', 'arraytype',
'LittleEndian', 'arrayrange', 'matrixmultiply',
'array_constructor', 'pickle_array',
'DumpArray', 'LoadArray', 'multiarray',
# from cPickle
'dump', 'dumps', 'load', 'loads',
'Unpickler', 'Pickler'
]
import numpy.core.multiarray as multiarray
import numpy.core.umath as um
from numpy.core.numeric import array
import functions
import sys
from cPickle import dump, dumps
mu = multiarray
#Use this to add a new axis to an array
#compatibility only
NewAxis = None
#deprecated
UFuncType = type(um.sin)
UfuncType = type(um.sin)
ArrayType = mu.ndarray
arraytype = mu.ndarray
LittleEndian = (sys.byteorder == 'little')
from numpy import deprecate
# backward compatibility
arrayrange = deprecate(functions.arange, 'arrayrange', 'arange')
# deprecated names
matrixmultiply = deprecate(mu.dot, 'matrixmultiply', 'dot')
def DumpArray(m, fp):
m.dump(fp)
def LoadArray(fp):
import cPickle
return cPickle.load(fp)
def array_constructor(shape, typecode, thestr, Endian=LittleEndian):
if typecode == "O":
x = array(thestr, "O")
else:
x = mu.fromstring(thestr, typecode)
x.shape = shape
if LittleEndian != Endian:
return x.byteswap(True)
else:
return x
def pickle_array(a):
if a.dtype.hasobject:
        return (array_constructor,
                (a.shape, a.dtype.char, a.tolist(), LittleEndian))
else:
return (array_constructor,
(a.shape, a.dtype.char, a.tostring(), LittleEndian))
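# Illustrative round trip (editorial addition): `pickle_array` yields a
# (constructor, args) pair that `array_constructor` replays, e.g.
#     a = array([1, 2, 3])
#     constructor, args = pickle_array(a)
#     b = constructor(*args)   # reconstructs `a`, restoring byte order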
def loads(astr):
import cPickle
arr = cPickle.loads(astr.replace('Numeric', 'numpy.oldnumeric'))
return arr
def load(fp):
return loads(fp.read())
def _LoadArray(fp):
import typeconv
ln = fp.readline().split()
if ln[0][0] == 'A': ln[0] = ln[0][1:]
typecode = ln[0][0]
endian = ln[0][1]
itemsize = int(ln[0][2:])
shape = [int(x) for x in ln[1:]]
sz = itemsize
for val in shape:
sz *= val
dstr = fp.read(sz)
m = mu.fromstring(dstr, typeconv.convtypecode(typecode))
m.shape = shape
if (LittleEndian and endian == 'B') or (not LittleEndian and endian == 'L'):
return m.byteswap(True)
else:
return m
import pickle, copy
if sys.version_info[0] >= 3:
class Unpickler(pickle.Unpickler):
# XXX: should we implement this? It's not completely straightforward
# to do.
def __init__(self, *a, **kw):
raise NotImplementedError(
"numpy.oldnumeric.Unpickler is not supported on Python 3")
else:
class Unpickler(pickle.Unpickler):
def load_array(self):
self.stack.append(_LoadArray(self))
dispatch = copy.copy(pickle.Unpickler.dispatch)
dispatch['A'] = load_array
class Pickler(pickle.Pickler):
def __init__(self, *args, **kwds):
raise NotImplementedError, "Don't pickle new arrays with this"
def save_array(self, object):
raise NotImplementedError, "Don't pickle new arrays with this"
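# Hedged usage sketch (editorial addition, Python 2 only): read a legacy
# Numeric pickle through the compatibility Unpickler, e.g.
#     fid = open('legacy.pkl', 'rb')
#     arr = Unpickler(fid).load()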
| 26.966102 | 80 | 0.637335 |
2c75c7156e7721bb0ecb0cb9fcc0322f08f72ebd | 12,963 | py | Python | lesson 7/cython/cython_compile.py | gtpedrosa/Python4WindEnergy | f8ad09018420cfb3a419173f97b129de7118d814 | [
"Apache-2.0"
] | 48 | 2015-01-19T18:21:10.000Z | 2021-11-27T22:41:06.000Z | lesson 7/cython/cython_compile.py | arash7444/Python4WindEnergy | 8f97a5f86e81ce01d80dafb6f8104165fd3ad397 | [
"Apache-2.0"
] | 1 | 2016-05-24T06:07:07.000Z | 2016-05-24T08:26:29.000Z | lesson 7/cython/cython_compile.py | arash7444/Python4WindEnergy | 8f97a5f86e81ce01d80dafb6f8104165fd3ad397 | [
"Apache-2.0"
] | 24 | 2015-06-26T14:44:07.000Z | 2021-06-07T18:36:52.000Z | '''
Created on 10/07/2013
@author: Mads M. Pedersen (mmpe@dtu.dk)
Wrapper functions and decorators for compiling functions using Cython
'''
import inspect
import os
import re
import shutil
import subprocess
import sys
import numpy as np
import warnings
def wrap(f, autodeclare, *args, **kwargs):
"""
Wrapper function returned by the cython_compile and cython_compile_autodeclare decorators
:param f: Function to compile
:type f: function
    :param autodeclare: if True, autodeclare variables via py2pyx_autodeclare;
                        otherwise translate with py2pyx
    :type autodeclare: bool
"""
# Generate name: "c:\documents\project\mymodule.py" -> mymodule_myfunction
# When called from ipython notebooks, filename is an object e.g: "<ipython-input-12-e897f9fefc0c>"
# therefore <,>,- is removed to make it a legal python module name
name = os.path.relpath(
inspect.getabsfile(f),
os.getcwd()).replace(".py",
"")
name = name.replace("<", "").replace(">", "").replace("-", "")
name = "%s_%s" % (name, f.func_name)
module = name.replace(os.path.sep, ".")
# import compiled module if exists, otherwise compile and import
try:
cmodule = __import__(module)
except ImportError:
# Generate pyrex code lines
if autodeclare:
pyx_lines = py2pyx_autodeclare(f, args, kwargs.copy())
else:
# source lines except '@cython_compile'
source_lines = inspect.getsourcelines(f)[0][1:]
pyx_lines = py2pyx(source_lines)
# Write pyrex code lines to .pyx file
pyx_filename = name + ".pyx"
with open(pyx_filename, 'w') as fid:
fid.writelines(pyx_lines)
# compile, import compiled module and delete temporary files
cmodule = compile_and_cleanup(module, pyx_filename)
try:
cf = getattr(cmodule, f.func_name)
if kwargs == {}:
return cf(*args)
else:
return cf(*args, **kwargs)
except AttributeError:
warnings.warn(
"Compilation or import of %s failed. Python function used instead" %
f)
return f(*args, **kwargs)
def cython_compile(f):
"""Decorator for compilation, import and execution of the function, f.
Variables can be declared using Pure or cdef syntax, see module description
Example:
@cython_compile
def my_func(p):
pass
"""
w = lambda *args, **kwargs: wrap(f, False, *args, **kwargs)
w.__name__ = f.__name__
return w
def cython_compile_autodeclare(f):
"""Decorator for autodeclaring, compilation, import and execution of the function, f.
    Variables declared using cdef syntax override autodeclaration, see module description
    Example:
    @cython_compile_autodeclare
def my_func(p):
pass
"""
w = lambda *args, **kwargs: wrap(f, True, *args, **kwargs)
w.__name__ = f.__name__
return w
def cython_import(module, compiler=None):
"""Compiles and imports a module. Use it similar to the normal import
Example (import my_func from my_module):
from cython_compile import cython_import
cython_import('my_module')
import my_module # import must be after cython_import statement
my_module.my_func()
"""
exec("import %s" % module)
pyd_module = module
if not is_compiled(eval(pyd_module)):
# Read py-module
file_path = module.replace(".", "/") + ".py"
fid = open(file_path)
pylines = fid.readlines()
fid.close()
# write pyrex file
pyx_filename = file_path.replace('.py', '.pyx')
fid = open(pyx_filename, 'w')
pyxlines = py2pyx(pylines)
fid.writelines(pyxlines)
fid.close()
# compile, import compiled module and delete temporary files
compile_and_cleanup(module, pyx_filename, compiler)
def compile_and_cleanup(module, pyx_filename, compiler=None):
"""compile, import compiled module and delete temporary files"""
# Generate setup.py script
fid = open('setup.py', 'w')
setup_str = """from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
import numpy
ext_modules = [Extension("%s", ["%s"], include_dirs = [numpy.get_include()])]
setup(
name = 'name',
cmdclass = {'build_ext': build_ext},
ext_modules = ext_modules
)""" % (module, pyx_filename)
fid.write(setup_str)
fid.close()
# create compile command
if compiler is not None:
compiler_str = "--compiler=%s" % compiler
else:
if os.name == 'nt' and "mingw" in os.environ['path'].lower():
compiler_str = "--compiler=mingw32"
else:
compiler_str = ""
bin_python = os.path.basename(sys.executable)
cmd = "%s setup.py build_ext --inplace %s" % (bin_python, compiler_str)
# compile
print "compiling %s: %s" % (module, cmd)
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, shell=True)
(out, err) = proc.communicate()
# Reload and check that module is compiled
try:
cmodule = __import__(module)
reload(cmodule)
except ImportError:
cmodule = None
if cmodule is None or is_compiled(cmodule) == False:
line = '\n' + '=' * 79 + '\n'
sys.stderr.write(
"%s was not compiled correctly. It may result in slower execution" %
module)
sys.stderr.write('%sstdout:%s%s' % (line, line, out))
sys.stderr.write('%sstderr:%s%s' % (line, line, err))
else:
print "Compiling succeeded"
# Clean up. Remove temporary files and folders
if os.path.isdir("build"):
shutil.rmtree("build")
for f in ['setup.py', pyx_filename.replace(".pyx", '.c')]:
if os.path.isfile(f):
os.remove(f)
return cmodule
def py2pyx_autodeclare(f, args, kwargs):
"""Generate pyrex code of function f and its input arguments
This function invokes py2pyx and extends with autodeclarations:
- arguments: input arguments are declared based their values in args and kwargs
- cdef: Variables declared by cdef, overrides autodeclaration, e.g:
"def func(a): #cpdef func(int a):" -> "cpdef func(int a):" (independent of type of a)
"#cdef int a" - "cdef int a" (independent of type of a)
- assignment: Variables assigned in function, e.g.
"a = xxx"
are declared based on the type of eval(xxx)
- in: variables returned by iterators, e.g:
"for a in xrange(5)",
"for a,b in [(1,.2)]:"
are declared base on the type of the first element in eval(iterator), e.g. "type(eval(xrange(5)[0]))"
"""
arg_names, _, _, defaults = inspect.getargspec(f)
# update kwargs with defaults
if defaults:
for k, v in zip(arg_names[::-1], defaults[::-1]):
if k not in kwargs:
kwargs[k] = v
kwargs.update(dict(zip(arg_names, args)))
# get pyx code lines using py2pyx
lines = inspect.getsourcelines(f)[0]
if lines[0].strip() == "@cython_compile_autodeclare":
lines = lines[1:]
lines = py2pyx(lines)
# prepare regular expressions
var_name = "(?:_*[a-zA-Z][a-zA-Z0-9_]*)" # optional "_" + one alpha + [0..n] x alphanum. "?:" = no group
reg_args = re.compile("[ \t]*def *(?:%s) *\(([^:]*)\) *:" % (var_name))
reg_cdef = re.compile(
"[ \t]*cdef *(?:(?:signed|unsigned|int|long|float|double|np\.ndarray\[.*\]) *)*(%s)" %
var_name)
reg_assign = re.compile('[ \t]*(%s) *= (.*)' % var_name)
reg_in = re.compile(
'[ \t]*for *(%s(?:, *%s)*) *in *(.*):' %
(var_name, var_name))
def declare_str(var_name, var_value):
"""Generate declaration string '<type(var_value)> <var_name>' e.g:
declare_str('a',1) -> "long a"
"""
if isinstance(var_value, (int)):
return "long %s" % var_name
if isinstance(var_value, long):
return "long long %s" % var_name
if isinstance(var_value, float):
return "double %s" % var_name
elif isinstance(var_value, np.ndarray):
return "np.ndarray[%s_t,ndim=%d] %s" % (var_value.dtype, len(var_value.shape), var_name)
else:
raise NotImplementedError(type(var_value))
defs = {} # dict for known local variables
def_line = None # line nr of "def func():". Autodeclaration of local field inserted below this line
for i, line in enumerate(lines):
if def_line is None and 'def' in line:
# first line containing "def" = function declaration line
def_line = i
match = reg_args.match(line)
if match is not None:
args = match.group(
1).strip(
) # line=" def func(xxx):#comment" -> args='xxx'
arg_strs = []
if args != "":
for arg in args.split(','):
arg_name = arg.split('=')[0].strip()
arg_value = kwargs[arg_name]
try:
arg_strs.append(
arg.strip(
).replace(
arg_name,
declare_str(
arg_name,
arg_value),
1))
except NotImplementedError:
arg_strs.append(arg)
# replace function declaration line
lines[i] = '%scpdef %s(%s):\n' % (
" " * line.index('def'),
f.__name__,
", ".join(arg_strs))
elif line.lstrip().startswith("import "):
# add imported moduled to kwargs -> enable evaluation of variables
import_module = line.replace("import ", "", 1).strip()
kwargs[import_module] = __import__(import_module)
else:
match = reg_cdef.match(line)
if match is not None:
# line contains a 'cdef' declaration.
# Add to defs to avoid redeclaration
var_name = match.group(1)
if var_name not in defs:
defs[var_name] = None
match = reg_assign.match(line)
if match is not None:
# line contains an assignment, e.g. a = xxx.
# Try to evaluate xxx and declare a as type of eval(xxx)
try:
var_name = match.group(1)
if var_name not in defs:
var_value = eval(match.group(2), globals(), kwargs)
defs[var_name] = declare_str(
var_name.strip(), var_value)
kwargs[var_name] = var_value
except NotImplementedError:
pass
match = reg_in.match(line)
if match is not None:
# line contains 'for xxx in yyy:'.
# Try to evaluate yyy and declare xxx as type of first element
# of eval(yyy)
var_names = [v.strip() for v in match.group(1).split(",")]
var_values = eval(match.group(2), globals(), kwargs)[0]
if not isinstance(var_values, (list, tuple)):
var_values = (var_values,)
for var_name, var_value in zip(var_names, var_values):
try:
if var_name not in defs:
defs[var_name] = declare_str(var_name, var_value)
kwargs[var_name] = var_value
except NotImplementedError:
pass
indent = lines[def_line + 1].replace(lines[def_line + 1].lstrip(), "")
# Insert declaration of local fields ordered by name just below function
# declaration
for key in sorted(defs.keys(), reverse=True):
if defs[key] is not None:
lines.insert(def_line + 1, "%scdef %s\n" % (indent, defs[key]))
return lines
def py2pyx(pylines):
"""Generate pyrex code lines from python code lines
- Adds import of cython and numpy
- searches every line for "#c". If found text before "#c" is replaced with text after "#c", e.g:
"def func(a): #cpdef func(int a):" -> "cpdef func(int a):
"#cdef int b" -> "cdef int b"
"""
pyxlines = ['import cython\n', 'cimport numpy as np\n']
for l in pylines:
if "#c" in l:
indent = l[:len(l) - len(l.lstrip())]
cdef = l[l.index("#c") + 1:]
l = indent + cdef
pyxlines.append(l)
return pyxlines
def is_compiled(module):
return module.__file__.lower()[-4:] == ".pyd" or module.__file__.lower()[-3:] == ".so"
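if __name__ == '__main__':
    # Hedged usage sketch (editorial addition, Python 2): compile a toy
    # function with the autodeclaring decorator. Requires Cython, numpy
    # headers and a C compiler; on failure it falls back to plain Python.
    @cython_compile_autodeclare
    def total(n):
        s = 0
        for i in xrange(n):
            s += i
        return s
    print total(10)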
| 35.809392 | 109 | 0.566767 |
e345cecbeaaeeeb1824dfc0fd92b57435d5a2148 | 4,816 | py | Python | chatgui.py | esh04/Intelligent-Chatbot | 8bd2d5e4f98d3bf57d2b4fc3e0fe681afdaaf4bb | [
"MIT"
] | null | null | null | chatgui.py | esh04/Intelligent-Chatbot | 8bd2d5e4f98d3bf57d2b4fc3e0fe681afdaaf4bb | [
"MIT"
] | null | null | null | chatgui.py | esh04/Intelligent-Chatbot | 8bd2d5e4f98d3bf57d2b4fc3e0fe681afdaaf4bb | [
"MIT"
] | null | null | null | import nltk
from nltk.stem import WordNetLemmatizer
lemmatizer = WordNetLemmatizer()
import pickle
import numpy as np
from keras.models import load_model
model = load_model('./Data/chatbot_model.h5')
import json
import random
intents = json.loads(open('./Data/intents.json').read())
words = pickle.load(open('./Data/words.pkl','rb'))
classes = pickle.load(open('./Data/classes.pkl','rb'))
from Pyscripts.statistics import covidStatistic
from Pyscripts.state_finder import state_finder
from Pyscripts.news import news
from Pyscripts.predictor import covid_pred
def clean_up_sentence(sentence):
# tokenize the pattern - split words into array
sentence_words = nltk.word_tokenize(sentence)
# stem each word - create short form for word
sentence_words = [lemmatizer.lemmatize(word.lower()) for word in sentence_words]
return sentence_words
# return bag of words array: 0 or 1 for each word in the bag that exists in the sentence
def bow(sentence, words, show_details=True):
# tokenize the pattern
sentence_words = clean_up_sentence(sentence)
# bag of words - matrix of N words, vocabulary matrix
bag = [0]*len(words)
for s in sentence_words:
for i,w in enumerate(words):
if w == s:
# assign 1 if current word is in the vocabulary position
bag[i] = 1
if show_details:
print ("found in bag: %s" % w)
    return np.array(bag)
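# Illustrative example (editorial addition): with words == ['hello', 'covid'],
# bow("hello there", words, show_details=False) -> array([1, 0]).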
def predict_class(sentence, model):
# filter out predictions below a threshold
p = bow(sentence, words,show_details=False)
res = model.predict(np.array([p]))[0]
ERROR_THRESHOLD = 0.25
results = [[i,r] for i,r in enumerate(res) if r>ERROR_THRESHOLD]
# sort by strength of probability
results.sort(key=lambda x: x[1], reverse=True)
return_list = []
for r in results:
return_list.append({"intent": classes[r[0]], "probability": str(r[1])})
return return_list
def getResponse(ints, intents_json, msg):
tag = ints[0]['intent']
list_of_intents = intents_json['intents']
for i in list_of_intents:
        if tag == 'statistics':  # e.g. "can you give me covid statistics for <state>"
            result = covidStatistic(state_finder(msg))
            # result = covidStatistic("Karnataka")
            break
        elif tag == 'news':  # e.g. "can you give me covid news for <state>"
            result = news(state_finder(msg))
            break
        elif i['tag'] == tag:  # any other matched tag, e.g. 'hospital'
            result = random.choice(i['responses'])
            break
return result
def chatbot_response(msg):
ints = predict_class(msg, model)
res = getResponse(ints, intents, msg)
return res
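# Hedged usage sketch (editorial addition): the pipeline can be exercised
# without the GUI, e.g.
#     print(chatbot_response("hello"))
#     print(chatbot_response("give me covid statistics for Karnataka"))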
#Creating GUI with tkinter
from tkinter import *
def send():
msg = EntryBox.get("1.0",'end-1c').strip()
EntryBox.delete("0.0",END)
if msg != '':
ChatLog.config(state=NORMAL)
ChatLog.insert(END, "You: " + msg + '\n\n')
ChatLog.config(foreground="#442265", font=("Verdana", 12 ))
if msg[0]=='0' or msg[0]=='1':
check = covid_pred(tuple(eval(msg)))
ChatLog.insert(END, "Bot: " + str(check)[0:5]+ "% chance you have COVID" + '\n\n')
if check > 50:
ChatLog.insert(END, "Bot: Please go and visit a doctor"+ '\n\n')
else:
ChatLog.insert(END, "Bot: You are safe, please take care"+ '\n\n')
else:
res = chatbot_response(msg)
ChatLog.insert(END, "Bot: " + res + '\n\n')
ChatLog.config(state=DISABLED)
ChatLog.yview(END)
base = Tk()
base.title("COVID-19 CHATBOT")
base.geometry("400x500")
base.resizable(width=FALSE, height=FALSE)
#Create Chat window
ChatLog = Text(base, bd=0, bg="white", height="8", width="50", font="Arial",)
ChatLog.config(state=DISABLED)
#Bind scrollbar to Chat window
scrollbar = Scrollbar(base, command=ChatLog.yview, cursor="heart")
ChatLog['yscrollcommand'] = scrollbar.set
#Create Button to send message
SendButton = Button(base, font=("Verdana",12,'bold'), text="Send", width="12", height=5,
bd=0, bg="#32de97", activebackground="#3c9d9b",fg='#ffffff',
command= send )
#Create the box to enter message
EntryBox = Text(base, bd=0, bg="white",width="29", height="5", font="Arial")
#EntryBox.bind("<Return>", send)
#Place all components on the screen
scrollbar.place(x=376,y=6, height=386)
ChatLog.place(x=6,y=6, height=386, width=370)
EntryBox.place(x=128, y=401, height=90, width=265)
SendButton.place(x=6, y=401, height=90)
base.mainloop()
| 34.647482 | 95 | 0.624792 |
ad90400afe5c2be41d9c87e78fd4ea603b69418f | 1,188 | py | Python | Exercise/exercise_queries.py | Mihai925/EduCoding-Legacy | 7c6de105deb186c3442f8d7f9f1b9f99708f8fb6 | [
"MIT"
] | null | null | null | Exercise/exercise_queries.py | Mihai925/EduCoding-Legacy | 7c6de105deb186c3442f8d7f9f1b9f99708f8fb6 | [
"MIT"
] | null | null | null | Exercise/exercise_queries.py | Mihai925/EduCoding-Legacy | 7c6de105deb186c3442f8d7f9f1b9f99708f8fb6 | [
"MIT"
] | null | null | null | from Class.models import Class
from Exercise.models import Exercise
def assign_exercise_to_class(ex_id, cls_id):
a_class = Class.objects.get(cls_id=cls_id)
exercise = Exercise.objects.get(ex_id=ex_id)
exercise.classes_assigned_to.add(a_class)
def get_exercises_for_teacher(teacher):
return Exercise.objects.filter(author=teacher)
def get_assigned_exercise_for_class(cls_id):
a_class = Class.objects.get(cls_id=cls_id)
exercises = Exercise.objects.filter(classes_assigned_to=a_class)
return exercises
#return [(exercise.title.encode("ascii", "ignore"), exercise.description.encode("ascii","ignore")) for exercise in exercises]
def get_assigned_exercise_for_students(student):
student_classes = Class.objects.filter(students=student)
ret = []
for student_class in student_classes:
        # `student_class` is already the Class row; no need to re-fetch it.
        exercises = get_assigned_exercise_for_class(student_class.cls_id)
        ret += [(student_class.name, exercises)]
#for exercise_tuples in [get_assigned_exercise_for_class(student_class.cls_id) for student_class in student_classes]:
# ret += exercise_tuples
return ret
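# Hedged usage sketch (editorial addition): with Django configured and the
# Class/Exercise tables populated,
#     assign_exercise_to_class(ex_id=1, cls_id=2)
#     for class_name, exercises in get_assigned_exercise_for_students(student):
#         print(class_name, [e.title for e in exercises])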
| 36 | 129 | 0.765993 |
bb07c04eb09df99738240b88d531a7a6ed661b6c | 399 | py | Python | facebook_example/member/admin.py | BeshoyAtef/StudentsPortal | 2df13b92ff3bfb84cc4d5aa8fd844339dabf4643 | [
"BSD-3-Clause"
] | null | null | null | facebook_example/member/admin.py | BeshoyAtef/StudentsPortal | 2df13b92ff3bfb84cc4d5aa8fd844339dabf4643 | [
"BSD-3-Clause"
] | null | null | null | facebook_example/member/admin.py | BeshoyAtef/StudentsPortal | 2df13b92ff3bfb84cc4d5aa8fd844339dabf4643 | [
"BSD-3-Clause"
] | null | null | null | from django.contrib import admin
from member.models import *
from django_facebook import *
try:
from auth import User
except:
pass
try:
admin.site.register(member.UserProfile)
admin.site.register(memeber.MyCustomProfile)
except:
pass
try:
admin.site.register(django_facebook.FacebookCustomUser)
except:
pass
try:
admin.site.register(auth.User)
except:
pass | 17.347826 | 59 | 0.734336 |
0a2f961eaaef24f69072c0a809b8c3f3f89bb4f0 | 4,694 | py | Python | texar/utils/mode.py | eff-kay/temp-texar-repo | 5c6ee6645c1d78f8294e2a07d111dbb02cd9547e | [
"Apache-2.0"
] | 87 | 2019-09-11T04:41:46.000Z | 2022-03-23T02:37:52.000Z | texar/utils/mode.py | ysglh/texar | 9c699e8143fd8ecb5d65a41ceef09c45832b9258 | [
"Apache-2.0"
] | 10 | 2019-10-01T16:09:17.000Z | 2021-10-19T21:20:11.000Z | texar/utils/mode.py | ysglh/texar | 9c699e8143fd8ecb5d65a41ceef09c45832b9258 | [
"Apache-2.0"
] | 20 | 2019-09-13T16:32:37.000Z | 2021-06-03T07:14:11.000Z | # Copyright 2018 The Texar Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utility functions related to mode.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import tensorflow as tf
from texar import context
__all__ = [
"maybe_global_mode",
"is_train_mode",
"is_eval_mode",
"is_predict_mode",
"is_train_mode_py",
"is_eval_mode_py",
"is_predict_mode_py",
"switch_dropout"
]
def maybe_global_mode(mode):
"""Returns :func:`texar.global_mode` if :attr:`mode` is `None`,
otherwise returns :attr:`mode` as-is.
"""
if mode is None:
return context.global_mode()
else:
return mode
def is_train_mode(mode):
"""Returns a bool Tensor indicating whether the global mode is TRAIN.
If :attr:`mode` is `None`, the mode is determined by
:func:`texar.global_mode`.
"""
if mode is None:
return context.global_mode_train()
else:
return tf.equal(mode, tf.estimator.ModeKeys.TRAIN)
def is_eval_mode(mode):
"""Returns a bool Tensor indicating whether the global mode is EVAL.
If :attr:`mode` is `None`, the mode is determined by
:func:`texar.global_mode`.
"""
if mode is None:
return context.global_mode_eval()
else:
return tf.equal(mode, tf.estimator.ModeKeys.EVAL)
def is_predict_mode(mode):
"""Returns a bool Tensor indicating whether the global mode is PREDICT.
If :attr:`mode` is `None`, the mode is determined by
:func:`texar.global_mode`.
"""
if mode is None:
return context.global_mode_predict()
else:
return tf.equal(mode, tf.estimator.ModeKeys.PREDICT)
def is_train_mode_py(mode, default=True):
"""Returns a python boolean indicating whether the mode is TRAIN.
Args:
mode: A string taking value in
:tf_main:`tf.estimator.ModeKeys <estimator/ModeKeys>`.
Can be `None`.
default (bool): The return value when :attr:`mode` is `None`. Default
is `True`.
Returns:
A python boolean.
"""
if mode is None:
return default
if mode not in context.valid_modes():
raise ValueError('Unknown mode: {}'.format(mode))
return mode == tf.estimator.ModeKeys.TRAIN
def is_eval_mode_py(mode, default=False):
"""Returns a python boolean indicating whether the mode is EVAL.
Args:
mode: A string taking value in
:tf_main:`tf.estimator.ModeKeys <estimator/ModeKeys>`.
Can be `None`.
default (bool): The return value when :attr:`mode` is `None`. Default
is `False`.
Returns:
A python boolean.
"""
if mode is None:
return default
if mode not in context.valid_modes():
raise ValueError('Unknown mode: {}'.format(mode))
return mode == tf.estimator.ModeKeys.EVAL
def is_predict_mode_py(mode, default=False):
"""Returns a python boolean indicating whether the mode is PREDICT.
Args:
mode: A string taking value in
:tf_main:`tf.estimator.ModeKeys <estimator/ModeKeys>`.
Can be `None`.
default (bool): The return value when :attr:`mode` is `None`. Default
is `False`.
Returns:
A python boolean.
"""
if mode is None:
return default
if mode not in context.valid_modes():
raise ValueError('Unknown mode: {}'.format(mode))
return mode == tf.estimator.ModeKeys.PREDICT
def switch_dropout(dropout_keep_prob, mode=None):
"""Turns off dropout when not in training mode.
Args:
dropout_keep_prob: Dropout keep probability in training mode
mode (optional): A Tensor taking values of
:tf_main:`tf.estimator.ModeKeys <estimator/ModeKeys>`.
Dropout is activated if :attr:`mode` is `TRAIN`.
If `None`, the mode is inferred from
:func:`texar.global_mode`.
Returns:
A unit Tensor that equals the dropout keep probability in `TRAIN` mode,
and `1.0` in other modes.
"""
return 1. - (1. - dropout_keep_prob) * tf.to_float(is_train_mode(mode))
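if __name__ == '__main__':
    # Hedged usage sketch (editorial addition), assuming TF1 graph mode: the
    # keep probability follows the mode fed to texar's global-mode placeholder.
    keep_prob = switch_dropout(0.5)
    with tf.Session() as sess:
        feed = {context.global_mode(): tf.estimator.ModeKeys.TRAIN}
        print(sess.run(keep_prob, feed_dict=feed))  # 0.5 in TRAIN mode
        feed = {context.global_mode(): tf.estimator.ModeKeys.EVAL}
        print(sess.run(keep_prob, feed_dict=feed))  # 1.0 otherwise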
| 31.503356 | 79 | 0.659565 |
8896801c28c3e70ad8790ddf5e981903151fa98e | 24,191 | py | Python | tensor2tensor/utils/yellowfin.py | AgoloCuongHoang/tensor2tensor | 98903799fa442b5211eb6dfde997b229347f389a | [
"Apache-2.0"
] | 1 | 2019-12-21T21:26:11.000Z | 2019-12-21T21:26:11.000Z | tensor2tensor/utils/yellowfin.py | AgoloCuongHoang/tensor2tensor | 98903799fa442b5211eb6dfde997b229347f389a | [
"Apache-2.0"
] | null | null | null | tensor2tensor/utils/yellowfin.py | AgoloCuongHoang/tensor2tensor | 98903799fa442b5211eb6dfde997b229347f389a | [
"Apache-2.0"
] | 1 | 2020-07-13T03:15:32.000Z | 2020-07-13T03:15:32.000Z | # coding=utf-8
# Copyright 2019 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""YellowFin for TensorFlow. Thanks Jian Zhang: zjian [@] stanford [.] edu."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
# Values for gate_gradients.
GATE_NONE = tf.compat.v1.train.Optimizer.GATE_NONE
GATE_OP = tf.compat.v1.train.Optimizer.GATE_OP
GATE_GRAPH = tf.compat.v1.train.Optimizer.GATE_GRAPH
class YellowFinOptimizer(object):
"""Optimizer that implements the YellowFin algorithm.
  See [Zhang et al., 2017](https://arxiv.org/abs/1706.03471) for details.
"""
def __init__(self,
learning_rate=1.0,
momentum=0.0,
clip_thresh=None,
beta=0.999,
curvature_window_width=20,
zero_debias=True,
delta_mu=0.0,
sparsity_debias=True,
use_locking=False,
name="YellowFin",
use_nesterov=False):
"""Construct a new YellowFin optimizer.
Implemented as a wrapper around tf.train.MomentumOptimizer
Args:
learning_rate: A Tensor or a floating point value. The learning rate.
Set to 1.0 in the paper.
momentum: A Tensor or a floating point value. The momentum.
Set to 0.0 in the paper.
clip_thresh: A Tensor or a floating point value. The clipping threshold
for `tf.clip_by_global_norm`. If None, no clipping will be carried out.
beta: A float value or a constant float tensor. The smoothing parameter
for estimations.
      curvature_window_width: An int value or a constant int tensor.
The curvature window width.
zero_debias: A boolean, zero debias moving-averages.
delta_mu: For extensions. Not necessary in the basic use.
sparsity_debias: A boolean. Gradient norm and curvature are
biased to larger values when calculated with sparse gradient.
This is useful when the model is very sparse, e.g. LSTM with
word embedding. For non-sparse CNN, turning it off could
slightly accelerate the speed.
use_locking: If True, use locks for update operations.
name: Optional name prefix for the operations created when
applying gradients. Defaults to "YellowFin".
use_nesterov: If True, the underlying MomentumOptimizer uses Nesterov
Momentum. Set to False in the default YellowFin algorithm.
Note:
clip_thresh is the threshold value on ||lr * gradient||,
delta_mu can be place holder/variable/tensor scalar.
They are used for additional momentum in situations such as
asynchronous-parallel training.
The default is 0.0(or None) for basic usage of the optimizer.
Other features:
If you want to manually control the learning rates, self.lr_factor is
an interface to the outside, it is an multiplier for the internal
learning rate in YellowFin. It is helpful when you want to do additional
hand tuning or some decaying scheme to the tuned learning rate in
YellowFin.
Example on using lr_factor can be found here:
https://github.com/JianGoForIt/YellowFin/blob/master/char-rnn-tensorflow/train_YF.py#L140
"""
# Set lr and mu
self._lr = learning_rate
self._mu = momentum
# Set lr and mu tensor.
self._lr_var = tf.get_variable("YF_lr",
dtype=tf.float32,
trainable=False,
initializer=learning_rate)
self._mu_var = tf.get_variable("YF_mu",
dtype=tf.float32,
trainable=False,
initializer=tf.constant(momentum))
# Tuning factor for learning rates step or decaying scheme.
self.lr_factor = tf.get_variable("YF_lr_factor",
dtype=tf.float32,
trainable=False,
initializer=tf.constant(1.0))
# Gradient Clipping Threshold.
if clip_thresh is not None:
self._clip_thresh_var = tf.get_variable(
"YF_clip_thresh",
dtype=tf.float32,
trainable=False,
initializer=tf.constant(clip_thresh))
else:
self._clip_thresh_var = None
# Set initial lr and mu for momentum.
self._lr_m = self._lr_var * self.lr_factor
self._mu_m = self._mu_var + delta_mu
# Init momentum optimizer.
self._momentum_optimizer = tf.train.MomentumOptimizer(
self._lr_m, self._mu_m, use_locking, name, use_nesterov)
# Moving average for statistics.
self._beta = beta
self._moving_averager = None
# Step counting.
self._step = tf.get_variable("YF_step",
dtype=tf.int32,
trainable=False,
initializer=tf.constant(0))
# YF_step + 1 op.
self._increment_step_op = None
# For conditional tuning.
self._do_tune = tf.greater(self._step, tf.constant(0))
# Moving-averages.
self._zero_debias = zero_debias
self._sparsity_debias = sparsity_debias
# For curvature range.
self.curvature_window_width = curvature_window_width
self._curv_win = None
# Gradients and Variables.
self._grad = None
self._vars = None
# Get per var g**2, norm**2 and mean(norm**2).
self._grad_squared = None
self._grad_norm_squared = None
self._grad_norm_squared_avg = None
# Mean(grad) and Mean(grad**2) to compute Variance.
self._grad_avg = None
self._grad_avg_squared = None
# Max and Min curvature variations.
self._h_max_t = None
self._h_min_t = None
self._h_min = None
self._h_max = None
# Gradient Expected Variance.
self._grad_var = None
# Gradient Norm and Mean(Gradient Norm).
self._grad_norm = None
self._grad_norm_avg = None
# Distance to optimum and Mean(Distance to optimum).
self._d_t = None
self._dist_to_opt_avg = None
# Maintains moving averages of variables
# by employing an exponential decay(Beta),
# and (zero_devias) moving-averages.
self._moving_averager = None
# Handling Sparse Matrix
self._sparsity = None
self._sparsity_avg = None
def _curvature_range(self):
"""Curvature range.
Returns:
h_max_t, h_min_t ops
"""
self._curv_win = tf.get_variable("curv_win",
dtype=tf.float32,
trainable=False,
shape=[self.curvature_window_width,],
initializer=tf.zeros_initializer)
# We use log smoothing for curvature range
self._curv_win = tf.scatter_update(self._curv_win,
self._step % self.curvature_window_width,
tf.log(self._grad_norm_squared))
# Note here the iterations start from iteration 0
valid_window = tf.slice(self._curv_win,
tf.constant([0,]),
tf.expand_dims(
tf.minimum(
tf.constant(self.curvature_window_width),
                                    self._step + 1), axis=0))
self._h_min_t = tf.reduce_min(valid_window)
self._h_max_t = tf.reduce_max(valid_window)
curv_range_ops = []
with tf.control_dependencies([self._h_min_t, self._h_max_t]):
avg_op = self._moving_averager.apply([self._h_min_t, self._h_max_t])
with tf.control_dependencies([avg_op]):
self._h_min = tf.exp(
tf.identity(self._moving_averager.average(self._h_min_t)))
self._h_max = tf.exp(
tf.identity(self._moving_averager.average(self._h_max_t)))
if self._sparsity_debias:
self._h_min *= self._sparsity_avg
self._h_max *= self._sparsity_avg
curv_range_ops.append(avg_op)
return curv_range_ops # h_max_t, h_min_t
def _grad_variance(self):
"""Estimate of gradient Variance.
Returns:
C_t ops.
"""
grad_var_ops = []
tensor_to_avg = []
for t, g in zip(self._vars, self._grad):
if isinstance(g, tf.IndexedSlices):
tensor_to_avg.append(
tf.reshape(tf.unsorted_segment_sum(g.values,
g.indices,
g.dense_shape[0]),
shape=t.get_shape()))
else:
tensor_to_avg.append(g)
avg_op = self._moving_averager.apply(tensor_to_avg)
grad_var_ops.append(avg_op)
with tf.control_dependencies([avg_op]):
self._grad_avg = [self._moving_averager.average(val)
for val in tensor_to_avg]
self._grad_avg_squared = [tf.square(val) for val in self._grad_avg]
# Compute Variance
self._grad_var = tf.maximum(
tf.constant(1e-6, dtype=self._grad_norm_squared_avg.dtype),
self._grad_norm_squared_avg
- tf.add_n([tf.reduce_sum(val) for val in self._grad_avg_squared]))
if self._sparsity_debias:
self._grad_var *= self._sparsity_avg
return grad_var_ops # C_t
def _dist_to_opt(self):
"""Distance to optimum.
Returns:
D_t ops
"""
dist_to_opt_ops = []
# Running average of the norm of gradient
self._grad_norm = tf.sqrt(self._grad_norm_squared)
avg_op = self._moving_averager.apply([self._grad_norm,])
dist_to_opt_ops.append(avg_op)
with tf.control_dependencies([avg_op]):
self._grad_norm_avg = self._moving_averager.average(self._grad_norm)
# Single iteration distance estimation, note here
# self._grad_norm_avg is per variable
self._d_t = self._grad_norm_avg / self._grad_norm_squared_avg
# Running average of distance
avg_op = self._moving_averager.apply([self._d_t])
dist_to_opt_ops.append(avg_op)
with tf.control_dependencies([avg_op]):
self._dist_to_opt_avg = tf.identity(
self._moving_averager.average(self._d_t))
if self._sparsity_debias:
self._dist_to_opt_avg /= tf.sqrt(self._sparsity_avg)
return dist_to_opt_ops # D_t
def _grad_sparsity(self):
"""Gradient sparsity."""
# If the sparse minibatch gradient has 10 percent of its entries
# non-zero, its sparsity is 0.1.
# The norm of dense gradient averaged from full dataset
# are roughly estimated norm of minibatch
# sparse gradient norm * sqrt(sparsity)
# An extension maybe only correct the sparse blob.
non_zero_cnt = tf.add_n([tf.count_nonzero(g) for g in self._grad])
all_entry_cnt = tf.add_n([tf.size(g) for g in self._grad])
self._sparsity = tf.cast(non_zero_cnt, self._grad[0].dtype)
self._sparsity /= tf.cast(all_entry_cnt, self._grad[0].dtype)
avg_op = self._moving_averager.apply([self._sparsity,])
with tf.control_dependencies([avg_op]):
self._sparsity_avg = self._moving_averager.average(self._sparsity)
return avg_op
def _prepare_variables(self):
"""Prepare Variables for YellowFin.
Returns:
Grad**2, Norm, Norm**2, Mean(Norm**2) ops
"""
self._moving_averager = tf.train.ExponentialMovingAverage(
decay=self._beta, zero_debias=self._zero_debias)
# assert self._grad is not None and len(self._grad) > 0
# List for the returned Operations
prepare_variables_op = []
# Get per var g**2 and norm**2
self._grad_squared = []
self._grad_norm_squared = []
# Gradient squared
for v, g in zip(self._vars, self._grad):
if g is None: continue
with tf.colocate_with(v):
self._grad_squared.append(tf.square(g))
# Norm squared.
self._grad_norm_squared = [tf.reduce_sum(g_sq)
for g_sq in self._grad_squared]
if self._sparsity_debias:
avg_op_sparsity = self._grad_sparsity()
prepare_variables_op.append(avg_op_sparsity)
# The following running average on squared norm of gradient
# is shared by grad_var and dist_to_opt
avg_op = self._moving_averager.apply(self._grad_norm_squared)
with tf.control_dependencies([avg_op]):
self._grad_norm_squared_avg = [self._moving_averager.average(val)
for val in self._grad_norm_squared]
self._grad_norm_squared = tf.add_n(self._grad_norm_squared)
self._grad_norm_squared_avg = tf.add_n(self._grad_norm_squared_avg)
prepare_variables_op.append(avg_op)
return tf.group(*prepare_variables_op)
def _get_cubic_root(self):
"""Get the cubic root."""
# We have the equation x^2 D^2 + (1-x)^4 * C / h_min^2
# where x = sqrt(mu).
# We substitute x, which is sqrt(mu), with x = y + 1.
# It gives y^3 + py = q
# where p = (D^2 h_min^2)/(2*C) and q = -p.
# We use the Vieta's substitution to compute the root.
# There is only one real solution y (which is in [0, 1] ).
# http://mathworld.wolfram.com/VietasSubstitution.html
assert_array = [
tf.Assert(
tf.logical_not(tf.is_nan(self._dist_to_opt_avg)),
[self._dist_to_opt_avg,]),
tf.Assert(
tf.logical_not(tf.is_nan(self._h_min)),
[self._h_min,]),
tf.Assert(
tf.logical_not(tf.is_nan(self._grad_var)),
[self._grad_var,]),
tf.Assert(
tf.logical_not(tf.is_inf(self._dist_to_opt_avg)),
[self._dist_to_opt_avg,]),
tf.Assert(
tf.logical_not(tf.is_inf(self._h_min)),
[self._h_min,]),
tf.Assert(
tf.logical_not(tf.is_inf(self._grad_var)),
[self._grad_var,])
]
with tf.control_dependencies(assert_array):
p = self._dist_to_opt_avg**2 * self._h_min**2 / 2 / self._grad_var
w3 = (-tf.sqrt(p**2 + 4.0 / 27.0 * p**3) - p) / 2.0
w = tf.sign(w3) * tf.pow(tf.abs(w3), 1.0/3.0)
y = w - p / 3.0 / w
x = y + 1
return x
def _get_lr_tensor(self):
"""Get lr minimizing the surrogate.
Returns:
The lr_t.
"""
lr = tf.squared_difference(1.0, tf.sqrt(self._mu)) / self._h_min
return lr
def _get_mu_tensor(self):
"""Get the min mu which minimize the surrogate.
Returns:
The mu_t.
"""
root = self._get_cubic_root()
dr = self._h_max / self._h_min
mu = tf.maximum(
root**2, ((tf.sqrt(dr) - 1) / (tf.sqrt(dr) + 1))**2)
return mu
def _yellowfin(self):
"""YellowFin auto-tuning optimizer based on momentum SGD.
Returns:
YF ops
(Curvature range,
Grad_variance,
Dist_to_opt,
Single-Step,
Auto-Tuning)
"""
# List for the returned Operations.
yellowfin_ops = []
# Curvature range ops.
curv_range_ops = self._curvature_range()
yellowfin_ops += curv_range_ops
# Estimate of gradient Variance ops.
grad_var_ops = self._grad_variance()
yellowfin_ops += grad_var_ops
# Distance to optimum ops.
dist_to_opt_ops = self._dist_to_opt()
yellowfin_ops += dist_to_opt_ops
# Single-Step: minimizes the surrogate for the expected
# squared distance from the optimum of a local quadratic
# approximation after a single step while keeping all directions in the
# robust region.
self._mu = tf.identity(tf.cond(self._do_tune,
self._get_mu_tensor,
lambda: self._mu_var))
with tf.control_dependencies([self._mu]):
self._lr = tf.identity(tf.cond(self._do_tune,
self._get_lr_tensor,
lambda: self._lr_var))
# Tune learning rate and momentum.
with tf.control_dependencies([self._mu, self._lr]):
self._mu = self._beta * self._mu_var + (1 - self._beta) * self._mu
self._lr = self._beta * self._lr_var + (1 - self._beta) * self._lr
yellowfin_ops.append(tf.assign(self._mu_var, self._mu))
yellowfin_ops.append(tf.assign(self._lr_var, self._lr))
yellowfin_ops = tf.group(*yellowfin_ops)
return yellowfin_ops
def get_name(self):
"""Get optimizer name."""
return self._momentum_optimizer.get_name()
  def apply_gradients(self, grads_and_vars, global_step=None, name=None):
    """Apply gradients and tune hyperparameters with YellowFin.
Args:
grads_and_vars: List of (gradient, variable) pairs as returned by
compute_gradients().
global_step: Optional Variable to increment by one after the
variables have been updated.
      name: Optional name for the returned operation. Defaults to the
        name passed to the Optimizer constructor.
Returns:
(A group of operations)
Variable Update with Momentum ops,
YellowFin ops(Curvature, Variance, Distance) ops,
SingleStep and lr_mu tuning ops,
Step increment ops.
"""
self._grad, self._vars = zip(*[(g, t)
for g, t in grads_and_vars if g is not None])
# Var update with Momentum.
with tf.variable_scope("apply_updates"):
# Gradient Clipping?
if self._clip_thresh_var is not None:
self._grad, _ = tf.clip_by_global_norm(
self._grad, self._clip_thresh_var)
apply_grad_op = self._momentum_optimizer.apply_gradients(
zip(self._grad, self._vars),
global_step=global_step,
name=name)
else:
apply_grad_op = self._momentum_optimizer.apply_gradients(
zip(self._grad, self._vars),
global_step=global_step,
name=name)
# Begin lr and mu tuning.
with tf.variable_scope("prepare_yellowFin_variables"):
# the dependencies ideally only need to be after clip is done,
# i.e. depends on self._grads. However, the control_dependencies
# does not support indexed slice for sparse gradients.
# The alternative dependencies here might be slightly slower due
# to less parallelization.
with tf.control_dependencies([apply_grad_op,]):
prepare_variables_op = self._prepare_variables()
with tf.variable_scope("yellowfin"):
with tf.control_dependencies([prepare_variables_op]):
yellowfin_op = self._yellowfin()
# Update YellowFin step variable.
with tf.control_dependencies([yellowfin_op]):
self._increment_step_op = tf.assign_add(self._step, 1).op
return tf.group(apply_grad_op,
prepare_variables_op,
yellowfin_op,
self._increment_step_op)
def compute_gradients(self,
loss,
var_list,
global_step=None,
gate_gradients=GATE_OP,
aggregation_method=None,
colocate_gradients_with_ops=False,
name=None,
grad_loss=None):
"""Compute gradients through momentum optimizer.
Args:
loss: A Tensor containing the value to minimize.
var_list: Optional list or tuple of tf.Variable to update
to minimize loss. Defaults to the list of variables collected
in the graph under the key GraphKey.TRAINABLE_VARIABLES.
global_step: Optional Variable to increment by one after the
variables have been updated.
gate_gradients: How to gate the computation of gradients.
Can be GATE_NONE, GATE_OP, or GATE_GRAPH.
aggregation_method: Specifies the method used to combine
gradient terms. Valid values are defined in the class AggregationMethod.
colocate_gradients_with_ops: If True, try collocating gradients with
the corresponding op.
      name: Optional name for the returned operation. Defaults to the name
passed to the Optimizer constructor.
grad_loss: Optional. A Tensor holding the gradient computed for loss.
Returns:
A list of (gradient, variable) pairs. Variable is always present,
but gradient can be None.
"""
del global_step, name # Unused for now.
return self._momentum_optimizer.compute_gradients(
loss,
var_list=var_list,
gate_gradients=gate_gradients,
aggregation_method=aggregation_method,
colocate_gradients_with_ops=colocate_gradients_with_ops,
grad_loss=grad_loss)
def minimize(self,
loss,
global_step=None,
var_list=None,
gate_gradients=GATE_OP,
aggregation_method=None,
colocate_gradients_with_ops=False,
name=None,
grad_loss=None):
"""Adapted from TensorFlow Optimizer base class member function.
Add operations to minimize `loss` by updating `var_list`.
    This method simply combines calls to `compute_gradients()` and
    `apply_gradients()`. If you want to process the gradients before applying
    them, call `tf.gradients()` and `self.apply_gradients()` explicitly
    instead of using this function.
Args:
loss: A Tensor containing the value to minimize.
global_step: Optional Variable to increment by one after the variables
have been updated.
var_list: Optional list or tuple of Variable objects to update to
minimize loss. Defaults to the list of variables collected in
the graph under the key GraphKeys.TRAINABLE_VARIABLES.
gate_gradients: How to gate the computation of gradients.
Can be GATE_NONE, GATE_OP, or GATE_GRAPH.
aggregation_method: Specifies the method used to combine gradient terms.
Valid values are defined in the class AggregationMethod.
colocate_gradients_with_ops: If True, try collocating gradients with
the corresponding op.
name: Optional name for the returned operation.
grad_loss: Optional. A Tensor holding the gradient computed for loss.
Returns:
An Operation that updates the variables in var_list.
If global_step was not None, that operation also increments global_step.
Raises:
ValueError: if no gradients are provided for any variable.
"""
grads_and_vars = self._momentum_optimizer.compute_gradients(
loss,
var_list=var_list,
gate_gradients=gate_gradients,
aggregation_method=aggregation_method,
colocate_gradients_with_ops=colocate_gradients_with_ops,
grad_loss=grad_loss)
vars_with_grad = [v for g, v in grads_and_vars if g is not None]
if not vars_with_grad:
raise ValueError(
"No gradients provided for any variable, check your graph for ops"
" that do not support gradients, between variables %s and loss %s." %
([str(v) for _, v in grads_and_vars], loss))
return self.apply_gradients(grads_and_vars,
global_step=global_step,
name=name)
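  # Usage sketch (the wrapper's class name and constructor below are
  # assumptions; only the methods defined in this file are known):
  #   opt = YellowFinOptimizer()  # hypothetical constructor wrapping a MomentumOptimizer
  #   train_op = opt.minimize(loss, global_step=global_step)
  #   sess.run(train_op)  # one step: momentum update, then lr/mu tuning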
def get_slot(self, var, name):
"""Return a slot named `name` created for `var`.
Args:
var: A variable passed to `minimize()` or `apply_gradients()`.
name: A string.
Returns:
The `Variable` for the slot if it was created, `None` otherwise.
"""
return self._momentum_optimizer.get_slot(var, name)
def get_slot_names(self):
"""Return a list of the names of the slots using MomentumOptimizer.
Returns:
A list of strings.
"""
return self._momentum_optimizer.get_slot_names()
| 37.622084 | 95 | 0.64214 |
9c06c03313cbdee4703a75dd816d1964f8d4e7c9 | 1,170 | py | Python | p2p_options/test_options.py | sergiolib/pytorch-CycleGAN-and-pix2pix | cd3058a6a0522a0ed9178682b06cda538947e335 | [
"BSD-3-Clause"
] | null | null | null | p2p_options/test_options.py | sergiolib/pytorch-CycleGAN-and-pix2pix | cd3058a6a0522a0ed9178682b06cda538947e335 | [
"BSD-3-Clause"
] | null | null | null | p2p_options/test_options.py | sergiolib/pytorch-CycleGAN-and-pix2pix | cd3058a6a0522a0ed9178682b06cda538947e335 | [
"BSD-3-Clause"
] | null | null | null | from .base_options import BaseOptions
class TestOptions(BaseOptions):
"""This class includes test p2p_options.
It also includes shared p2p_options defined in BaseOptions.
"""
def initialize(self, parser):
parser = BaseOptions.initialize(self, parser) # define shared p2p_options
parser.add_argument('--results_dir', type=str, default='./results/', help='saves results here.')
parser.add_argument('--aspect_ratio', type=float, default=1.0, help='aspect ratio of result images')
parser.add_argument('--phase', type=str, default='test', help='train, val, test, etc')
        # Dropout and BatchNorm behave differently during training and test.
parser.add_argument('--eval', action='store_true', help='use eval mode during test time.')
parser.add_argument('--num_test', type=int, default=50, help='how many test images to run')
        # rewrite default values
parser.set_defaults(model='test')
# To avoid cropping, the load_size should be the same as crop_size
parser.set_defaults(load_size=parser.get_default('crop_size'))
self.isTrain = False
return parser
| 48.75 | 108 | 0.692308 |
ebf0d0b3e7deb2634401403801bd929ff16e5a5f | 2,758 | py | Python | src/routers/user.py | snokpok/listlive-backend | 1bfffe3e3f137912646631a06e4054edd8c0fc4d | [
"MIT"
] | null | null | null | src/routers/user.py | snokpok/listlive-backend | 1bfffe3e3f137912646631a06e4054edd8c0fc4d | [
"MIT"
] | null | null | null | src/routers/user.py | snokpok/listlive-backend | 1bfffe3e3f137912646631a06e4054edd8c0fc4d | [
"MIT"
] | null | null | null | from typing import Any, Dict
from bson.json_util import dumps
from fastapi.routing import APIRouter
from pydantic.main import BaseModel
from mongo import user_col, list_col
from auth_repo import ar
from dependencies import verify_token_dependency
from bson.objectid import ObjectId
from fastapi import Depends
import json
user_router = APIRouter(
prefix="/users"
)
common_find_options_user = {"password": 0, "lists": 0}
@user_router.get("/")
async def get_all_users(claims: Dict[str, Any] = Depends(verify_token_dependency)):
search_res = user_col.find(
{"_id": {"$ne": ObjectId(claims.get("id"))}}, {"password": 0}
)
result_list = []
for user in search_res:
user["_id"] = str(user["_id"])
result_list.append(user)
return result_list
@user_router.get("/me")
async def get_me(claims: Dict[str, Any] = Depends(verify_token_dependency)):
me_res = user_col.find_one(
{"_id": ObjectId(claims.get("id"))}, common_find_options_user
)
me_res["_id"] = str(me_res.get("_id"))
return me_res
class EditMeBody(BaseModel):
profile_emoji: str
@user_router.put("/me")
async def edit_my_attribs(body: EditMeBody, claims: Dict[str, Any] = Depends(verify_token_dependency)):
edit_res = user_col.update_one(
{"_id": ObjectId(claims.get("id"))},
{"$set": {"profile_emoji": body.profile_emoji}},
)
return {
"raw": json.loads(dumps(edit_res.raw_result)),
"metas": {
"matched": edit_res.matched_count,
"modified": edit_res.modified_count,
},
}
@user_router.get("/{user_id}/lists", dependencies=[Depends(verify_token_dependency)], deprecated=True)
async def get_lists_by_user(user_id: str):
res = user_col.find_one({"_id": ObjectId(user_id)}, {"password": 0})
for idx, list_id in enumerate(res.get("lists")):
list_res = list_col.find_one(filter={"_id": ObjectId(list_id)})
list_res["_id"] = str(list_res.get("_id"))
res["lists"][idx] = list_res
res["_id"] = str(res.get("_id"))
return res
@user_router.get("/search", dependencies=[Depends(verify_token_dependency)])
async def search_user_by_full_name(q: str):
search_res = user_col.find(
{"$text": {"$search": q}}, common_find_options_user)
result_list = []
for user in search_res:
user["_id"] = str(user.get("_id"))
result_list.append(user)
return result_list
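# Note: the "$text" query in search_user_by_full_name requires a text index
# on the user collection; a minimal sketch (the indexed field names are
# assumptions, not taken from this file):
#   user_col.create_index([("first_name", "text"), ("last_name", "text")])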
@user_router.get("/{user_id}", dependencies=[Depends(verify_token_dependency)])
async def get_user_by_id(user_id: str):
res = user_col.find_one({"_id": ObjectId(user_id)},
common_find_options_user)
res["_id"] = str(res.get("_id"))
return res
| 31.340909 | 103 | 0.670776 |
91c837eed28b402746c8d77983006a86558d199b | 509 | py | Python | scraper.py | BatuhanOnder/Twitter-Bot | 62b33ea83971636d57e9d41a5c2c8aad54814817 | [
"MIT"
] | null | null | null | scraper.py | BatuhanOnder/Twitter-Bot | 62b33ea83971636d57e9d41a5c2c8aad54814817 | [
"MIT"
] | null | null | null | scraper.py | BatuhanOnder/Twitter-Bot | 62b33ea83971636d57e9d41a5c2c8aad54814817 | [
"MIT"
] | null | null | null | import twint
class tweetscraper:
def __init__(self,Config, ScrollCount):
self.Config = Config
self.scrape_all_tweets(ScrollCount)
def tweet_scraper(self):
        ''' This method is used to fetch the user's tweets '''
# Configuration
c = self.Config
# Run
twint.run.Search(c)
def scrape_all_tweets(self, ScrollCount):
        ''' Additional parameters will be added '''
for i in range(ScrollCount):
self.tweet_scraper()
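# Usage sketch (the twint.Config fields shown are standard twint options;
# the search term is a placeholder):
#   c = twint.Config()
#   c.Search = "some keyword"
#   c.Limit = 20
#   tweetscraper(c, ScrollCount=3)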
| 18.178571 | 67 | 0.620825 |
0afeca7626fbac2e7425ed8f8577637803747b9c | 4,963 | py | Python | client/verta/tests/test_dataset_versioning/test_dataset.py | stefan-petrov-toptal/modeldb | a8a9b9da6ed964c91351230b2f0d2703c75794de | [
"Apache-2.0"
] | 1 | 2021-03-26T05:41:34.000Z | 2021-03-26T05:41:34.000Z | client/verta/tests/test_dataset_versioning/test_dataset.py | stefan-petrov-toptal/modeldb | a8a9b9da6ed964c91351230b2f0d2703c75794de | [
"Apache-2.0"
] | null | null | null | client/verta/tests/test_dataset_versioning/test_dataset.py | stefan-petrov-toptal/modeldb | a8a9b9da6ed964c91351230b2f0d2703c75794de | [
"Apache-2.0"
] | 1 | 2021-05-04T13:52:09.000Z | 2021-05-04T13:52:09.000Z | import collections
import pytest
import requests
import verta
class TestMetadata:
def test_description(self, client, created_entities, strs):
first_desc, second_desc = strs[:2]
dataset = client.create_dataset(desc=first_desc)
created_entities.append(dataset)
assert dataset.get_description() == first_desc
dataset.set_description(second_desc)
assert dataset.get_description() == second_desc
assert client.get_dataset(id=dataset.id).get_description() == second_desc
def test_tags(self, client, created_entities, strs):
tag1, tag2, tag3, tag4 = strs[:4]
dataset = client.create_dataset(tags=[tag1])
created_entities.append(dataset)
assert set(dataset.get_tags()) == {tag1}
dataset.add_tag(tag2)
assert set(dataset.get_tags()) == {tag1, tag2}
dataset.add_tags([tag3, tag4])
assert set(dataset.get_tags()) == {tag1, tag2, tag3, tag4}
dataset.del_tag(tag3)
dataset.del_tag(tag3) # no error if nonexistent
assert set(dataset.get_tags()) == {tag1, tag2, tag4}
assert set(client.get_dataset(id=dataset.id).get_tags()) == {tag1, tag2, tag4}
def test_attributes(self, client, created_entities):
Attr = collections.namedtuple('Attr', ['key', 'value'])
attr1 = Attr('key1', {'a': 1})
attr2 = Attr('key2', ['a', 1])
attr3 = Attr('key3', 'a')
attr4 = Attr('key4', 1)
dataset = client.create_dataset(attrs=dict([attr1]))
created_entities.append(dataset)
assert dataset.get_attributes() == dict([attr1])
dataset.add_attribute(*attr2)
assert dataset.get_attributes() == dict([attr1, attr2])
dataset.add_attributes(dict([attr3, attr4]))
assert dataset.get_attributes() == dict([attr1, attr2, attr3, attr4])
dataset.del_attribute(attr3.key)
dataset.del_attribute(attr3.key) # no error if nonexistent
assert dataset.get_attributes() == dict([attr1, attr2, attr4])
assert client.get_dataset(id=dataset.id).get_attributes() == dict([attr1, attr2, attr4])
for attr in [attr1, attr2, attr4]:
assert dataset.get_attribute(attr.key) == attr.value
# overwrite
new_val = 'b'
dataset.add_attribute(attr1.key, new_val)
assert dataset.get_attribute(attr1.key) == new_val
class TestCreateGet:
def test_create(self, client, created_entities):
dataset = client.set_dataset()
assert dataset
created_entities.append(dataset)
name = verta._internal_utils._utils.generate_default_name()
dataset = client.create_dataset(name)
assert dataset
created_entities.append(dataset)
with pytest.raises(requests.HTTPError, match="already exists"):
assert client.create_dataset(name)
with pytest.warns(UserWarning, match="already exists"):
client.set_dataset(name=dataset.name, time_created=123)
def test_get(self, client, created_entities):
name = verta._internal_utils._utils.generate_default_name()
with pytest.raises(ValueError):
client.get_dataset(name)
dataset = client.set_dataset(name)
created_entities.append(dataset)
assert dataset.id == client.get_dataset(dataset.name).id
assert dataset.id == client.get_dataset(id=dataset.id).id
# Deleting non-existing key:
dataset.del_attribute("non-existing")
def test_find(self, client, created_entities, strs):
name1, name2, name3, name4, tag1, tag2 = (
s + str(verta._internal_utils._utils.now())
for s in strs[:6]
)
dataset1 = client.create_dataset(name1, tags=[tag1])
dataset2 = client.create_dataset(name2, tags=[tag1])
dataset3 = client.create_dataset(name3, tags=[tag2])
dataset4 = client.create_dataset(name4, tags=[tag2])
created_entities.extend([dataset1, dataset2, dataset3, dataset4])
datasets = client.datasets.find("name == {}".format(name3))
assert len(datasets) == 1
assert datasets[0].id == dataset3.id
datasets = client.datasets.find("tags ~= {}".format(tag1))
assert len(datasets) == 2
assert set(dataset.id for dataset in datasets) == {dataset1.id, dataset2.id}
def test_repr(self, client, created_entities):
description = "this is a cool dataset"
tags = [u"tag1", u"tag2"]
dataset = client.set_dataset(desc=description, tags=tags)
created_entities.append(dataset)
str_repr = repr(dataset)
assert "name: {}".format(dataset.name) in str_repr
assert "id: {}".format(dataset.id) in str_repr
assert "time created" in str_repr
assert "time updated" in str_repr
assert "description: {}".format(description) in str_repr
assert "tags: {}".format(tags) in str_repr
| 35.963768 | 96 | 0.646988 |
6eec6b3b266e92d80ab9659df75da9cf433df9da | 3,113 | py | Python | flask_restly/serializer/__init__.py | gorzechowski/flask-restly | 54f28b66f35b0ab12ba4ee37bcd6d39aaf24111a | [
"MIT"
] | 16 | 2018-10-16T20:07:02.000Z | 2021-01-07T13:01:05.000Z | flask_restly/serializer/__init__.py | gorzechowski/flask-restly | 54f28b66f35b0ab12ba4ee37bcd6d39aaf24111a | [
"MIT"
] | 16 | 2018-10-16T14:09:55.000Z | 2020-01-16T07:52:22.000Z | flask_restly/serializer/__init__.py | gorzechowski/flask-restly | 54f28b66f35b0ab12ba4ee37bcd6d39aaf24111a | [
"MIT"
] | 1 | 2019-04-17T03:20:41.000Z | 2019-04-17T03:20:41.000Z | from flask import jsonify, current_app
try:
from google.protobuf.reflection import GeneratedProtocolMessageType
from google.protobuf.message import Message as ProtocolMessage
from google.protobuf.descriptor import FieldDescriptor
except ImportError:
pass
class SerializerBase:
def serialize(self, response, outgoing):
raise NotImplementedError()
def deserialize(self, request, incoming):
raise NotImplementedError()
class JsonSerializer(SerializerBase):
def serialize(self, response, _):
return jsonify(response)
def deserialize(self, request, _):
return request.get_json()
class ProtobufSerializer(SerializerBase):
def serialize(self, response, outgoing):
assert isinstance(outgoing, GeneratedProtocolMessageType) is True
outgoing = _dict_to_protobuf(response, outgoing)
return current_app.response_class(
outgoing.SerializeToString(),
mimetype=current_app.config.get('RESTLY_PROTOBUF_MIMETYPE')
)
def deserialize(self, request, incoming):
assert isinstance(incoming, GeneratedProtocolMessageType) is True
incoming = incoming()
data = request.get_data().strip().decode('unicode-escape')
incoming.ParseFromString(data.encode('utf-8'))
return _protobuf_to_dict(incoming)
json = JsonSerializer()
protobuf = ProtobufSerializer()
def _dict_to_protobuf(value, message):
def _parse_list(values, message):
if len(values) > 0 and isinstance(values[0], dict):
for v in values:
cmd = message.add()
_parse_dict(v, cmd)
else:
message.extend(values)
def _parse_dict(values, message):
for key, value in values.items():
if isinstance(value, dict):
_parse_dict(value, getattr(message, key))
elif isinstance(value, list):
_parse_list(value, getattr(message, key))
elif hasattr(message, str(key)):
setattr(message, key, value)
elif hasattr(message, '__setitem__'):
message[key] = value
else:
raise Exception(
                'Unsupported protobuf type. ' +
'Tried to assign %s=%s: %s' % (key, value, message)
)
return message
return _parse_dict(value, message())
def _protobuf_to_dict(instance):
result = dict()
for descriptor, value in instance.ListFields():
if descriptor.label == FieldDescriptor.LABEL_REPEATED:
result[descriptor.name] = []
for item in value:
if isinstance(item, ProtocolMessage):
dict_item = _protobuf_to_dict(item)
result[descriptor.name].append(dict_item)
else:
result[descriptor.name].append(item)
elif descriptor.type == FieldDescriptor.TYPE_MESSAGE:
result[descriptor.name] = _protobuf_to_dict(value)
else:
result[descriptor.name] = value
return result
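# Round-trip sketch ("MyMessage" stands for any generated protobuf message
# class with a string field "name"; both helpers above only touch fields
# that are actually set):
#   msg = _dict_to_protobuf({"name": "abc"}, MyMessage)
#   assert _protobuf_to_dict(msg) == {"name": "abc"}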
| 30.519608 | 73 | 0.628654 |
f644261e40dd6971176c7024cd583922bb929597 | 974 | py | Python | 基础教程/A2-神经网络基本原理/第1步 - 基本知识/src/ch02-BASIC/Level5_LearningRate.py | microsoft/ai-edu | 2f59fa4d3cf19f14e0b291e907d89664bcdc8df3 | [
"Apache-2.0"
] | 11,094 | 2019-05-07T02:48:50.000Z | 2022-03-31T08:49:42.000Z | 基础教程/A2-神经网络基本原理/第1步 - 基本知识/src/ch02-BASIC/Level5_LearningRate.py | microsoft/ai-edu | 2f59fa4d3cf19f14e0b291e907d89664bcdc8df3 | [
"Apache-2.0"
] | 157 | 2019-05-13T15:07:19.000Z | 2022-03-23T08:52:32.000Z | 基础教程/A2-神经网络基本原理/第1步 - 基本知识/src/ch02-BASIC/Level5_LearningRate.py | microsoft/ai-edu | 2f59fa4d3cf19f14e0b291e907d89664bcdc8df3 | [
"Apache-2.0"
] | 2,412 | 2019-05-07T02:55:15.000Z | 2022-03-30T06:56:52.000Z | # Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE file in the project root for full license information.
import numpy as np
import matplotlib.pyplot as plt
def targetFunction(x):
y = (x-1)**2 + 0.1
return y
def derivativeFun(x):
y = 2*(x-1)
return y
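# Gradient-descent update used below: x <- x - eta * y'(x) = x - 2*eta*(x-1).
# For this quadratic the iteration contracts iff |1 - 2*eta| < 1, i.e.
# 0 < eta < 1, which is why the eta = 1.0 and eta = 1.1 runs in __main__
# oscillate or diverge.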
def create_sample():
x = np.linspace(-1,3,num=100)
y = targetFunction(x)
return x,y
def draw_base():
x,y=create_sample()
plt.plot(x,y,'.')
plt.show()
return x,y
def gd(eta):
x = -0.8
a = np.zeros((2,10))
for i in range(10):
a[0,i] = x
a[1,i] = targetFunction(x)
dx = derivativeFun(x)
x = x - eta*dx
plt.plot(a[0,:],a[1,:],'x')
plt.plot(a[0,:],a[1,:])
plt.title("eta=%f" %eta)
plt.show()
if __name__ == '__main__':
eta = [1.1,1.,0.8,0.6,0.4,0.2,0.1]
for e in eta:
X,Y=create_sample()
plt.plot(X,Y,'.')
#plt.show()
gd(e)
| 19.877551 | 100 | 0.544148 |
0c2a926618bfd9c176a5b163db74cad44fd78ae2 | 19,127 | py | Python | google/cloud/aiplatform_v1beta1/types/explanation_metadata.py | dizcology/python-aiplatform | 1a135775966c8a2303ded529eba514dcf9db7205 | [
"Apache-2.0"
] | 2 | 2021-10-02T02:25:44.000Z | 2021-11-17T10:35:01.000Z | google/cloud/aiplatform_v1beta1/types/explanation_metadata.py | pompipo/python-aiplatform | 3612b05c62dfb46822cd2c1798fd47349dba33bc | [
"Apache-2.0"
] | 1 | 2021-03-02T18:25:00.000Z | 2021-03-02T18:25:00.000Z | google/cloud/aiplatform_v1beta1/types/explanation_metadata.py | pompipo/python-aiplatform | 3612b05c62dfb46822cd2c1798fd47349dba33bc | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.protobuf import struct_pb2 # type: ignore
__protobuf__ = proto.module(
package="google.cloud.aiplatform.v1beta1", manifest={"ExplanationMetadata",},
)
class ExplanationMetadata(proto.Message):
r"""Metadata describing the Model's input and output for
explanation.
Attributes:
inputs (Sequence[google.cloud.aiplatform_v1beta1.types.ExplanationMetadata.InputsEntry]):
Required. Map from feature names to feature input metadata.
Keys are the name of the features. Values are the
specification of the feature.
An empty InputMetadata is valid. It describes a text feature
which has the name specified as the key in
[ExplanationMetadata.inputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.inputs].
The baseline of the empty feature is chosen by Vertex AI.
For Vertex AI-provided Tensorflow images, the key can be any
friendly name of the feature. Once specified,
[featureAttributions][google.cloud.aiplatform.v1beta1.Attribution.feature_attributions]
are keyed by this key (if not grouped with another feature).
For custom images, the key must match with the key in
[instance][google.cloud.aiplatform.v1beta1.ExplainRequest.instances].
outputs (Sequence[google.cloud.aiplatform_v1beta1.types.ExplanationMetadata.OutputsEntry]):
Required. Map from output names to output
metadata.
For Vertex AI-provided Tensorflow images, keys
can be any user defined string that consists of
any UTF-8 characters.
For custom images, keys are the name of the
output field in the prediction to be explained.
Currently only one key is allowed.
feature_attributions_schema_uri (str):
Points to a YAML file stored on Google Cloud Storage
describing the format of the [feature
attributions][google.cloud.aiplatform.v1beta1.Attribution.feature_attributions].
The schema is defined as an OpenAPI 3.0.2 `Schema
Object <https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject>`__.
AutoML tabular Models always have this field populated by
Vertex AI. Note: The URI given on output may be different,
including the URI scheme, than the one given on input. The
output URI will point to a location where the user only has
a read access.
"""
class InputMetadata(proto.Message):
r"""Metadata of the input of a feature.
Fields other than
[InputMetadata.input_baselines][google.cloud.aiplatform.v1beta1.ExplanationMetadata.InputMetadata.input_baselines]
are applicable only for Models that are using Vertex AI-provided
images for Tensorflow.
Attributes:
input_baselines (Sequence[google.protobuf.struct_pb2.Value]):
Baseline inputs for this feature.
If no baseline is specified, Vertex AI chooses the baseline
for this feature. If multiple baselines are specified,
Vertex AI returns the average attributions across them in
[Attributions.baseline_attribution][].
For Vertex AI-provided Tensorflow images (both 1.x and 2.x),
the shape of each baseline must match the shape of the input
tensor. If a scalar is provided, we broadcast to the same
shape as the input tensor.
For custom images, the element of the baselines must be in
the same format as the feature's input in the
[instance][google.cloud.aiplatform.v1beta1.ExplainRequest.instances][].
The schema of any single instance may be specified via
Endpoint's DeployedModels'
[Model's][google.cloud.aiplatform.v1beta1.DeployedModel.model]
[PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata]
[instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri].
input_tensor_name (str):
Name of the input tensor for this feature.
Required and is only applicable to Vertex AI-
provided images for Tensorflow.
encoding (google.cloud.aiplatform_v1beta1.types.ExplanationMetadata.InputMetadata.Encoding):
Defines how the feature is encoded into the
input tensor. Defaults to IDENTITY.
modality (str):
Modality of the feature. Valid values are:
numeric, image. Defaults to numeric.
feature_value_domain (google.cloud.aiplatform_v1beta1.types.ExplanationMetadata.InputMetadata.FeatureValueDomain):
The domain details of the input feature
value. Like min/max, original mean or standard
deviation if normalized.
indices_tensor_name (str):
Specifies the index of the values of the input tensor.
Required when the input tensor is a sparse representation.
Refer to Tensorflow documentation for more details:
https://www.tensorflow.org/api_docs/python/tf/sparse/SparseTensor.
dense_shape_tensor_name (str):
Specifies the shape of the values of the input if the input
is a sparse representation. Refer to Tensorflow
documentation for more details:
https://www.tensorflow.org/api_docs/python/tf/sparse/SparseTensor.
index_feature_mapping (Sequence[str]):
A list of feature names for each index in the input tensor.
Required when the input
[InputMetadata.encoding][google.cloud.aiplatform.v1beta1.ExplanationMetadata.InputMetadata.encoding]
is BAG_OF_FEATURES, BAG_OF_FEATURES_SPARSE, INDICATOR.
encoded_tensor_name (str):
Encoded tensor is a transformation of the input tensor. Must
be provided if choosing [Integrated Gradients
attribution][ExplanationParameters.integrated_gradients_attribution]
or [XRAI
attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.xrai_attribution]
and the input tensor is not differentiable.
An encoded tensor is generated if the input tensor is
encoded by a lookup table.
encoded_baselines (Sequence[google.protobuf.struct_pb2.Value]):
A list of baselines for the encoded tensor.
The shape of each baseline should match the
shape of the encoded tensor. If a scalar is
provided, Vertex AI broadcasts to the same shape
as the encoded tensor.
visualization (google.cloud.aiplatform_v1beta1.types.ExplanationMetadata.InputMetadata.Visualization):
Visualization configurations for image
explanation.
group_name (str):
Name of the group that the input belongs to. Features with
the same group name will be treated as one feature when
computing attributions. Features grouped together can have
different shapes in value. If provided, there will be one
single attribution generated in [
featureAttributions][Attribution.feature_attributions],
keyed by the group name.
"""
class Encoding(proto.Enum):
r"""Defines how the feature is encoded to [encoded_tensor][]. Defaults
to IDENTITY.
"""
ENCODING_UNSPECIFIED = 0
IDENTITY = 1
BAG_OF_FEATURES = 2
BAG_OF_FEATURES_SPARSE = 3
INDICATOR = 4
COMBINED_EMBEDDING = 5
CONCAT_EMBEDDING = 6
class FeatureValueDomain(proto.Message):
r"""Domain details of the input feature value. Provides numeric
information about the feature, such as its range (min, max). If the
feature has been pre-processed, for example with z-scoring, then it
provides information about how to recover the original feature. For
example, if the input feature is an image and it has been
pre-processed to obtain 0-mean and stddev = 1 values, then
original_mean, and original_stddev refer to the mean and stddev of
the original feature (e.g. image tensor) from which input feature
(with mean = 0 and stddev = 1) was obtained.
Attributes:
min_value (float):
The minimum permissible value for this
feature.
max_value (float):
The maximum permissible value for this
feature.
original_mean (float):
If this input feature has been normalized to a mean value of
0, the original_mean specifies the mean value of the domain
prior to normalization.
original_stddev (float):
If this input feature has been normalized to a standard
deviation of 1.0, the original_stddev specifies the standard
deviation of the domain prior to normalization.
"""
min_value = proto.Field(proto.FLOAT, number=1,)
max_value = proto.Field(proto.FLOAT, number=2,)
original_mean = proto.Field(proto.FLOAT, number=3,)
original_stddev = proto.Field(proto.FLOAT, number=4,)
class Visualization(proto.Message):
r"""Visualization configurations for image explanation.
Attributes:
type_ (google.cloud.aiplatform_v1beta1.types.ExplanationMetadata.InputMetadata.Visualization.Type):
Type of the image visualization. Only applicable to
[Integrated Gradients attribution]
[ExplanationParameters.integrated_gradients_attribution].
OUTLINES shows regions of attribution, while PIXELS shows
per-pixel attribution. Defaults to OUTLINES.
polarity (google.cloud.aiplatform_v1beta1.types.ExplanationMetadata.InputMetadata.Visualization.Polarity):
Whether to only highlight pixels with
positive contributions, negative or both.
Defaults to POSITIVE.
color_map (google.cloud.aiplatform_v1beta1.types.ExplanationMetadata.InputMetadata.Visualization.ColorMap):
The color scheme used for the highlighted areas.
Defaults to PINK_GREEN for [Integrated Gradients
attribution][ExplanationParameters.integrated_gradients_attribution],
which shows positive attributions in green and negative in
pink.
Defaults to VIRIDIS for [XRAI
attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.xrai_attribution],
which highlights the most influential regions in yellow and
the least influential in blue.
clip_percent_upperbound (float):
Excludes attributions above the specified percentile from
the highlighted areas. Using the clip_percent_upperbound and
clip_percent_lowerbound together can be useful for filtering
out noise and making it easier to see areas of strong
attribution. Defaults to 99.9.
clip_percent_lowerbound (float):
Excludes attributions below the specified
percentile, from the highlighted areas. Defaults
to 62.
overlay_type (google.cloud.aiplatform_v1beta1.types.ExplanationMetadata.InputMetadata.Visualization.OverlayType):
How the original image is displayed in the
visualization. Adjusting the overlay can help
increase visual clarity if the original image
makes it difficult to view the visualization.
Defaults to NONE.
"""
class Type(proto.Enum):
r"""Type of the image visualization. Only applicable to [Integrated
Gradients attribution]
[ExplanationParameters.integrated_gradients_attribution].
"""
TYPE_UNSPECIFIED = 0
PIXELS = 1
OUTLINES = 2
class Polarity(proto.Enum):
r"""Whether to only highlight pixels with positive contributions,
negative or both. Defaults to POSITIVE.
"""
POLARITY_UNSPECIFIED = 0
POSITIVE = 1
NEGATIVE = 2
BOTH = 3
class ColorMap(proto.Enum):
r"""The color scheme used for highlighting areas."""
COLOR_MAP_UNSPECIFIED = 0
PINK_GREEN = 1
VIRIDIS = 2
RED = 3
GREEN = 4
RED_GREEN = 6
PINK_WHITE_GREEN = 5
class OverlayType(proto.Enum):
r"""How the original image is displayed in the visualization."""
OVERLAY_TYPE_UNSPECIFIED = 0
NONE = 1
ORIGINAL = 2
GRAYSCALE = 3
MASK_BLACK = 4
type_ = proto.Field(
proto.ENUM,
number=1,
enum="ExplanationMetadata.InputMetadata.Visualization.Type",
)
polarity = proto.Field(
proto.ENUM,
number=2,
enum="ExplanationMetadata.InputMetadata.Visualization.Polarity",
)
color_map = proto.Field(
proto.ENUM,
number=3,
enum="ExplanationMetadata.InputMetadata.Visualization.ColorMap",
)
clip_percent_upperbound = proto.Field(proto.FLOAT, number=4,)
clip_percent_lowerbound = proto.Field(proto.FLOAT, number=5,)
overlay_type = proto.Field(
proto.ENUM,
number=6,
enum="ExplanationMetadata.InputMetadata.Visualization.OverlayType",
)
input_baselines = proto.RepeatedField(
proto.MESSAGE, number=1, message=struct_pb2.Value,
)
input_tensor_name = proto.Field(proto.STRING, number=2,)
encoding = proto.Field(
proto.ENUM, number=3, enum="ExplanationMetadata.InputMetadata.Encoding",
)
modality = proto.Field(proto.STRING, number=4,)
feature_value_domain = proto.Field(
proto.MESSAGE,
number=5,
message="ExplanationMetadata.InputMetadata.FeatureValueDomain",
)
indices_tensor_name = proto.Field(proto.STRING, number=6,)
dense_shape_tensor_name = proto.Field(proto.STRING, number=7,)
index_feature_mapping = proto.RepeatedField(proto.STRING, number=8,)
encoded_tensor_name = proto.Field(proto.STRING, number=9,)
encoded_baselines = proto.RepeatedField(
proto.MESSAGE, number=10, message=struct_pb2.Value,
)
visualization = proto.Field(
proto.MESSAGE,
number=11,
message="ExplanationMetadata.InputMetadata.Visualization",
)
group_name = proto.Field(proto.STRING, number=12,)
class OutputMetadata(proto.Message):
r"""Metadata of the prediction output to be explained.
Attributes:
index_display_name_mapping (google.protobuf.struct_pb2.Value):
Static mapping between the index and display name.
Use this if the outputs are a deterministic n-dimensional
array, e.g. a list of scores of all the classes in a
pre-defined order for a multi-classification Model. It's not
feasible if the outputs are non-deterministic, e.g. the
Model produces top-k classes or sort the outputs by their
values.
The shape of the value must be an n-dimensional array of
strings. The number of dimensions must match that of the
outputs to be explained. The
[Attribution.output_display_name][google.cloud.aiplatform.v1beta1.Attribution.output_display_name]
is populated by locating in the mapping with
[Attribution.output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index].
display_name_mapping_key (str):
Specify a field name in the prediction to look for the
display name.
Use this if the prediction contains the display names for
the outputs.
The display names in the prediction must have the same shape
of the outputs, so that it can be located by
[Attribution.output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index]
for a specific output.
output_tensor_name (str):
Name of the output tensor. Required and is
only applicable to AI Platform provided images
for Tensorflow.
"""
index_display_name_mapping = proto.Field(
proto.MESSAGE,
number=1,
oneof="display_name_mapping",
message=struct_pb2.Value,
)
display_name_mapping_key = proto.Field(
proto.STRING, number=2, oneof="display_name_mapping",
)
output_tensor_name = proto.Field(proto.STRING, number=3,)
inputs = proto.MapField(
proto.STRING, proto.MESSAGE, number=1, message=InputMetadata,
)
outputs = proto.MapField(
proto.STRING, proto.MESSAGE, number=2, message=OutputMetadata,
)
feature_attributions_schema_uri = proto.Field(proto.STRING, number=3,)
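# Construction sketch (proto-plus messages accept keyword and dict
# construction; the feature and output names below are placeholders):
#   md = ExplanationMetadata(
#       inputs={"feature_a": ExplanationMetadata.InputMetadata(
#           input_tensor_name="input_a:0")},
#       outputs={"score": ExplanationMetadata.OutputMetadata(
#           output_tensor_name="scores:0")},
#   )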
__all__ = tuple(sorted(__protobuf__.manifest))
| 49.04359 | 129 | 0.619125 |
54fe4130942c90a7856ef386e252325c47408c86 | 1,201 | py | Python | apps/ingest/migrations/0001_squashed_0005_auto_20201027_1653.py | ecds/readux | 4eac8b48efef8126f4f2be28b5eb943c85a89c2e | [
"Apache-2.0"
] | 18 | 2017-06-12T09:58:02.000Z | 2021-10-01T11:14:34.000Z | apps/ingest/migrations/0001_squashed_0005_auto_20201027_1653.py | ecds/readux | 4eac8b48efef8126f4f2be28b5eb943c85a89c2e | [
"Apache-2.0"
] | 276 | 2019-04-26T20:13:01.000Z | 2022-03-31T10:26:28.000Z | apps/ingest/migrations/0001_squashed_0005_auto_20201027_1653.py | ecds/readux | 4eac8b48efef8126f4f2be28b5eb943c85a89c2e | [
"Apache-2.0"
] | 7 | 2018-03-13T23:44:26.000Z | 2021-09-15T17:54:55.000Z | # Generated by Django 2.2.10 on 2020-10-29 15:21
import apps.ingest.models
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
replaces = [('ingest', '0001_initial'), ('ingest', '0002_auto_20201021_1610'), ('ingest', '0003_auto_20201027_1452'), ('ingest', '0004_auto_20201027_1605'), ('ingest', '0005_auto_20201027_1653')]
initial = True
dependencies = [
('manifests', '0012_auto_20200819_1608'),
('canvases', '0005_auto_20201027_1452'),
]
operations = [
migrations.CreateModel(
name='Local',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('bundle', models.FileField(upload_to='')),
('temp_file_path', models.FilePathField(path='/tmp/tmpz0h5pz94')),
('image_server', models.ForeignKey(null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='manifests.ImageServer')),
('manifest', models.ForeignKey(null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='manifests.Manifest')),
],
),
]
| 38.741935 | 199 | 0.644463 |
c941153434bdc707eea163e86ebf1cabe765f7b8 | 9,817 | py | Python | code/analysis/trajectory_plotter.py | weidel-p/go-robot-nogo-robot | 026f1f753125089a03504320cc94a76888a0efc5 | [
"MIT"
] | 1 | 2020-09-23T22:16:10.000Z | 2020-09-23T22:16:10.000Z | code/analysis/trajectory_plotter.py | weidel-p/go-robot-nogo-robot | 026f1f753125089a03504320cc94a76888a0efc5 | [
"MIT"
] | null | null | null | code/analysis/trajectory_plotter.py | weidel-p/go-robot-nogo-robot | 026f1f753125089a03504320cc94a76888a0efc5 | [
"MIT"
] | null | null | null | import matplotlib
matplotlib.use('Agg')
import numpy as np
import pylab as pl
import os
import pickle
import json
import rosbag
import sys
import transformations as trans
import math
import os
import scipy.signal as sciSig
import yaml
import colors
sys.path.append("code/striatal_model")
import params
from plot_tools2 import *
from matplotlib.collections import LineCollection
from matplotlib.colors import ListedColormap, BoundaryNorm
from matplotlib.colors import LinearSegmentedColormap
import matplotlib.colors as mcolors
bag_fn = sys.argv[1]
spikes_left_fn = sys.argv[2]
spikes_right_fn = sys.argv[3]
channels_left_fn = sys.argv[4]
channels_right_fn = sys.argv[5]
experiment_fn = sys.argv[6]
traj_out_fn = sys.argv[7]
#turn_out_fn = sys.argv[8]
# spike data of the channels
data_left = np.loadtxt(spikes_left_fn)
senders_left = data_left[:, 0]
unique_senders_left = np.unique(senders_left) # all active senders
times_left = data_left[:, 1]
with open(channels_left_fn, "r+") as f:
channels_left = json.load(f)
channels_left = channels_left['channels']
data_right = np.loadtxt(spikes_right_fn)
senders_right = data_right[:, 0]
unique_senders_right = np.unique(senders_right) # all active senders
times_right = data_right[:, 1]
with open(channels_right_fn, "r+") as f:
channels_right = json.load(f)
channels_right = channels_right['channels']
with open(experiment_fn, "r+") as f:
cfg = yaml.load(f)
stim_times_start_left, stim_times_stop_left = get_stim_times(
cfg, "left", params, mask=False, scale=1.)
stim_times_start_right, stim_times_stop_right = get_stim_times(
cfg, "right", params, mask=False, scale=1.)
stim_times_left = zip(stim_times_start_left / 1000.,
stim_times_stop_left / 1000.)
stim_times_right = zip(stim_times_start_right / 1000.,
stim_times_stop_right / 1000.)
all_d1_left = np.ravel([c['d1'] for c in channels_left])
all_d2_left = np.ravel([c['d2'] for c in channels_left])
all_d1_right = np.ravel([c['d1'] for c in channels_right])
all_d2_right = np.ravel([c['d2'] for c in channels_right])
spikes_d1_left = np.hstack(
[times_left[np.where(senders_left == nid)[0]] for nid in all_d1_left])
spikes_d2_left = np.hstack(
[times_left[np.where(senders_left == nid)[0]] for nid in all_d2_left])
spikes_d1_right = np.hstack(
[times_right[np.where(senders_right == nid)[0]] for nid in all_d1_right])
spikes_d2_right = np.hstack(
[times_right[np.where(senders_right == nid)[0]] for nid in all_d2_right])
binsize = 100
hist_all_d1_left = np.histogram(spikes_d1_left, bins=(int(params.runtime)) / binsize)[0].astype(
'float') * 1000. / (binsize * len(all_d1_left))
hist_all_d2_left = np.histogram(spikes_d2_left, bins=(int(params.runtime)) / binsize)[0].astype(
'float') * 1000. / (binsize * len(all_d2_left))
hist_all_d1_right = np.histogram(spikes_d1_right, bins=(int(params.runtime)) / binsize)[0].astype(
'float') * 1000. / (binsize * len(all_d1_right))
hist_all_d2_right = np.histogram(spikes_d2_right, bins=(int(params.runtime)) / binsize)[0].astype(
'float') * 1000. / (binsize * len(all_d2_right))
bag = rosbag.Bag(bag_fn)
x = np.array([])
y = np.array([])
time = np.array([])
rot_z = np.array([])
for topic, msg, t in bag.read_messages():
time = np.append(time, t.to_time())
x = np.append(x, msg.pose.pose.position.x)
y = np.append(y, msg.pose.pose.position.y)
quat = msg.pose.pose.orientation
rot_z = np.append(rot_z, trans.euler_from_quaternion(
[quat.x, quat.y, quat.z, quat.w])[2])
bag.close()
time -= time[0]
mask = np.where(time <= 200)
time = time[mask]
x = x[mask]
y = y[mask]
rot_z = rot_z[mask]
rot_z = np.diff(rot_z)
rot_z = rot_z[np.where(abs(rot_z) < np.pi / 2.)]
i = 0
xp = []
yp = []
rot_z_p = []
inds = []
for ind, t in enumerate(time):
if t > i:
xp.append(x[ind])
yp.append(y[ind])
        if ind < len(rot_z):  # guard against index errors: ind can exceed len(rot_z) after the diff/filter above
rot_z_p.append(rot_z[ind])
inds.append(ind)
        # step in increments of the maximal robot time time[-1] (see the launch-trial script) scaled by the NEST simulation time (20 s)
i += time[-1] / 20.
def isDuringStimulation(t):
if "no_stim" in experiment_fn:
return False
for stim_time in stim_times_left:
if t >= stim_time[0] and t < stim_time[1]:
return True
for stim_time in stim_times_right:
if t >= stim_time[0] and t < stim_time[1]:
return True
return False
colors.seaborn.set_context('paper', font_scale=3.0,
rc={"lines.linewidth": 2.5})
colors.seaborn.set_style('whitegrid', {"axes.linewidth": 2.5})
fig_traj = pl.figure(figsize=[16, 10])
ax_left_hist = pl.subplot2grid((2, 2), (0, 0))
ax_right_hist = pl.subplot2grid((2, 2), (1, 0))
ax_traj = pl.subplot2grid((2, 2), (0, 1), rowspan=2)
x_ = x[::1]
y_ = y[::1]
points = np.array([x_, y_]).T.reshape(-1, 1, 2)
segments = np.concatenate([points[:-1], points[1:]], axis=1)
conv = mcolors.ColorConverter().to_rgb
cmap = LinearSegmentedColormap.from_list(
"my_cmap", [conv(colors.colors[0]), conv(colors.colors[1])], N=1000)
lc = LineCollection(segments, cmap=cmap,
norm=pl.Normalize(0, 10))
t = np.linspace(0, 10, len(x_))
lc.set_array(t)
lc.set_linewidth(7)
for t, _ in enumerate(xp):
if isDuringStimulation(t):
ax_traj.plot(xp[t], yp[t], '.', color=colors.colors[2], markersize=40.)
else:
ax_traj.plot(xp[t], yp[t], '.', color=colors.colors[4], markersize=40.)
ax_traj.plot(xp[0], yp[0], 'g*', markersize=50)
ax_traj.set_xlabel("x", fontweight='bold')
ax_traj.set_ylabel("y ", fontweight='bold')
for tick_label in ax_traj.get_xticklabels():
tick_label.set_fontweight('bold')
for tick_label in ax_traj.get_yticklabels():
tick_label.set_fontweight('bold')
ax_traj.set_xlim([np.min(x) - 2, np.max(x) + 2])
ax_traj.set_ylim([np.min(y) - 2, np.max(y) + 2])
ax_traj.add_collection(lc)
ax_left_hist.plot(np.arange(len(hist_all_d1_left)) /
(1000. / binsize), hist_all_d1_left)
ax_left_hist.plot(np.arange(len(hist_all_d2_left)) /
(1000. / binsize), hist_all_d2_left)
ax_right_hist.plot(np.arange(len(hist_all_d1_right)) /
(1000. / binsize), hist_all_d1_right)
ax_right_hist.plot(np.arange(len(hist_all_d2_right)) /
(1000. / binsize), hist_all_d2_right)
ax_left_hist.text(-0.20, 0.97, 'L',
transform=ax_left_hist.transAxes, fontsize=35)
ax_right_hist.text(-0.20, 0.97, 'R',
transform=ax_right_hist.transAxes, fontsize=35)
#ax_left_hist.set_title("Left hemisphere")
#ax_right_hist.set_title("Right hemisphere")
ax_left_hist.set_ylabel("Firing rate (spks/s)", fontweight='bold')
ax_right_hist.set_ylabel("Firing rate (spks/s)", fontweight='bold')
ax_right_hist.set_xlabel("Time (s)", fontweight='bold')
# Filter the trace rot_z with low pass filter to make it easier to find change in directions
B, A = sciSig.butter(3, 0.1, btype='low', analog=False)
rotFilt = sciSig.lfilter(B, A, rot_z)
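# Note: butter(3, 0.1) is a 3rd-order low-pass with cutoff at 0.1 of the
# Nyquist frequency; lfilter applies it causally, so the smoothed trace
# rotFilt lags the raw rot_z slightly.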
left_turn_inds = np.where(rot_z < -0.005)
left_turn_filt_inds = np.where(rotFilt < -0.005)
left_turn_chunks_ids = np.where(np.diff(left_turn_filt_inds) > 1)[1]
# np.append(0,left_turn_chunks_ids)
left_turn_chunks = []
for i, x in enumerate(left_turn_chunks_ids):
if i == 0:
left_turn_chunks.append(left_turn_filt_inds[0][0:x + 1])
else:
left_turn_chunks.append(
left_turn_filt_inds[0][left_turn_chunks_ids[i - 1] + 1:x + 1])
left_turn_chunk_times = [np.array(time[x]) / (time[-1] / 20.)
for x in left_turn_chunks]
right_turn_inds = np.where(rot_z > 0.005)
right_turn_filt_inds = np.where(rotFilt > 0.005)
right_turn_chunks_ids = np.where(np.diff(right_turn_filt_inds) > 1)[1]
# np.append(0,left_turn_chunks_ids)
right_turn_chunks = []
for i, x in enumerate(right_turn_chunks_ids):
if i == 0:
right_turn_chunks.append(right_turn_filt_inds[0][0:x + 1])
else:
right_turn_chunks.append(
right_turn_filt_inds[0][right_turn_chunks_ids[i - 1] + 1:x + 1])
right_turn_chunk_times = [
np.array(time[x]) / (time[-1] / 20.) for x in right_turn_chunks]
ax = pl.axes([.80, .80, .15, .15])
ax.plot(np.linspace(0, 20, len(rot_z)), rot_z)
stim_start = 0 # Flag to detect when stimulation starts for the first stim
stim_starttime=[]
stim_stoptime=[]
for t, _ in enumerate(xp):
try:
if isDuringStimulation(t):
if stim_start == 0:
stim_starttime.append(t)
stim_start = 1
ax.plot(t, rot_z_p[t], '.r', markersize=10., linewidth=1., color=colors.colors[2])
else:
ax.plot(t, rot_z_p[t], '.g', markersize=10., color=colors.colors[4])
# First time it comes here after stim_start = 1
if stim_start == 1:
stim_stoptime.append(t)
stim_start = 0
except:
pass
print "stim_starttime",stim_starttime
print "stim_stoptime",stim_stoptime
for x,y in zip(stim_starttime,stim_stoptime):
ax.axvspan(x,y,ymin=abs(min(rot_z))/(abs(max(rot_z))+abs(min(rot_z))),ymax=1.0,alpha=0.3,color=colors.colors[5])
ax.axvspan(x,y,ymin=0,ymax=abs(min(rot_z))/(abs(max(rot_z))+abs(min(rot_z))),alpha=0.3,color=colors.colors[3])
ax.set_xticklabels([])
ax.set_yticks([min(rot_z), 0.0, max(rot_z)])
#if "no_stim.yaml" in experiment_fn:
ax.set_yticklabels(['R', '0', 'L'],fontsize=30)
#else:
# ax.set_yticklabels([])
ax.set_ylim([min(rot_z), max(rot_z)])
ax.yaxis.tick_right()
#ax.vlines(left_turn_inds[0], -0.04, 0.04, colors.colors[1], alpha=0.2)
#ax.vlines(right_turn_inds[0], -0.04, 0.04, colors.colors[2], alpha=0.2)
#fig.savefig(turn_out_fn)
pl.savefig(traj_out_fn)
| 31.264331 | 116 | 0.675563 |
5cf729ab3f85f5ea91d334540ff92782219d42d6 | 5,327 | py | Python | scrnatools/plotting/_gene_density_plot.py | j-germino/sc-rna-tools-git | 4e9a4fce40f6a303a1869e93a6e52e8db663bd06 | [
"BSD-3-Clause"
] | null | null | null | scrnatools/plotting/_gene_density_plot.py | j-germino/sc-rna-tools-git | 4e9a4fce40f6a303a1869e93a6e52e8db663bd06 | [
"BSD-3-Clause"
] | null | null | null | scrnatools/plotting/_gene_density_plot.py | j-germino/sc-rna-tools-git | 4e9a4fce40f6a303a1869e93a6e52e8db663bd06 | [
"BSD-3-Clause"
] | null | null | null | """
Creates gene density plots for scRNAseq datasets
From sc-rna-tools package
Created on Mon Jan 10 15:57:46 2022
@author: joe germino (joe.germino@ucsf.edu)
"""
# external package imports
from anndata import AnnData
from typing import List, Optional, Tuple
import seaborn as sns
import matplotlib.pyplot as plt
from math import ceil
from scipy.sparse import issparse
from statsmodels.nonparametric.kernel_density import KDEMultivariateConditional, EstimatorSettings
from sklearn.preprocessing import StandardScaler
import numpy as np
# scrnatools package imports
from .._configs import configs
from .._utils import debug, check_path
logger = configs.create_logger(__name__.split('_', 1)[1])
# -------------------------------------------------------function----------------------------------------------------- #
@debug(logger, configs)
def gene_density_plot(
adata: AnnData,
gene_list: List[str],
data_loc: str = "X",
thresh: int = 1,
latent_rep: str = "X_umap",
est_settings=None,
color_map: str = "magma",
s: Optional[int] = None,
ncols: int = 3,
dims: Tuple[int] = (3, 3),
title: Optional[str] = None,
save_path: Optional[str] = None,
):
"""Plots the density of expression of genes on an embedding
Parameters
----------
adata
The AnnData containing the gene expression and cell data
gene_list
A list of genes to plot
data_loc
The location of the expression data to use for density calculations, can be a layer in 'adata.layers' or 'X'
to use the data stored in adata.X. Default 'X'
    thresh
        The threshold (in standard-scaled expression units) at which the
        conditional CDF is evaluated. Default 1
    latent_rep
        The 2D representation to plot gene expression for each cell on. Default 'X_umap'
    est_settings
        Optional statsmodels EstimatorSettings controlling the conditional
        KDE fit. Default None
color_map
The pyplot colormap to use for plotting. Default 'magma'
s
The size of data points to plot. Default is to automatically calculate (None)
ncols
The number of columns in the figure
dims
A tuple of the dimensions of each subplot for a single gene. Default (3,3)
title
The title of the figure. Default None
save_path
The path to save the figure. Default None.
Raises
-------
ValueError
If the 'data_loc' provided is not 'X' or a valid layer in 'adata.layers'
"""
# set up figure
if len(gene_list) < ncols:
ncols = len(gene_list)
nrows = ceil((len(gene_list)) / ncols)
fig = plt.figure(figsize=(ncols * dims[0], nrows * dims[1]))
sns.set_theme(context="paper", style="white", )
# Iterate over genes to plot
for index, gene in enumerate(gene_list):
if gene not in adata.var_names:
logger.info(f"{gene} not in 'adata.var_names', skipping")
else:
temp_data = adata[:, gene]
if data_loc == "X":
if issparse(temp_data.X):
x = temp_data.X.todense()
else:
x = temp_data.X
else:
if data_loc in temp_data.layers:
if issparse(temp_data.X):
x = temp_data.layers[data_loc].todense()
else:
x = temp_data.layers[data_loc]
else:
raise ValueError(f"{data_loc} not 'X' or a valid layer in 'adata.layers'")
scaler = StandardScaler()
x_scaled = scaler.fit_transform(x).reshape(-1)
density = KDEMultivariateConditional(
endog=x_scaled,
exog=adata.obsm[latent_rep],
dep_type="c",
indep_type="cc",
bw="normal_reference",
defaults=est_settings
)
z1 = density.cdf(thresh + np.zeros_like(x_scaled), adata.obsm[latent_rep])
cm = plt.get_cmap(color_map)
s = int(80000 / adata.shape[0]) if s is None else s
# Plot density of gene expression
idx = np.argsort(x_scaled)
plt.subplot(nrows, ncols, index + 1)
ax = sns.scatterplot(
x=adata.obsm[latent_rep][idx, 0],
y=adata.obsm[latent_rep][idx, 1],
hue=1 - z1[idx],
palette=cm,
s=s,
linewidth=0
)
ax.set_aspect("equal") # Make sure the aspect ratio is square
ax.set(xticklabels=[], yticklabels=[], xlabel=None, ylabel=None, title=gene) # Get rid of x and y labels
norm = plt.Normalize(1 - z1[idx].min(), 1 - z1[idx].max())
sm = plt.cm.ScalarMappable(cmap=cm, norm=norm)
sm.set_array([])
# Remove the legend and add a colorbar
ax.get_legend().remove()
color_bar = ax.figure.colorbar(sm)
color_bar.set_label("Probability of Expression")
sns.despine(left=True, bottom=True) # no borders
fig.suptitle(title) # figure title
fig.tight_layout(rect=[0, 0.03, 1, 0.98])
if save_path is not None:
check_path(save_path.rsplit("/")[0])
logger.info(f"Saving figure to {save_path}")
plt.savefig(save_path, dpi=300)
plt.show()
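# Usage sketch (hypothetical AnnData object with a UMAP embedding; the gene
# names are placeholders):
#   gene_density_plot(adata, ["Cd3e", "Nkg7"], data_loc="X",
#                     latent_rep="X_umap", thresh=1, ncols=2)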
| 35.278146 | 120 | 0.56636 |
d81c21363f4b377563d2ea00677dc146f8c9f8a8 | 2,547 | py | Python | knapsack.py | lefedoreichmann/pytorch-vsumm-reinforce | 3eddd54166fbdc5cec8ee93c54b98999aaf8f3f4 | [
"MIT"
] | 18 | 2019-10-17T02:05:40.000Z | 2021-05-08T15:39:49.000Z | knapsack.py | lefedoreichmann/pytorch-vsumm-reinforce | 3eddd54166fbdc5cec8ee93c54b98999aaf8f3f4 | [
"MIT"
] | null | null | null | knapsack.py | lefedoreichmann/pytorch-vsumm-reinforce | 3eddd54166fbdc5cec8ee93c54b98999aaf8f3f4 | [
"MIT"
] | 1 | 2020-07-27T20:46:14.000Z | 2020-07-27T20:46:14.000Z | import numpy as np
'''
------------------------------------------------
Use dynamic programming (DP) to solve 0/1 knapsack problem
Time complexity: O(nW), where n is number of items and W is capacity
Author: Kaiyang Zhou
Website: https://kaiyangzhou.github.io/
------------------------------------------------
knapsack_dp(values,weights,n_items,capacity,return_all=False)
Input arguments:
1. values: a list of numbers in either int or float, specifying the values of items
2. weights: a list of int numbers specifying weights of items
3. n_items: an int number indicating number of items
4. capacity: an int number indicating the knapsack capacity
5. return_all: whether to return all info; default is False (optional)
Return:
1. picks: a list of numbers storing the positions of selected items
2. max_val: maximum value (optional)
------------------------------------------------
'''
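# DP recurrence implemented below:
#   table[i][w] = max(table[i-1][w], values[i-1] + table[i-1][w - weights[i-1]])
#     whenever weights[i-1] <= w;
# keep[i][w] records whether item i enters that optimum, so the selected
# items can be reconstructed by backtracking from table[n_items][capacity].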
def knapsack_dp(values,weights,n_items,capacity,return_all=False):
check_inputs(values,weights,n_items,capacity)
table = np.zeros((n_items+1,capacity+1),dtype=np.float32)
keep = np.zeros((n_items+1,capacity+1),dtype=np.float32)
for i in range(1,n_items+1):
for w in range(0,capacity+1):
wi = weights[i-1] # weight of current item
vi = values[i-1] # value of current item
if (wi <= w) and (vi + table[i-1,w-wi] > table[i-1,w]):
table[i,w] = vi + table[i-1,w-wi]
keep[i,w] = 1
else:
table[i,w] = table[i-1,w]
picks = []
K = capacity
for i in range(n_items,0,-1):
if keep[i,K] == 1:
picks.append(i)
K -= weights[i-1]
picks.sort()
picks = [x-1 for x in picks] # change to 0-index
if return_all:
max_val = table[n_items,capacity]
return picks,max_val
return picks
def check_inputs(values,weights,n_items,capacity):
# check variable type
assert(isinstance(values,list))
assert(isinstance(weights,list))
assert(isinstance(n_items,int))
assert(isinstance(capacity,int))
# check value type
assert(all(isinstance(val,int) or isinstance(val,float) for val in values))
assert(all(isinstance(val,int) for val in weights))
# check validity of value
assert(all(val >= 0 for val in weights))
assert(n_items > 0)
assert(capacity > 0)
if __name__ == '__main__':
values = [2,3,4]
weights = [1,2,3]
n_items = 3
capacity = 3
picks = knapsack_dp(values,weights,n_items,capacity)
print(picks)
| 32.653846 | 85 | 0.616019 |
3f8ee53ac072b1a6c37890193278b6f6c31469c6 | 48,689 | py | Python | src/pfeifferpumps/pfeifferproto.py | tspspi/pfeifferpumps | 9776e5ff5a2386867da3c721daab74de38a6b071 | [
"BSD-3-Clause"
] | 1 | 2021-12-01T21:03:00.000Z | 2021-12-01T21:03:00.000Z | src/pfeifferpumps/pfeifferproto.py | tspspi/pfeifferpumps | 9776e5ff5a2386867da3c721daab74de38a6b071 | [
"BSD-3-Clause"
] | null | null | null | src/pfeifferpumps/pfeifferproto.py | tspspi/pfeifferpumps | 9776e5ff5a2386867da3c721daab74de38a6b071 | [
"BSD-3-Clause"
] | null | null | null | class SerialProtocolViolation(Exception):
pass
class SerialProtocolUnknownRegister(Exception):
pass
class SerialCommunicationError(Exception):
pass
class SerialSimulationDone(Exception):
pass
class PfeifferProtocol:
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
pass
def __init__(self):
pass
def decodePacketRaw(self, line):
if len(line) < 14:
raise SerialProtocolViolation('Protocol violation. Sentence too short')
if line[-1] != '\r':
raise SerialProtocolViolation('Protocol violation. Sentence not ended with carriage return')
devAddress = line[:3]
devAction = line[3]
        # The following check would verify that the byte at position 5 is
        # always '0', as specified in the docs; this does not seem to hold
        # in practice, so the check stays disabled:
# if line[4] != '0':
# raise SerialProtocolViolation('Protocol violation. Byte at position 5 is not 0')
devParamNumber = line[5:8]
msgDataLength = line[8:10]
        # Now calculate the checksum (sum of ASCII codes modulo 256)
        realChkSum = sum(bytearray(line[:-4].encode(encoding = "ASCII"))) % 256
if realChkSum != int(line[-4:-1]):
raise SerialProtocolViolation('Protocol violation. Checksum invalid')
# In case the checksum passes return an dictionary containing the required information
return {
"address" : int(devAddress),
"param" : int(devParamNumber),
"action" : int(devAction),
"payloadRaw" : line[10:-4],
"payloadLength" : int(msgDataLength),
"packetRaw" : line
}
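    # Decoding sketch; "0010030902=?107\r" is a hand-built telegram for
    # address 001, action 0, parameter 309 with the two data bytes "=?"
    # (checksum 107 = sum of the ASCII codes of "0010030902=?" mod 256):
    #   pkt = PfeifferProtocol().decodePacketRaw("0010030902=?107\r")
    #   # -> address 1, action 0, param 309, payloadRaw "=?", payloadLength 2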
def decodeDataType_0(self, payload):
if len(payload) != 6:
raise SerialProtocolViolation('Datatype boolean_old has to be 6 characters long')
if payload == '111111':
return True
if payload == '000000':
return False
raise SerialProtocolViolation('Unknown boolean_old value '+payload)
def decodeDataType_1(self, payload):
if len(payload) != 6:
raise SerialProtocolViolation('Datatype u_integer has to be 6 characters long')
for i in range(0, len(payload)):
if (ord(payload[i]) < 0x30) or (ord(payload[i]) > 0x39):
raise SerialProtocolViolation('Invalid non ASCII number character in u_integer payload '+payload)
return int(payload)
def decodeDataType_2(self, payload):
if len(payload) != 6:
raise SerialProtocolViolation('Datatype u_real has to be 6 characters long')
for i in range(0, len(payload)):
if (ord(payload[i]) < 0x30) or (ord(payload[i]) > 0x39):
raise SerialProtocolViolation('Invalid non ASCII number character in u_real payload '+payload)
return float(payload)/100.0
def decodeDataType_3(self, payload):
if len(payload) != 6:
raise SerialProtocolViolation('Datatype u_expo has to be 6 characters long')
try:
num = float(payload)
except Exception:
raise SerialProtocolViolation('Invalid number '+payload+' for u_expo')
return num
def decodeDataType_4(self, payload):
        # NOTE: strings seem to have variable length in practice (contrary to
        # the documented fixed 6 characters), so the length check is disabled:
# if len(payload) != 6:
# raise SerialProtocolViolation('Datatype u_string has to be 6 characters long')
for i in range(0, len(payload)):
if (ord(payload[i]) < 0x20):
raise SerialProtocolViolation('Invalid ASCII character in u_string payload '+payload)
return payload
def decodeDataType_5(self, payload):
raise SerialProtocolViolation('Not implemented (ToDo)')
def decodeDataType_6(self, payload):
if len(payload) != 1:
raise SerialProtocolViolation('Datatype boolean_new has to be 1 character long')
if payload == '1':
return True
if payload == '0':
return False
raise SerialProtocolViolation('Invalid boolean_new value '+payload)
def decodeDataType_7(self, payload):
if len(payload) != 3:
            raise SerialProtocolViolation('Datatype u_short_int has to be 3 characters long')
try:
num = int(payload)
except Exception:
raise SerialProtocolViolation('Cannot interpret u_short_int '+payload)
return num
def decodeDataType_9(self, payload):
if len(payload) != 6:
            raise SerialProtocolViolation('Datatype tms_old has to be 6 characters long')
onoff = payload[:3]
temp = payload[3:]
if onoff == '111':
onoff = True
elif onoff == '000':
onoff = False
else:
raise SerialProtocolViolation('Boolean value in tms_old has invalid value '+onoff)
try:
temp = int(temp)
except Exception:
raise SerialProtocolViolation('Signalled temperature in tms_old has non integer value '+temp)
return {
"onoff" : onoff,
"temp" : temp
}
def decodeDataType_10(self, payload):
if len(payload) != 6:
raise SerialProtocolViolation('Datatype u_expo_new has to be 6 characters long')
try:
mantissa = int(payload[:4])
exponent = int(payload[-2:])
mantissa = float(mantissa) / 1000.0
return mantissa * pow(10, exponent)
except Exception:
raise SerialProtocolViolation('Mantissa or exponent in u_expo_new has invalid value '+payload)
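# e.g. (per this implementation; the real device docs may define the exponent
# differently) payload '398520' decodes to 3.985 * 10**20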
def decodeDataType_11(self, payload):
if len(payload) != 16:
raise SerialProtocolViolation('Datatype string16 has to be 16 characters long')
for i in range(0, len(payload)):
if (ord(payload[i]) < 0x20):
raise SerialProtocolViolation('Invalid ASCII character in string16 payload '+payload)
return payload
def decodeDataType_12(self, payload):
if len(payload) != 8:
raise SerialProtocolViolation('Datatype string8 has to be 8 characters long')
for i in range(0, len(payload)):
if (ord(payload[i]) < 0x20):
raise SerialProtocolViolation('Invalid ASCII character in string8 payload '+payload)
return payload
def decodeDataType_default(self, payload):
raise SerialProtocolViolation('Unknown datatype specified for decoding')
decodeDataType_Dictionary = {
0 : "decodeDataType_0",
1 : "decodeDataType_1",
2 : "decodeDataType_2",
3 : "decodeDataType_3",
4 : "decodeDataType_4",
5 : "decodeDataType_5",
6 : "decodeDataType_6",
7 : "decodeDataType_7",
9 : "decodeDataType_9",
10 : "decodeDataType_10",
11 : "decodeDataType_11",
12 : "decodeDataType_12"
}
def decodeDataType(self, payload, datatype):
fun = self.decodeDataType_Dictionary.get(datatype, self.decodeDataType_default)
return getattr(self, fun)(payload)
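# Dispatch sketch (hypothetical values): the numeric datatype id comes from the
# register tables below, e.g.
# self.decodeDataType('000042', 1) -> 42 (u_integer)
# self.decodeDataType('111111', 0) -> True (boolean_old)
# self.decodeDataType('000150', 2) -> 1.5 (u_real)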
def encodeDataType_0(self, payload):
if not isinstance(payload, bool):
raise SerialProtocolViolation("Trying to encode non boolean {} into bool boolean_old type".format(payload))
if payload == True:
return '111111'
else:
return '000000'
def encodeDataType_1(self, payload):
if not isinstance(payload, int):
raise SerialProtocolViolation("Trying to encode non positive integer {} into u_integer type".format(payload))
if payload < 0:
raise SerialProtocolViolation("Trying to encode non positive integer {} into u_integer type".format(payload))
return '{:06d}'.format(payload)
def encodeDataType_2(self, payload):
if not isinstance(payload, (int, float)):
raise SerialProtocolViolation("Trying to encode non positive floating point value {} into u_real type".format(payload))
if payload < 0:
raise SerialProtocolViolation("Trying to encode non positive floating point value {} into u_real type".format(payload))
return '{:06d}'.format(int(payload * 100.0))
def encodeDataType_default(self, payload):
raise SerialProtocolViolation("Data type not supported for encoding")
encodeDataType_Dictionary = {
0 : "encodeDataType_0",
1 : "encodeDataType_1",
2 : "encodeDataType_2"
# 3 : "encodeDataType_3",
# 4 : "encodeDataType_4",
# 5 : "encodeDataType_5",
# 6 : "encodeDataType_6",
# 7 : "encodeDataType_7",
# 9 : "encodeDataType_9",
# 10 : "encodeDataType_10",
# 11 : "encodeDataType_11",
# 12 : "encodeDataType_12"
}
def encodeDataType(self, payload, datatype):
fun = self.encodeDataType_Dictionary.get(datatype, self.encodeDataType_default)
return getattr(self, fun)(payload)
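# Mirror of decodeDataType (hypothetical values):
# self.encodeDataType(True, 0) -> '111111'
# self.encodeDataType(42, 1) -> '000042'
# self.encodeDataType(1.5, 2) -> '000150'
# Only datatypes 0-2 are currently encodable; all others raise SerialProtocolViolation.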
def decodePacket(self, packet, sentenceDictionary):
if not (("address" in packet) or ("action" in packet) or ("param" in packet) or ("payloadLength" in packet) or ("payloadRaw" in packet)):
raise SerialProtocolViolation("Packet passed does not contain information about a received serial protocol")
regParam = int(packet["param"])
if regParam not in sentenceDictionary:
raise SerialProtocolUnknownRegister("Unknown register {} in packet".format(regParam))
if packet["action"] == 1:
packet["payload"] = self.decodeDataType(packet["payloadRaw"], sentenceDictionary[regParam]["datatype"])
else:
packet["payload"] = "=?"
packet["designation"] = sentenceDictionary[regParam]["designation"]
packet["displayreg"] = sentenceDictionary[regParam]["display"]
packet["regaccess"] = sentenceDictionary[regParam]["access"]
packet["regunit"] = sentenceDictionary[regParam]["unit"]
packet["regmin"] = sentenceDictionary[regParam]["min"]
packet["regmax"] = sentenceDictionary[regParam]["max"]
packet["regdefault"] = sentenceDictionary[regParam]["default"]
packet["regpersistent"] = sentenceDictionary[regParam]["persistent"]
return packet
def encodePacket(self, targetAddress, action, regParam, value, sentenceDictionary, checkWritable = True):
# This function validates the passed value and creates an encoded packet
if regParam not in sentenceDictionary:
raise SerialProtocolViolation("Unknown register {} in dictionary".format(regParam))
if isinstance(value, (int, float)):
if sentenceDictionary[regParam]["min"] is not None:
if value < sentenceDictionary[regParam]["min"]:
raise SerialProtocolViolation("Parameter {} has minimum value of {} but {} supplied".format(regParam, sentenceDictionary[regParam]["min"], value))
if sentenceDictionary[regParam]["max"] is not None:
if value > sentenceDictionary[regParam]["max"]:
raise SerialProtocolViolation("Parameter {} has maximum value of {} but {} supplied".format(regParam, sentenceDictionary[regParam]["max"], value))
if checkWritable and (sentenceDictionary[regParam]["access"] != ACCESS_RW) and (sentenceDictionary[regParam]["access"] != ACCESS_W):
raise SerialProtocolViolation("Parameter {} is not writable".format(regParam))
# Try to encode the data ...
packet = { }
packet["address"] = targetAddress
packet["param"] = regParam
packet["action"] = action
packet["payloadRaw"] = self.encodeDataType(value, sentenceDictionary[regParam]["datatype"])
packet["payloadLength"] = len(packet["payloadRaw"])
packet["payload"] = value
packet["designation"] = sentenceDictionary[regParam]["designation"]
packet["displayreg"] = sentenceDictionary[regParam]["display"]
packet["regaccess"] = sentenceDictionary[regParam]["access"]
packet["regunit"] = sentenceDictionary[regParam]["unit"]
packet["regmin"] = sentenceDictionary[regParam]["min"]
packet["regmax"] = sentenceDictionary[regParam]["max"]
packet["regdefault"] = sentenceDictionary[regParam]["default"]
packet["regpersistent"] = sentenceDictionary[regParam]["persistent"]
# First just build the whole checksummed area
packet["packetRaw"] = "{:03d}{:1d}0{:03d}{:02d}{}".format(targetAddress, action, regParam, len(packet["payloadRaw"]), packet["payloadRaw"])
# Calculate the checksum
packet["packetRaw"] = packet["packetRaw"] + "{:03d}".format(sum(bytearray(packet["packetRaw"].encode(encoding = "ASCII"))) % 256) + "\r"
return packet
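# Usage sketch (`proto` and `port` are hypothetical): switch the pumping
# station of device 1 on via TC110 register 10 (boolean_old, RW):
# packet = proto.encodePacket(1, 1, 10, True, proto.registers["TC110"])
# port.write(packet["packetRaw"].encode("ASCII"))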
ACCESS_R = 0
ACCESS_RW = 1
ACCESS_W = 2
registers = {
"TC110" : {
# Control commands (0xx)
1 : { "datatype" : 0, "access" : ACCESS_RW, "display" : "Heating", "designation" : "Heating", "unit" : None, "min" : 0, "max" : 1, "persistent" : True, "default" : 0, "valueDescriptions" : { 0 : "off", 1 : "on" } },
2 : { "datatype" : 0, "access" : ACCESS_RW, "display" : "Standby", "designation" : "Standby", "unit" : None, "min" : 0, "max" : 1, "persistent" : True, "default" : 0, "valueDescriptions" : { 0 : "off", 1 : "on" } },
4 : { "datatype" : 0, "access" : ACCESS_RW, "display" : "RUTimeCtrl", "designation" : "Run-up time control", "unit" : None, "min" : 0, "max" : 1, "persistent" : True, "default" : 1, "valueDescriptions" : { 0 : "off", 1 : "on" } },
9 : { "datatype" : 0, "access" : ACCESS_W, "display" : "ErrorAckn", "designation" : "Error acknowledgement", "unit" : None, "min" : 1, "max" : 1, "persistent" : False, "default" : None, "valueDescriptions" : { 1 : "Error acknowledgement" } },
10 : { "datatype" : 0, "access" : ACCESS_RW, "display" : "PumpgStatn", "designation" : "Pumping station", "unit" : None, "min" : 0, "max" : 1, "persistent" : True, "default" : 0, "valueDescriptions" : { 0 : "off", 1 : "on and error acknowledgement" } },
12 : { "datatype" : 0, "access" : ACCESS_RW, "display" : "EnableVent", "designation" : "Enable venting", "unit" : None, "min" : 0, "max" : 1, "persistent" : True, "default" : 0, "valueDescriptions" : { 0 : "no", 1 : "yes" } },
17 : { "datatype" : 7, "access" : ACCESS_RW, "display" : "CfgSpdSwPt", "designation" : "Configuration rotation speed switchpoint", "unit" : None, "min" : 0, "max" : 1, "persistent" : True, "default" : 0, "valueDescriptions" : { 0 : "rotation speed switch point 1", 1 : "rotation speed switch points 1 and 2" } },
19 : { "datatype" : 7, "access" : ACCESS_RW, "display" : "Cfg DO2", "designation" : "Configuration output DO2", "unit" : None, "min" : 0, "max" : 22, "persistent" : True, "default" : 1, "valueDescriptions" : { 0 : "Rotation speed switch point reached", 1 : "no error", 2 : "error", 3 : "warning", 4 : "error and/or warning", 5 : "set rotation speed reached", 6 : "pump on", 7 : "pump accelerating", 8 : "pump decelerating", 9 : "always 0", 10 : "always 1", 11 : "remote priority active", 12 : "heating", 13 : "backing pump", 14 : "sealing gas", 15 : "pumping station", 16 : "pump rotates", 17 : "pump does not rotate", 19 : "pressure switch point 1 underrund", 20 : "pressure switch point 2 underrun", 21 : "fore-vacuum valve, delayed", 22 : "backing pump standby" } },
23 : { "datatype" : 0, "access" : ACCESS_RW, "display" : "MotorPump", "designation" : "Motor pump", "unit" : None, "min" : 0, "max" : 1, "persistent" : True, "default" : 1, "valueDescriptions" : { 0 : "off", 1 : "on" } },
24 : { "datatype" : 7, "access" : ACCESS_RW, "display" : "Cfg DO1", "designation" : "Configuration output DO1", "unit" : None, "min" : 0, "max" : 22, "persistent" : True, "default" : 0, "valueDescriptions" : { 0 : "Rotation speed switch point reached", 1 : "no error", 2 : "error", 3 : "warning", 4 : "error and/or warning", 5 : "set rotation speed reached", 6 : "pump on", 7 : "pump accelerating", 8 : "pump decelerating", 9 : "always 0", 10 : "always 1", 11 : "remote priority active", 12 : "heating", 13 : "backing pump", 14 : "sealing gas", 15 : "pumping station", 16 : "pump rotates", 17 : "pump does not rotate", 19 : "pressure switch point 1 underrund", 20 : "pressure switch point 2 underrun", 21 : "fore-vacuum valve, delayed", 22 : "backing pump standby" } },
25 : { "datatype" : 7, "access" : ACCESS_RW, "display" : "OpMode BKP", "designation" : "Backing pump mode", "unit" : None, "min" : 0, "max" : 3, "persistent" : True, "default" : 0, "valueDescriptions" : { 0 : "continuous operation", 1 : "intermittent operation", 2 : "delayed switching on", 3 : "delayed switching off" } },
26 : { "datatype" : 7, "access" : ACCESS_RW, "display" : "SpdSetMode", "designation" : "Rotation speed setting mode", "unit" : None, "min" : 0, "max" : 1, "persistent" : True, "default" : 0, "valueDescriptions" : { 0 : "off", 1 : "on" } },
27 : { "datatype" : 7, "access" : ACCESS_RW, "display" : "GasMode", "designation" : "Gas mode", "unit" : None, "min" : 0, "max" : 2, "persistent" : True, "default" : 0, "valueDescriptions" : { 0 : "heavy gases", 1 : "light gases", 2 : "Helium" } },
30 : { "datatype" : 7, "access" : ACCESS_RW, "display" : "VentMode", "designation" : "Venting mode", "unit" : None, "min" : 0, "max" : 2, "persistent" : True, "default" : 0, "valueDescriptions" : { 0 : "delayed venting", 1 : "no venting", 2 : "direct venting" } },
35 : { "datatype" : 7, "access" : ACCESS_RW, "display" : "Cfg Acc A1", "designation" : "Configuration accessory connection A1", "unit" : None, "min" : 0, "max" : 12, "persistent" : True, "default" : 0, "valueDescriptions" : { 0 : "fan", 1 : "venting valve, closed without current", 2 : "heating", 3 : "backing pump", 4 : "fan (temperatuer controlled)", 5 : "sealing gas", 6 : "always 0", 7 : "always 1", 8 : "power failure venting unit", 9 : "TMS heating", 10 : "TMS cooling", 12 : "Second venting valve", 13 : "Sealing gas monitoring", 14 : "heating (bottom part temperature controlled)" } },
36 : { "datatype" : 7, "access" : ACCESS_RW, "display" : "Cfg Acc B1", "designation" : "Configuration accessory connection B1", "unit" : None, "min" : 0, "max" : 12, "persistent" : True, "default" : 1, "valueDescriptions" : { 0 : "fan", 1 : "venting valve, closed without current", 2 : "heating", 3 : "backing pump", 4 : "fan (temperatuer controlled)", 5 : "sealing gas", 6 : "always 0", 7 : "always 1", 8 : "power failure venting unit", 9 : "TMS heating", 10 : "TMS cooling", 12 : "Second venting valve", 13 : "Sealing gas monitoring", 14 : "heating (bottom part temperature controlled)" } },
37 : { "datatype" : 7, "access" : ACCESS_RW, "display" : "Cfg Acc A2", "designation" : "Configuration accessory connection B1", "unit" : None, "min" : 0, "max" : 12, "persistent" : True, "default" : 3, "valueDescriptions" : { 0 : "fan", 1 : "venting valve, closed without current", 2 : "heating", 3 : "backing pump", 4 : "fan (temperatuer controlled)", 5 : "sealing gas", 6 : "always 0", 7 : "always 1", 8 : "power failure venting unit", 9 : "TMS heating", 10 : "TMS cooling", 12 : "Second venting valve", 13 : "Sealing gas monitoring", 14 : "heating (bottom part temperature controlled)" } },
38 : { "datatype" : 7, "access" : ACCESS_RW, "display" : "Cfg Acc B2", "designation" : "Configuration accessory connection B1", "unit" : None, "min" : 0, "max" : 12, "persistent" : True, "default" : 2, "valueDescriptions" : { 0 : "fan", 1 : "venting valve, closed without current", 2 : "heating", 3 : "backing pump", 4 : "fan (temperatuer controlled)", 5 : "sealing gas", 6 : "always 0", 7 : "always 1", 8 : "power failure venting unit", 9 : "TMS heating", 10 : "TMS cooling", 12 : "Second venting valve", 13 : "Sealing gas monitoring", 14 : "heating (bottom part temperature controlled)" } },
41 : { "datatype" : 7, "access" : ACCESS_RW, "display" : "Press1HVen", "designation" : "Enable integrated HV sensor (IKT only)", "unit" : None, "min" : 0, "max" : 3, "persistent" : True, "default" : 2, "valueDescriptions" : { 0 : "off", 1 : "on", 2 : "on, when rotation speed switch point reached", 3 : "on when pressure switch point underrun" } },
50 : { "datatype" : 0, "access" : ACCESS_RW, "display" : "SealingGas", "designation" : "Sealing gas", "unit" : None, "min" : 0, "max" : 1, "persistent" : True, "default" : 0, "valueDescriptions" : { 0 : "off", 1 : "on" } },
55 : { "datatype" : 7, "access" : ACCESS_RW, "display" : "Cfg AO1", "designation" : "Configurtation output AO1", "unit" : None, "min" : 0, "max" : 8, "persistent" : True, "default" : 0, "valueDescriptions" : { 0 : "Actual rotation speed", 1 : "Output", 2 : "Current", 3 : "Always 0V", 4 : "Always 10V", 6 : "Pressure value 1", 7 : "Pressure value 2", 8 : "Fore-vacuum control" } },
60 : { "datatype" : 7, "access" : ACCESS_RW, "display" : "CtrlViaInt", "designation" : "Control via Interface", "unit" : None, "min" : 0, "max" : 255, "persistent" : True, "default" : 1, "valueDescriptions" : { 1 : "Remote", 2 : "RS-485", 4 : "PV.can", 8 : "Fieldbus", 16 : "E74", 255 : "Unlock interface selection" } },
61 : { "datatype" : 0, "access" : ACCESS_RW, "display" : "IntSelLckd", "designation" : "Interface selection locked", "unit" : None, "min" : 1, "max" : 1, "persistent" : True, "default" : 0, "valueDescriptions" : { 0 : "off", 1 : "on" } },
62 : { "datatype" : 7, "access" : ACCESS_RW, "display" : "Cfg DI1", "designation" : "Configuration input DI1", "unit" : None, "min" : 0, "max" : 7, "persistent" : True, "default" : 1, "valueDescriptions" : { 0 : "deactivated" , 1 : "enable venting", 2 : "heating", 3 : "sealing gas", 4 : "run-up time monitoring", 5 : "rotation speed mode", 6 : "motor", 7 : "enable HV sensor 1" } },
63 : { "datatype" : 7, "access" : ACCESS_RW, "display" : "Cfg DI2", "designation" : "Configuration input DI2", "unit" : None, "min" : 0, "max" : 7, "persistent" : True, "default" : 2, "valueDescriptions" : { 0 : "deactivated" , 1 : "enable venting", 2 : "heating", 3 : "sealing gas", 4 : "run-up time monitoring", 5 : "rotation speed mode", 6 : "motor", 7 : "enable HV sensor 1" } },
# Status requests (3xx)
300 : { "datatype" : 0, "access" : ACCESS_R, "display" : "RemotePrio", "designation" : "Remote priority", "unit" : None, "min" : 0, "max" : 1, "persistent" : False, "default" : None, "valueDescriptions" : { 0 : "no", 1 : "yes" } },
302 : { "datatype" : 0, "access" : ACCESS_R, "display" : "SpdSwPtAtt", "designation" : "Rotation speed switchpoint attained", "unit" : None, "min" : 0, "max" : 1, "persistent" : False, "default" : None, "valueDescriptions" : { 0 : "no", 1 : "yes" } },
303 : { "datatype" : 4, "access" : ACCESS_R, "display" : "Error code", "designation" : "Error code", "unit" : None, "min" : None, "max" : None, "persistent" : False, "default" : None, "valueDescriptions" : None },
304 : { "datatype" : 0, "access" : ACCESS_R, "display" : "OvTempElec", "designation" : "Excess temperature electronic drive unit", "unit" : None, "min" : 0, "max" : 1, "persistent" : False, "default" : None, "valueDescriptions" : { 0 : "no", 1 : "yes" } },
305 : { "datatype" : 0, "access" : ACCESS_R, "display" : "OvTempPump", "designation" : "Excess temperature pump", "unit" : None, "min" : 0, "max" : 1, "persistent" : False, "default" : None, "valueDescriptions" : { 0 : "no", 1 : "yes" } },
306 : { "datatype" : 0, "access" : ACCESS_R, "display" : "SetSpdAtt", "designation" : "Set rotation speed attained", "unit" : None, "min" : 0, "max" : 1, "persistent" : False, "default" : None, "valueDescriptions" : { 0 : "no", 1 : "yes" } },
307 : { "datatype" : 0, "access" : ACCESS_R, "display" : "PumpAccel", "designation" : "Pump accelerates", "unit" : None, "min" : 0, "max" : 1, "persistent" : False, "default" : None, "valueDescriptions" : { 0 : "no", 1 : "yes" } },
308 : { "datatype" : 1, "access" : ACCESS_R, "display" : "SetRotSpd", "designation" : "Set rotation speed", "unit" : "Hz", "min" : 0, "max" : 999999, "persistent" : False, "default" : None, "valueDescriptions" : None },
309 : { "datatype" : 1, "access" : ACCESS_R, "display" : "ActualSpd", "designation" : "Active rotation speed", "unit" : "Hz", "min" : 0, "max" : 999999, "persistent" : False, "default" : None, "valueDescriptions" : None },
310 : { "datatype" : 2, "access" : ACCESS_R, "display" : "DrvCurrent", "designation" : "Drive current", "unit" : "A", "min" : 0, "max" : 9999.99, "persistent" : False, "default" : None, "valueDescriptions" : None },
311 : { "datatype" : 1, "access" : ACCESS_R, "display" : "OpHrsPump", "designation" : "Operating hours pump", "unit" : "h", "min" : 0, "max" : 65535, "persistent" : True, "default" : None, "valueDescriptions" : None },
312 : { "datatype" : 4, "access" : ACCESS_R, "display" : "Fw version", "designation" : "Firmware version electronic drive unit", "unit" : None, "min" : None, "max" : None, "persistent" : False, "default" : None, "valueDescriptions" : None },
313 : { "datatype" : 2, "access" : ACCESS_R, "display" : "DrvVoltage", "designation" : "Drive voltage", "unit" : "V", "min" : 0, "max" : 9999.99, "persistent" : False, "default" : None, "valueDescriptions" : None },
314 : { "datatype" : 1, "access" : ACCESS_R, "display" : "OpHrsElec", "designation" : "Operating hours pump", "unit" : "h", "min" : 0, "max" : 65535, "persistent" : True, "default" : None, "valueDescriptions" : None },
315 : { "datatype" : 1, "access" : ACCESS_R, "display" : "Nominal Spd", "designation" : "Nominal rotation speed", "unit" : "Hz", "min" : 0, "max" : 999999, "persistent" : False, "default" : None, "valueDescriptions" : None },
316 : { "datatype" : 1, "access" : ACCESS_R, "display" : "DrvPower", "designation" : "Drive power", "unit" : "W", "min" : 0, "max" : 999999, "persistent" : False, "default" : None, "valueDescriptions" : None },
319 : { "datatype" : 1, "access" : ACCESS_R, "display" : "PumpCycles", "designation" : "Pump cycles", "unit" : None, "min" : 0, "max" : 65535, "persistent" : True, "default" : None, "valueDescriptions" : None },
326 : { "datatype" : 1, "access" : ACCESS_R, "display" : "TempElec", "designation" : "Temperature electronic", "unit" : "C", "min" : 0, "max" : 999999, "persistent" : False, "default" : None, "valueDescriptions" : None },
330 : { "datatype" : 1, "access" : ACCESS_R, "display" : "TempPmpBot", "designation" : "Temperature pump bottom part", "unit" : "C" , "min" : 0, "max" : 999999, "persistent" : False, "default" : None, "valueDescriptions" : None },
336 : { "datatype" : 1, "access" : ACCESS_R, "display" : "AccelDecel", "designation" : "Acceleration / Deceleration", "unit" : "rpm/s", "min" : 0, "max" : 999999, "persistent" : False, "default" : None, "valueDescriptions" : None },
342 : { "datatype" : 1, "access" : ACCESS_R, "display" : "TempBearng", "designation" : "Temperature bearing", "unit" : "C", "min" : 0, "max" : 999999, "persistent" : False, "default" : None, "valueDescriptions" : None },
346 : { "datatype" : 1, "access" : ACCESS_R, "display" : "TempMotor", "designation" : "Temperature motor", "unit" : "C", "min" : 0, "max" : 999999, "persistent" : False, "default" : None, "valueDescriptions" : None },
349 : { "datatype" : 4, "access" : ACCESS_R, "display" : "ElecName", "designation" : "Name of electronic drive unit", "unit" : None, "min" : None, "max" : None, "persistent" : False, "default" : None, "valueDescriptions" : None },
354 : { "datatype" : 4, "access" : ACCESS_R, "display" : "HwVersion", "designation" : "Hardware version electronic drive unit", "unit" : None, "min" : None, "max" : None, "persistent" : False, "default" : None, "valueDescriptions" : None },
360 : { "datatype" : 4, "access" : ACCESS_R, "display" : "ErrHist1", "designation" : "Error code history, position 1", "unit" : None, "min" : None, "max" : None, "persistent" : False, "default" : None, "valueDescriptions" : None },
361 : { "datatype" : 4, "access" : ACCESS_R, "display" : "ErrHist2", "designation" : "Error code history, position 2", "unit" : None, "min" : None, "max" : None, "persistent" : False, "default" : None, "valueDescriptions" : None },
362 : { "datatype" : 4, "access" : ACCESS_R, "display" : "ErrHist3", "designation" : "Error code history, position 3", "unit" : None, "min" : None, "max" : None, "persistent" : False, "default" : None, "valueDescriptions" : None },
363 : { "datatype" : 4, "access" : ACCESS_R, "display" : "ErrHist4", "designation" : "Error code history, position 4", "unit" : None, "min" : None, "max" : None, "persistent" : False, "default" : None, "valueDescriptions" : None },
364 : { "datatype" : 4, "access" : ACCESS_R, "display" : "ErrHist5", "designation" : "Error code history, position 5", "unit" : None, "min" : None, "max" : None, "persistent" : False, "default" : None, "valueDescriptions" : None },
365 : { "datatype" : 4, "access" : ACCESS_R, "display" : "ErrHist6", "designation" : "Error code history, position 6", "unit" : None, "min" : None, "max" : None, "persistent" : False, "default" : None, "valueDescriptions" : None },
366 : { "datatype" : 4, "access" : ACCESS_R, "display" : "ErrHist7", "designation" : "Error code history, position 7", "unit" : None, "min" : None, "max" : None, "persistent" : False, "default" : None, "valueDescriptions" : None },
367 : { "datatype" : 4, "access" : ACCESS_R, "display" : "ErrHist8", "designation" : "Error code history, position 8", "unit" : None, "min" : None, "max" : None, "persistent" : False, "default" : None, "valueDescriptions" : None },
368 : { "datatype" : 4, "access" : ACCESS_R, "display" : "ErrHist9", "designation" : "Error code history, position 9", "unit" : None, "min" : None, "max" : None, "persistent" : False, "default" : None, "valueDescriptions" : None },
369 : { "datatype" : 4, "access" : ACCESS_R, "display" : "ErrHist10", "designation" : "Error code history, position 10", "unit" : None, "min" : None, "max" : None, "persistent" : False, "default" : None, "valueDescriptions" : None },
397 : { "datatype" : 1, "access" : ACCESS_R, "display" : "SetRotSpd", "designation" : "Set rotation speed", "unit" : "rpm", "min" : None, "max" : None, "persistent" : False, "default" : None, "valueDescriptions" : None },
398 : { "datatype" : 1, "access" : ACCESS_R, "display" : "ActualSpd", "designation" : "Actual rotation speed", "unit" : "rpm", "min" : None, "max" : None, "persistent" : False, "default" : None, "valueDescriptions" : None },
399 : { "datatype" : 1, "access" : ACCESS_R, "display" : "NominalSpd", "designation" : "Nominal rotation speed", "unit" : "rpm", "min" : None, "max" : None, "persistent" : False, "default" : None, "valueDescriptions" : None },
# Set value settings (7xx)
700 : { "datatype" : 1, "access" : ACCESS_RW, "display" : "RUTimeSVal", "designation" : "Set value run-up time", "unit" : "min", "min" : 1, "max" : 120, "persistent" : True, "default" : 8 , "valueDescriptions" : None },
701 : { "datatype" : 1, "access" : ACCESS_RW, "display" : "SPdSwPt1", "designation" : "Rotation speed switchpoint 1", "unit" : "%", "min" : 50, "max" : 97, "persistent" : True, "default" : 80 , "valueDescriptions" : None },
707 : { "datatype" : 2, "access" : ACCESS_RW, "display" : "SpdSVal", "designation" : "Set value in rotation speed setting mode", "unit" : "%", "min" : 20, "max" : 100, "persistent" : True, "default" : 65 , "valueDescriptions" : None },
708 : { "datatype" : 7, "access" : ACCESS_RW, "display" : "PwrSVal", "designation" : "Set value power consumption", "unit" : "%", "min" : 0, "max" : 100, "persistent" : True, "default" : 100 , "valueDescriptions" : None },
710 : { "datatype" : 1, "access" : ACCESS_RW, "display" : "Swoff BKP", "designation" : "Switching off threshold for backing pump", "unit" : "W", "min" : 0, "max" : 1000, "persistent" : True, "default" : 0 , "valueDescriptions" : None },
711 : { "datatype" : 1, "access" : ACCESS_RW, "display" : "Swon BKP", "designation" : "Switching on threshold for backing pump", "unit" : "W", "min" : 0, "max" : 1000, "persistent" : True, "default" : 0 , "valueDescriptions" : None },
717 : { "datatype" : 2, "access" : ACCESS_RW, "display" : "StdbySVal", "designation" : "Set value rotation speed at standby", "unit" : "%", "min" : 20, "max" : 100, "persistent" : True, "default" : 66.7, "valueDescriptions" : None },
719 : { "datatype" : 1, "access" : ACCESS_RW, "display" : "SpdSwPt2", "designation" : "Rotation speed switchpoint 2", "unit" : "%", "min" : 5, "max" : 97, "persistent" : True, "default" : 20 , "valueDescriptions" : None },
720 : { "datatype" : 7, "access" : ACCESS_RW, "display" : "VentSpd", "designation" : "Venting rotation speed at delayed venting", "unit" : "%", "min" : 40, "max" : 98, "persistent" : True, "default" : 50 , "valueDescriptions" : None },
721 : { "datatype" : 1, "access" : ACCESS_RW, "display" : "VentTime", "designation" : "Venting time at delayed venting", "unit" : "s", "min" : 6, "max" : 3600, "persistent" : True, "default" : 3600, "valueDescriptions" : None },
730 : { "datatype" : 10, "access" : ACCESS_RW, "display" : "PrsSwPt 1", "designation" : "Pressure switchpoint 1", "unit" : "hPa", "min" : None, "max" : None, "persistent" : True, "default" : None, "valueDescriptions" : None },
732 : { "datatype" : 10, "access" : ACCESS_RW, "display" : "PrsSwPt 2", "designation" : "Pressure switchpoint 2", "unit" : "hPa", "min" : None, "max" : None, "persistent" : True, "default" : None, "valueDescriptions" : None },
739 : { "datatype" : 4, "access" : ACCESS_R, "display" : "PrsSn1Name", "designation" : "Pressure sensor 1 name", "unit" : None, "min" : None, "max" : None, "persistent" : False, "default" : None, "valueDescriptions" : None },
740 : { "datatype" : 10, "access" : ACCESS_RW, "display" : "Pressure 1", "designation" : "Pressure value 1", "unit" : "hPa", "min" : None, "max" : None, "persistent" : True, "default" : None, "valueDescriptions" : None },
742 : { "datatype" : 2, "access" : ACCESS_RW, "display" : "PrsCorrPi 1", "designation" : "Pressure correction factor 1", "unit" : None, "min" : None, "max" : None, "persistent" : True, "default" : None, "valueDescriptions" : None },
749 : { "datatype" : 4, "access" : ACCESS_R, "display" : "PrsSn2Name", "designation" : "Pressure sensor 2 name", "unit" : None, "min" : None, "max" : None, "persistent" : False, "default" : None, "valueDescriptions" : None },
750 : { "datatype" : 10, "access" : ACCESS_RW, "display" : "Pressure 2", "designation" : "Pressure value 2", "unit" : "hPa", "min" : None, "max" : None, "persistent" : True, "default" : None, "valueDescriptions" : None },
752 : { "datatype" : 2, "access" : ACCESS_RW, "display" : "PrsCorrPi2", "designation" : "Pressure correction factor 2", "unit" : None, "min" : None, "max" : None, "persistent" : True, "default" : None, "valueDescriptions" : None },
777 : { "datatype" : 1, "access" : ACCESS_RW, "display" : "NomSpdConf", "designation" : "Nomial rotation speed confirmation", "unit" : "Hz", "min" : 0, "max" : 1500, "persistent" : True, "default" : 0 , "valueDescriptions" : None },
797 : { "datatype" : 1, "access" : ACCESS_RW, "display" : "RS485Adr", "designation" : "RS-485 device address", "unit" : None, "min" : 1, "max" : 255, "persistent" : True, "default" : 1 , "valueDescriptions" : None },
# Additional values for DCU
340 : { "datatype" : 7, "access" : ACCESS_R, "display" : "Pressure", "designation" : "Actual pressure value (ActiveLine)", "unit" : "hPa", "min" : 1e-10, "max" : 1e3, "persistent" : False, "default" : None, "valueDescriptions" : None },
350 : { "datatype" : 4, "access" : ACCESS_R, "display" : "Ctr Name", "designation" : "Display and control panel type", "unit" : None, "min" : None, "max" : None, "persistent" : False, "default" : None, "valueDescriptions" : None },
351 : { "datatype" : 4, "access" : ACCESS_R, "display" : "Ctr Software","designation" : "Display and control panel software version","unit" : None, "min" : None, "max" : None, "persistent" : False, "default" : None, "valueDescriptions" : None },
738 : { "datatype" : 4, "access" : ACCESS_RW, "display" : "Gauge type", "designation" : "Type of pressure gauge", "unit" : None, "min" : None, "max" : None, "persistent" : False, "default" : None, "valueDescriptions" : None },
794 : { "datatype" : 7, "access" : ACCESS_RW, "display" : "Param set", "designation" : "Parameter set", "unit" : None, "min" : 0, "max" : 1, "persistent" : False, "default" : 0, "valueDescriptions" : { 0 : "Basic parameter set", 1 : "Extended parameter set" } },
795 : { "datatype" : 7, "access" : ACCESS_RW, "display" : "Servicelin", "designation" : "Insert service line", "unit" : None, "min" : None, "max" : None, "persistent" : False, "default" : 795, "valueDescriptions" : None }
},
"MVP015" : {
2 : { "datatype" : 0, "access" : ACCESS_RW, "display" : "Standby", "designation" : "Standby", "unit" : None, "min" : 0, "max" : 1, "persistent" : True, "default" : 0 , "valueDescriptions" : { 0 : "off", 1 : "on" } },
9 : { "datatype" : 0, "access" : ACCESS_W, "display" : "ErrorAckn", "designation" : "Fault acknowledgement", "unit" : None, "min" : 1, "max" : 1, "persistent" : False, "default" : None, "valueDescriptions" : None },
10 : { "datatype" : 0, "access" : ACCESS_RW, "display" : "PumpgStatn", "designation" : "Pump", "unit" : None, "min" : 0, "max" : 1, "persistent" : True, "default" : 0 , "valueDescriptions" : { 0 : "off", 1 : "on" } },
19 : { "datatype" : 7, "access" : ACCESS_RW, "display" : "Cfg DO2", "designation" : "Configuration output DO2", "unit" : None, "min" : 0, "max" : 20, "persistent" : True, "default" : 5 , "valueDescriptions" : { 1 : "no error", 2 : "error", 5 : "target speed is reached", 6: "pump on", 9 : "always 0", 10 : "always 1", 11: "remote priority active" } },
24 : { "datatype" : 7, "access" : ACCESS_RW, "display" : "Cfg DO1", "designation" : "Configuration output DO1", "unit" : None, "min" : 0, "max" : 20, "persistent" : True, "default" : 1 , "valueDescriptions" : { 1 : "no error", 2 : "error", 5 : "target speed is reached", 6: "pump on", 9 : "always 0", 10 : "always 1", 11: "remote priority active" } },
26 : { "datatype" : 7, "access" : ACCESS_RW, "display" : "SpdSetMode", "designation" : "Speed setting mode", "unit" : None, "min" : 0, "max" : 1, "persistent" : True, "default" : 0 , "valueDescriptions" : { 0 : "off", 1 : "on" } },
30 : { "datatype" : 7, "access" : ACCESS_RW, "display" : "ValveMode", "designation" : "Purge gas configuration", "unit" : None, "min" : 0, "max" : 2, "persistent" : True, "default" : 0 , "valueDescriptions" : { 0 : "auto", 1 : "closed", 2 : "open" } },
50 : { "datatype" : 0, "access" : ACCESS_RW, "display" : "PurgeGas", "designation" : "Purge gas", "unit" : None, "min" : 0, "max" : 1, "persistent" : True, "default" : 0 , "valueDescriptions" : { 0 : "off", 1 : "on" } },
60 : { "datatype" : 7, "access" : ACCESS_RW, "display" : "CtrlViaInt", "designation" : "Control via interface", "unit" : None, "min" : 0, "max" : 255, "persistent" : True, "default" : 1 , "valueDescriptions" : { 1 : "remote", 2 : "RS-485", 4 : "PV.can", 255 : "Unlock the interface selection" } },
61 : { "datatype" : 0, "access" : ACCESS_RW, "display" : "IntSelLckd", "designation" : "Interface selection locked", "unit" : None, "min" : 0, "max" : 1, "persistent" : True, "default" : 0 , "valueDescriptions" : { 0 : "off", 1 : "on" } },
303 : { "datatype" : 4, "access" : ACCESS_R, "display" : "Error code", "designation" : "Error code", "unit" : None, "min" : None, "max" : None, "persistent" : False, "default" : None, "valueDescriptions" : None },
309 : { "datatype" : 1, "access" : ACCESS_R, "display" : "ActualSpd", "designation" : "Actual speed", "unit" : "Hz", "min" : None, "max" : None, "persistent" : False, "default" : None, "valueDescriptions" : None },
310 : { "datatype" : 2, "access" : ACCESS_R, "display" : "DrvCurrent", "designation" : "Drive current", "unit" : "A", "min" : None, "max" : None, "persistent" : False, "default" : None, "valueDescriptions" : None },
311 : { "datatype" : 1, "access" : ACCESS_R, "display" : "OpHrsPump", "designation" : "Pump operating hours", "unit" : "h", "min" : None, "max" : None, "persistent" : True, "default" : None, "valueDescriptions" : None },
312 : { "datatype" : 4, "access" : ACCESS_R, "display" : "Fw version", "designation" : "Software version of the interface board", "unit" : None, "min" : None, "max" : None, "persistent" : False, "default" : None, "valueDescriptions" : None },
313 : { "datatype" : 2, "access" : ACCESS_R, "display" : "DrvVoltage", "designation" : "Supply voltage", "unit" : "V", "min" : None, "max" : None, "persistent" : False, "default" : None, "valueDescriptions" : None },
314 : { "datatype" : 1, "access" : ACCESS_R, "display" : "OpHrsElec", "designation" : "Electronic drive unit operating hours", "unit" : "h", "min" : None, "max" : None, "persistent" : True, "default" : None, "valueDescriptions" : None },
315 : { "datatype" : 1, "access" : ACCESS_R, "display" : "Nominal Spd", "designation" : "Nominal speed", "unit" : "Hz", "min" : None, "max" : None, "persistent" : False, "default" : None, "valueDescriptions" : None },
316 : { "datatype" : 1, "access" : ACCESS_R, "display" : "DrvPower", "designation" : "Drive power", "unit" : "W", "min" : None, "max" : None, "persistent" : False, "default" : None, "valueDescriptions" : None },
330 : { "datatype" : 1, "access" : ACCESS_R, "display" : "TempPmpBot", "designation" : "Temperature of pump", "unit" : "C", "min" : None, "max" : None, "persistent" : False, "default" : None, "valueDescriptions" : None },
349 : { "datatype" : 4, "access" : ACCESS_R, "display" : "ElecName", "designation" : "Device designation", "unit" : None, "min" : None, "max" : None, "persistent" : False, "default" : None, "valueDescriptions" : None },
398 : { "datatype" : 4, "access" : ACCESS_R, "display" : "HW version", "designation" : "Hardware version of the interface board", "unit" : None, "min" : None, "max" : None, "persistent" : False, "default" : None, "valueDescriptions" : None },
354 : { "datatype" : 1, "access" : ACCESS_R, "display" : "ActualSpd", "designation" : "Actual speed", "unit" : "rpm", "min" : None, "max" : None, "persistent" : False, "default" : None, "valueDescriptions" : None },
399 : { "datatype" : 1, "access" : ACCESS_R, "display" : "NominalSpd", "designation" : "Nominal speed", "unit" : "rpm", "min" : None, "max" : None, "persistent" : False, "default" : None, "valueDescriptions" : None },
707 : { "datatype" : 2, "access" : ACCESS_RW, "display" : "SpdSVal", "designation" : "Setpoint in speed setting mode", "unit" : "%", "min" : 30, "max" : 170, "persistent" : True, "default" : 75 , "valueDescriptions" : None },
717 : { "datatype" : 2, "access" : ACCESS_RW, "display" : "StdbySVal", "designation" : "Setpoint speed in standby mode", "unit" : "%", "min" : 30, "max" : 100, "persistent" : True, "default" : 66.7, "valueDescriptions" : None },
721 : { "datatype" : 1, "access" : ACCESS_RW, "display" : "SlgVlvTime", "designation" : "Setting for purge gas active", "unit" : "s", "min" : 5, "max" : 255, "persistent" : True, "default" : 60 , "valueDescriptions" : None },
797 : { "datatype" : 1, "access" : ACCESS_RW, "display" : "RS485Adr", "designation" : "RS485 interface address", "unit" : None, "min" : 1, "max" : 255, "persistent" : True, "default" : 2 , "valueDescriptions" : None }
}
}
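# The tables above are the `sentenceDictionary` arguments expected by
# decodePacket/encodePacket, e.g. registers["TC110"][309] describes the
# read-only actual rotation speed register (datatype 1, unit Hz).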
| 111.672018 | 819 | 0.551521 |
953c89c952d810fd9049199b6b71cca25092c14a | 14437 | py | Python | grr/lib/lexer.py | panhania/grr | fe16a7311a528e31fe0e315a880e98273b8df960 | ["Apache-2.0"] | null | null | null | grr/lib/lexer.py | panhania/grr | fe16a7311a528e31fe0e315a880e98273b8df960 | ["Apache-2.0"] | 1 | 2018-05-08T21:15:51.000Z | 2018-05-08T21:15:51.000Z | grr/lib/lexer.py | panhania/grr | fe16a7311a528e31fe0e315a880e98273b8df960 | ["Apache-2.0"] | null | null | null | #!/usr/bin/env python
"""An LL(1) lexer. This lexer is very tolerant of errors and can resync."""
import logging
import re
from grr.lib import utils
class Token(object):
"""A token action."""
state_regex = None
def __init__(self, state_regex, regex, actions, next_state, flags=re.I):
"""Constructor.
Args:
state_regex: If this regular expression matches the current state this
rule is considered.
regex: A regular expression to try and match from the current point.
actions: A comma separated list of method names in the Lexer to call.
next_state: The next state we transition to if this Token matches.
flags: re flags.
"""
if state_regex:
self.state_regex = re.compile(state_regex, re.DOTALL | re.M | re.S | re.U
| flags)
self.regex = re.compile(regex, re.DOTALL | re.M | re.S | re.U | flags)
self.re_str = regex
self.actions = []
if actions:
self.actions = actions.split(",")
self.next_state = next_state
def Action(self, lexer):
"""Method is called when the token matches."""
class Error(Exception):
"""Module exception."""
class ParseError(Error):
"""A parse error occured."""
class Lexer(object):
"""A generic feed lexer."""
# A list of Token() instances.
tokens = []
# Regex flags
flags = 0
def __init__(self, data=""):
# Set the lexer up to process a new data feed.
self.Reset()
# Populate internal token list with class tokens, if defined.
self._tokens = self.tokens[:]
# Populate the lexer with any data we got.
self.buffer = utils.SmartStr(data)
def Reset(self):
"""Reset the lexer to process a new data feed."""
# The first state
self.state = "INITIAL"
self.state_stack = []
# The buffer we are parsing now
self.buffer = ""
self.error = 0
self.verbose = 0
# The index into the buffer where we are currently pointing
self.processed = 0
self.processed_buffer = ""
def NextToken(self):
"""Fetch the next token by trying to match any of the regexes in order."""
# Nothing in the input stream - no token can match.
if not self.buffer:
return
current_state = self.state
for token in self._tokens:
# Does the rule apply to us?
if token.state_regex and not token.state_regex.match(current_state):
continue
if self.verbose:
logging.debug("%s: Trying to match %r with %r", self.state,
self.buffer[:10], token.re_str)
# Try to match the rule
m = token.regex.match(self.buffer)
if not m:
continue
if self.verbose:
logging.debug("%s matched %s", token.re_str, m.group(0))
# A token matched the empty string. We can not consume the token from the
# input stream.
if m.end() == 0:
raise RuntimeError("Lexer bug! Token can not match the empty string.")
# The match consumes the data off the buffer (the handler can put it back
# if it likes)
self.processed_buffer += self.buffer[:m.end()]
self.buffer = self.buffer[m.end():]
self.processed += m.end()
next_state = token.next_state
for action in token.actions:
if self.verbose:
logging.debug("Calling %s with %s", action, m.group(0))
# Is there a callback to handle this action?
cb = getattr(self, action, self.Default)
# Allow a callback to skip other callbacks.
try:
possible_next_state = cb(string=m.group(0), match=m)
if possible_next_state == "CONTINUE":
continue
# Override the state from the Token
elif possible_next_state:
next_state = possible_next_state
except ParseError as e:
self.Error(e)
# Update the next state
if next_state:
self.state = next_state
return token
# None of the tokens matched: record an error, drop one character from the
# buffer and try to resynchronize.
self.Error("Lexer stuck at state %s" % (self.state))
self.processed_buffer += self.buffer[:1]
self.buffer = self.buffer[1:]
return "Error"
def Feed(self, data):
self.buffer += data
def Empty(self):
return not self.buffer
def Default(self, **kwarg):
logging.debug("Default handler: %s", kwarg)
def Error(self, message=None, weight=1):
logging.debug("Error(%s): %s", weight, message)
# Keep a count of errors
self.error += weight
def PushState(self, **_):
"""Push the current state on the state stack."""
if self.verbose:
logging.debug("Storing state %r", self.state)
self.state_stack.append(self.state)
def PopState(self, **_):
"""Pop the previous state from the stack."""
try:
self.state = self.state_stack.pop()
if self.verbose:
logging.debug("Returned state to %s", self.state)
return self.state
except IndexError:
self.Error("Tried to pop the state but failed - possible recursion error")
def PushBack(self, string="", **_):
"""Push the match back on the stream."""
self.buffer = string + self.buffer
self.processed_buffer = self.processed_buffer[:-len(string)]
def Close(self):
"""A convenience function to force us to parse all the data."""
while self.NextToken():
if not self.buffer:
return
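# Minimal usage sketch (not part of grr): subclass Lexer with a token table
# whose `actions` column names callback methods on the subclass.
#
# class WordLexer(Lexer):
#   tokens = [Token(None, r"[a-z]+", "Word", None),
#             Token(None, r"[,\s]+", None, None)]
#
#   def Word(self, string="", **_):
#     logging.info("word: %s", string)
#
# WordLexer("foo, bar").Close()  # calls Word("foo"), then Word("bar")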
class SelfFeederMixIn(Lexer):
"""This mixin is used to make a lexer which feeds itself.
Note that self.fd must be the fd we read from.
"""
def __init__(self, fd=""):
self.fd = fd
super(SelfFeederMixIn, self).__init__()
def NextToken(self, end=True):
# If we don't have enough data - feed ourselves: We assume
# that we must have at least one sector in our buffer.
if len(self.buffer) < 512:
if self.Feed() == 0 and not self.buffer:
return None
return Lexer.NextToken(self)
def Feed(self, size=512):
data = self.fd.read(size)
Lexer.Feed(self, data)
return len(data)
class Expression(object):
"""A class representing an expression."""
attribute = None
args = None
operator = None
# The expected number of args
number_of_args = 1
def __init__(self):
self.args = []
def SetAttribute(self, attribute):
self.attribute = attribute
def SetOperator(self, operator):
self.operator = operator
def AddArg(self, arg):
"""Adds a new arg to this expression.
Args:
arg: The argument to add (string).
Returns:
True if this arg is the last arg, False otherwise.
Raises:
ParseError: If there are too many args.
"""
self.args.append(arg)
if len(self.args) > self.number_of_args:
raise ParseError("Too many args for this expression.")
elif len(self.args) == self.number_of_args:
return True
return False
def __str__(self):
return "Expression: (%s) (%s) %s" % (self.attribute, self.operator,
self.args)
def PrintTree(self, depth=""):
return "%s %s" % (depth, self)
def Compile(self, filter_implemention):
"""Given a filter implementation, compile this expression."""
raise NotImplementedError(
"%s does not implement Compile." % self.__class__.__name__)
class BinaryExpression(Expression):
"""An expression which takes two other expressions."""
def __init__(self, operator="", part=None):
self.operator = operator
self.args = []
if part:
self.args.append(part)
super(BinaryExpression, self).__init__()
def __str__(self):
return "Binary Expression: %s %s" % (self.operator,
[str(x) for x in self.args])
def AddOperands(self, lhs, rhs):
if isinstance(lhs, Expression) and isinstance(rhs, Expression):
self.args.insert(0, lhs)
self.args.append(rhs)
else:
raise ParseError("Expected expression, got %s %s %s" %
(lhs, self.operator, rhs))
def PrintTree(self, depth=""):
result = "%s%s\n" % (depth, self.operator)
for part in self.args:
result += "%s-%s\n" % (depth, part.PrintTree(depth + " "))
return result
def Compile(self, filter_implemention):
"""Compile the binary expression into a filter object."""
operator = self.operator.lower()
if operator == "and" or operator == "&&":
method = "AndFilter"
elif operator == "or" or operator == "||":
method = "OrFilter"
else:
raise ParseError("Invalid binary operator %s" % operator)
args = [x.Compile(filter_implemention) for x in self.args]
return filter_implemention.GetFilter(method)(*args)
class IdentityExpression(Expression):
"""An Expression which always evaluates to True."""
def Compile(self, filter_implemention):
return filter_implemention.IdentityFilter()
class SearchParser(Lexer):
"""This parser can parse the mini query language and build an AST.
Examples of valid syntax:
filename contains "foo" and (size > 100k or date before "2011-10")
date between 2011 and 2010
files older than 1 year
"""
expression_cls = Expression
binary_expression_cls = BinaryExpression
identity_expression_cls = IdentityExpression
string = ""
tokens = [
# Double quoted string
Token("STRING", "\"", "PopState,StringFinish", None),
Token("STRING", r"\\(.)", "StringEscape", None),
Token("STRING", r"[^\\\"]+", "StringInsert", None),
# Single quoted string
Token("SQ_STRING", "'", "PopState,StringFinish", None),
Token("SQ_STRING", r"\\(.)", "StringEscape", None),
Token("SQ_STRING", r"[^\\']+", "StringInsert", None),
# TODO(user): Implement a unary not operator.
# The first thing we see in the initial state takes up to the ATTRIBUTE
Token("INITIAL", r"(and|or|\&\&|\|\|)", "BinaryOperator", None),
Token("INITIAL", r"[^\s\(\)]", "PushState,PushBack", "ATTRIBUTE"),
Token("INITIAL", r"\(", "BracketOpen", None),
Token("INITIAL", r"\)", "BracketClose", None),
Token("ATTRIBUTE", r"[\w._0-9]+", "StoreAttribute", "OPERATOR"),
Token("OPERATOR", r"[a-z0-9<>=\-\+\!\^\&%]+", "StoreOperator",
"ARG_LIST"),
Token("OPERATOR", "(!=|[<>=])", "StoreSpecialOperator", "ARG_LIST"),
Token("ARG_LIST", r"[^\s'\"]+", "InsertArg", None),
# Start a string.
Token(".", "\"", "PushState,StringStart", "STRING"),
Token(".", "'", "PushState,StringStart", "SQ_STRING"),
# Skip whitespace.
Token(".", r"\s+", None, None),
]
def __init__(self, data):
# Holds expression
self.current_expression = self.expression_cls()
self.filter_string = data
# The token stack
self.stack = []
Lexer.__init__(self, data)
def BinaryOperator(self, string=None, **_):
self.stack.append(self.binary_expression_cls(string))
def BracketOpen(self, **_):
self.stack.append("(")
def BracketClose(self, **_):
self.stack.append(")")
def StringStart(self, **_):
self.string = ""
def StringEscape(self, string, match, **_):
"""Escape backslashes found inside a string quote.
Backslashes followed by anything other than ['"rnbt] will just be included
in the string.
Args:
string: The string that matched.
match: The match object (m.group(1) is the escaped code)
"""
if match.group(1) in "'\"rnbt":
self.string += string.decode("string_escape")
else:
self.string += string
def StringInsert(self, string="", **_):
self.string += string
def StringFinish(self, **_):
if self.state == "ATTRIBUTE":
return self.StoreAttribute(string=self.string)
elif self.state == "ARG_LIST":
return self.InsertArg(string=self.string)
def StoreAttribute(self, string="", **_):
if self.verbose:
logging.debug("Storing attribute %r", string)
# TODO(user): Update the expected number_of_args
try:
self.current_expression.SetAttribute(string)
except AttributeError:
raise ParseError("Invalid attribute '%s'" % string)
return "OPERATOR"
def StoreOperator(self, string="", **_):
if self.verbose:
logging.debug("Storing operator %r", string)
self.current_expression.SetOperator(string)
def InsertArg(self, string="", **_):
"""Insert an arg to the current expression."""
if self.verbose:
logging.debug("Storing Argument %s", utils.SmartUnicode(string))
# This expression is complete
if self.current_expression.AddArg(string):
self.stack.append(self.current_expression)
self.current_expression = self.expression_cls()
return self.PopState()
def _CombineBinaryExpressions(self, operator):
for i in range(1, len(self.stack) - 1):
item = self.stack[i]
if (isinstance(item, BinaryExpression) and item.operator == operator and
isinstance(self.stack[i - 1], Expression) and
isinstance(self.stack[i + 1], Expression)):
lhs = self.stack[i - 1]
rhs = self.stack[i + 1]
self.stack[i].AddOperands(lhs, rhs)
self.stack[i - 1] = None
self.stack[i + 1] = None
self.stack = filter(None, self.stack)
def _CombineParenthesis(self):
for i in range(len(self.stack) - 2):
if (self.stack[i] == "(" and self.stack[i + 2] == ")" and
isinstance(self.stack[i + 1], Expression)):
self.stack[i] = None
self.stack[i + 2] = None
self.stack = filter(None, self.stack)
def Reduce(self):
"""Reduce the token stack into an AST."""
# Check for sanity
if self.state != "INITIAL":
self.Error("Premature end of expression")
length = len(self.stack)
while length > 1:
# Precedence order
self._CombineParenthesis()
self._CombineBinaryExpressions("and")
self._CombineBinaryExpressions("or")
# No change
if len(self.stack) == length:
break
length = len(self.stack)
if length != 1:
self.Error("Illegal query expression")
return self.stack[0]
def Error(self, message=None, weight=1):
raise ParseError(u"%s in position %s: %s <----> %s )" %
(utils.SmartUnicode(message), len(self.processed_buffer),
self.processed_buffer, self.buffer))
def Parse(self):
if not self.filter_string:
return self.identity_expression_cls()
self.Close()
return self.Reduce()
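# Usage sketch: parse a query into an AST and inspect it. Compiling the AST
# additionally requires a filter implementation object that provides
# GetFilter()/IdentityFilter(), which this module does not define.
# ast = SearchParser('filename contains "foo" and size >= 100').Parse()
# logging.debug(ast.PrintTree())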
| 29.04829 | 80 | 0.626377 |
48103576b9d5c35345b18525aea187f3b526380d | 9561 | py | Python | mmfewshot/detection/models/detectors/attention_rpn_detector.py | BIGWangYuDong/mmfewshot | dac097afc92df176bc2de76b7c90968584865197 | ["Apache-2.0"] | 376 | 2021-11-23T13:29:57.000Z | 2022-03-30T07:22:14.000Z | mmfewshot/detection/models/detectors/attention_rpn_detector.py | BIGWangYuDong/mmfewshot | dac097afc92df176bc2de76b7c90968584865197 | ["Apache-2.0"] | 51 | 2021-11-23T14:45:08.000Z | 2022-03-30T03:37:15.000Z | mmfewshot/detection/models/detectors/attention_rpn_detector.py | BIGWangYuDong/mmfewshot | dac097afc92df176bc2de76b7c90968584865197 | ["Apache-2.0"] | 56 | 2021-11-23T14:02:27.000Z | 2022-03-31T09:01:50.000Z | # Copyright (c) OpenMMLab. All rights reserved.
from typing import Dict, List, Optional
import numpy as np
import torch
from mmcv.runner import auto_fp16
from mmcv.utils import ConfigDict
from mmdet.core import bbox2roi
from mmdet.models.builder import DETECTORS
from torch import Tensor
from .query_support_detector import QuerySupportDetector
@DETECTORS.register_module()
class AttentionRPNDetector(QuerySupportDetector):
"""Implementation of `AttentionRPN <https://arxiv.org/abs/1908.01998>`_.
Args:
backbone (dict): Config of the backbone for query data.
neck (dict | None): Config of the neck for query data and
probably for support data. Default: None.
support_backbone (dict | None): Config of the backbone for
support data only. If None, support and query data will
share same backbone. Default: None.
support_neck (dict | None): Config of the neck for support
data only. Default: None.
rpn_head (dict | None): Config of rpn_head. Default: None.
roi_head (dict | None): Config of roi_head. Default: None.
train_cfg (dict | None): Training config. Default: None.
test_cfg (dict | None): Testing config. Default: None.
pretrained (str | None): model pretrained path. Default: None.
init_cfg (dict | list[dict] | None): Initialization config dict.
Default: None.
"""
def __init__(self,
backbone: ConfigDict,
neck: Optional[ConfigDict] = None,
support_backbone: Optional[ConfigDict] = None,
support_neck: Optional[ConfigDict] = None,
rpn_head: Optional[ConfigDict] = None,
roi_head: Optional[ConfigDict] = None,
train_cfg: Optional[ConfigDict] = None,
test_cfg: Optional[ConfigDict] = None,
pretrained: Optional[ConfigDict] = None,
init_cfg: Optional[ConfigDict] = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
support_backbone=support_backbone,
support_neck=support_neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
pretrained=pretrained,
init_cfg=init_cfg)
self.is_model_init = False
# save support template features for model initialization,
# `_forward_saved_support_dict` used in :func:`forward_model_init`.
self._forward_saved_support_dict = {
'gt_labels': [],
'res4_roi_feats': [],
'res5_roi_feats': []
}
# save processed support template features for inference,
# the processed support template features are generated
# in :func:`model_init`
self.inference_support_dict = {}
@auto_fp16(apply_to=('img', ))
def extract_support_feat(self, img: Tensor) -> List[Tensor]:
"""Extract features of support data.
Args:
img (Tensor): Input images of shape (N, C, H, W).
Typically these should be mean centered and std scaled.
Returns:
list[Tensor]: Features of support images, each item with shape
(N, C, H, W).
"""
feats = self.support_backbone(img)
if self.support_neck is not None:
feats = self.support_neck(feats)
return feats
def forward_model_init(self,
img: Tensor,
img_metas: List[Dict],
gt_bboxes: List[Tensor] = None,
gt_labels: List[Tensor] = None,
**kwargs) -> Dict:
"""Extract and save support features for model initialization.
Args:
img (Tensor): Input images of shape (N, C, H, W).
Typically these should be mean centered and std scaled.
img_metas (list[dict]): list of image info dict where each dict
has: `img_shape`, `scale_factor`, `flip`, and may also contain
`filename`, `ori_shape`, `pad_shape`, and `img_norm_cfg`.
For details on the values of these keys see
:class:`mmdet.datasets.pipelines.Collect`.
gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
gt_labels (list[Tensor]): class indices corresponding to each box.
Returns:
dict: A dict contains following keys:
- `gt_labels` (Tensor): class indices corresponding to each
feature.
- `res4_roi_feat` (Tensor): roi features of res4 layer.
- `res5_roi_feat` (Tensor): roi features of res5 layer.
"""
self.is_model_init = False
# extracting support template features resets the `is_model_init` flag
assert gt_bboxes is not None and gt_labels is not None, \
'forward support template require gt_bboxes and gt_labels.'
assert len(gt_labels) == img.size(0), \
'Expected exactly one gt label per support image.'
feats = self.extract_support_feat(img)
rois = bbox2roi([bboxes for bboxes in gt_bboxes])
res4_roi_feat = self.rpn_head.extract_roi_feat(feats, rois)
res5_roi_feat = self.roi_head.extract_roi_feat(feats, rois)
self._forward_saved_support_dict['gt_labels'].extend(gt_labels)
self._forward_saved_support_dict['res4_roi_feats'].append(
res4_roi_feat)
self._forward_saved_support_dict['res5_roi_feats'].append(
res5_roi_feat)
return {
'gt_labels': gt_labels,
'res4_roi_feats': res4_roi_feat,
'res5_roi_feats': res5_roi_feat
}
def model_init(self) -> None:
"""process the saved support features for model initialization."""
self.inference_support_dict.clear()
gt_labels = torch.cat(self._forward_saved_support_dict['gt_labels'])
# used for attention rpn head
res4_roi_feats = torch.cat(
self._forward_saved_support_dict['res4_roi_feats'])
# used for multi relation head
res5_roi_feats = torch.cat(
self._forward_saved_support_dict['res5_roi_feats'])
class_ids = set(gt_labels.data.tolist())
for class_id in class_ids:
self.inference_support_dict[class_id] = {
'res4_roi_feats':
res4_roi_feats[gt_labels == class_id].mean([0, 2, 3], True),
'res5_roi_feats':
res5_roi_feats[gt_labels == class_id].mean([0], True)
}
# set the init flag
self.is_model_init = True
# clear support dict
for k in self._forward_saved_support_dict.keys():
self._forward_saved_support_dict[k].clear()
def simple_test(self,
img: Tensor,
img_metas: List[Dict],
proposals: Optional[List[Tensor]] = None,
rescale: bool = False) -> List[List[np.ndarray]]:
"""Test without augmentation.
Args:
img (Tensor): Input images of shape (N, C, H, W).
Typically these should be mean centered and std scaled.
img_metas (list[dict]): list of image info dict where each dict
has: `img_shape`, `scale_factor`, `flip`, and may also contain
`filename`, `ori_shape`, `pad_shape`, and `img_norm_cfg`.
For details on the values of these keys see
:class:`mmdet.datasets.pipelines.Collect`.
proposals (list[Tensor] | None): override rpn proposals with
custom proposals. Use when `with_rpn` is False. Default: None.
rescale (bool): If True, return boxes in original image space.
Returns:
list[list[np.ndarray]]: BBox results of each image and classes.
The outer list corresponds to each image. The inner list
corresponds to each class.
"""
assert self.with_bbox, 'Bbox head must be implemented.'
assert len(img_metas) == 1, 'Only single image inference is supported.'
if not self.inference_support_dict or not self.is_model_init:
# process the saved support features
self.model_init()
results_dict = {}
query_feats = self.extract_feat(img)
for class_id in self.inference_support_dict.keys():
support_res4_roi_feat = \
self.inference_support_dict[class_id]['res4_roi_feats']
support_res5_roi_feat = \
self.inference_support_dict[class_id]['res5_roi_feats']
if proposals is None:
proposal_list = self.rpn_head.simple_test(
query_feats, support_res4_roi_feat, img_metas)
else:
proposal_list = proposals
results_dict[class_id] = self.roi_head.simple_test(
query_feats,
support_res5_roi_feat,
proposal_list,
img_metas,
rescale=rescale)
results = [
results_dict[i][0][0] for i in sorted(results_dict.keys())
if len(results_dict[i])
]
return [results]
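# --- Editor's sketch (not part of the original file) ---
# A minimal, hypothetical view of how this few-shot detector is driven at
# inference time; `detector`, the support/query tensors and their meta dicts
# are placeholders, and everything is assumed to run under torch.no_grad():
#
#     # 1. cache RoI features for every support image of the episode
#     detector.forward_model_init(support_img, support_metas,
#                                 gt_bboxes=support_bboxes,
#                                 gt_labels=support_labels)
#     # 2. average the cached features per class id
#     detector.model_init()
#     # 3. run single-image inference against every cached class
#     results = detector.simple_test(query_img, query_metas)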
| 43.459091 | 78 | 0.599414 |
b85e5dcf363284d8d31371e8f449c931e0efe6f4 | 192 | py | Python | rllib/algorithms/alpha_star/__init__.py | jianoaix/ray | 1701b923bc83905f8961c06a6a173e3eba46a936 | [
"Apache-2.0"
] | null | null | null | rllib/algorithms/alpha_star/__init__.py | jianoaix/ray | 1701b923bc83905f8961c06a6a173e3eba46a936 | [
"Apache-2.0"
] | 41 | 2021-09-21T01:13:48.000Z | 2022-03-19T07:12:22.000Z | rllib/algorithms/alpha_star/__init__.py | jianoaix/ray | 1701b923bc83905f8961c06a6a173e3eba46a936 | [
"Apache-2.0"
] | null | null | null | from ray.rllib.algorithms.alpha_star.alpha_star import (
AlphaStar,
AlphaStarConfig,
DEFAULT_CONFIG,
)
__all__ = [
"AlphaStar",
"AlphaStarConfig",
"DEFAULT_CONFIG",
]
| 16 | 56 | 0.682292 |
aa9755c12e9faa4abbe16fc6701be6768d045131 | 2,979 | py | Python | scripts/tilestache-render.py | shoeberto/TileStache | 4526076e9326512a0542adaae86a946e08df8547 | [
"BSD-3-Clause"
] | 414 | 2015-01-05T19:29:22.000Z | 2022-03-26T03:39:42.000Z | scripts/tilestache-render.py | shoeberto/TileStache | 4526076e9326512a0542adaae86a946e08df8547 | [
"BSD-3-Clause"
] | 134 | 2015-01-15T08:25:55.000Z | 2021-09-02T16:06:00.000Z | scripts/tilestache-render.py | shoeberto/TileStache | 4526076e9326512a0542adaae86a946e08df8547 | [
"BSD-3-Clause"
] | 176 | 2015-01-09T14:43:25.000Z | 2022-03-04T16:53:27.000Z | #!/usr/bin/env python
"""tilestache-render.py will warm your cache.
This script is *deprecated* and will be removed in a future TileStache 2.0.
This script is intended to be run directly. This example will save two tiles
for San Francisco and Oakland to local temporary files:
tilestache-render.py -c ./config.json -l osm 12/655/1582.png 12/656/1582.png
Output for this sample might look like this:
/tmp/tile-_G3uHX.png
/tmp/tile-pWNfQQ.png
...where each line corresponds to one of the given coordinates, in order.
You are expected to use these files and then dispose of them.
See `tilestache-render.py --help` for more information.
"""
from __future__ import print_function
import re
import os
from tempfile import mkstemp
from optparse import OptionParser
from TileStache import parseConfig, getTile
from TileStache.Core import KnownUnknown
from ModestMaps.Core import Coordinate
parser = OptionParser(usage="""%prog [options] [coord...]
Each coordinate in the argument list should look like "12/656/1582.png", similar
to URL paths in web server usage. Coordinates are processed in order, each one
rendered to an image file in a temporary location and output to stdout in order.
Configuration and layer options are required; see `%prog --help` for info.""")
parser.add_option('-c', '--config', dest='config',
help='Path to configuration file.')
parser.add_option('-l', '--layer', dest='layer',
help='Layer name from configuration.')
pathinfo_pat = re.compile(r'^(?P<z>\d+)/(?P<x>\d+)/(?P<y>\d+)\.(?P<e>\w+)$')
if __name__ == '__main__':
options, paths = parser.parse_args()
try:
if options.config is None:
raise KnownUnknown('Missing required configuration (--config) parameter.')
if options.layer is None:
raise KnownUnknown('Missing required layer (--layer) parameter.')
config = parseConfig(options.config)
if options.layer not in config.layers:
raise KnownUnknown('"%s" is not a layer I know about. Here are some that I do know about: %s.' % (options.layer, ', '.join(sorted(config.layers.keys()))))
layer = config.layers[options.layer]
coords = []
for path in paths:
path_ = pathinfo_pat.match(path)
if path_ is None:
raise KnownUnknown('"%s" is not a path I understand. I was expecting something more like "0/0/0.png".' % path)
row, column, zoom, extension = [path_.group(p) for p in 'yxze']
coord = Coordinate(int(row), int(column), int(zoom))
# pair each coordinate with its own extension; previously the extension
# parsed from the last path silently applied to every render below
coords.append((coord, extension))
except KnownUnknown as e:
parser.error(str(e))
for coord, extension in coords:
# render
mimetype, content = getTile(layer, coord, extension)
# save
handle, filename = mkstemp(prefix='tile-', suffix='.'+extension)
os.write(handle, content)
os.close(handle)
# inform
print(filename)
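# Editor's sketch: the same render can be driven programmatically, assuming a
# valid ./config.json that defines a layer named "osm":
#
#     config = parseConfig('./config.json')
#     layer = config.layers['osm']
#     # Coordinate(row, column, zoom) corresponds to the path "12/655/1582.png"
#     mimetype, content = getTile(layer, Coordinate(1582, 655, 12), 'png')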
| 32.032258 | 166 | 0.664317 |
c7c891023b016e67fe34dc9cb34cb1f7222b9657 | 46,161 | py | Python | tests/unit/gapic/dialogflow_v2/test_sessions.py | martini9393/python-dialogflow | 69bf02c733c7116840b15992f505cc298ed55b86 | [
"Apache-2.0"
] | null | null | null | tests/unit/gapic/dialogflow_v2/test_sessions.py | martini9393/python-dialogflow | 69bf02c733c7116840b15992f505cc298ed55b86 | [
"Apache-2.0"
] | null | null | null | tests/unit/gapic/dialogflow_v2/test_sessions.py | martini9393/python-dialogflow | 69bf02c733c7116840b15992f505cc298ed55b86 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google import auth
from google.api_core import client_options
from google.api_core import exceptions
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.auth import credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.dialogflow_v2.services.sessions import SessionsAsyncClient
from google.cloud.dialogflow_v2.services.sessions import SessionsClient
from google.cloud.dialogflow_v2.services.sessions import transports
from google.cloud.dialogflow_v2.types import audio_config
from google.cloud.dialogflow_v2.types import context
from google.cloud.dialogflow_v2.types import entity_type
from google.cloud.dialogflow_v2.types import session
from google.cloud.dialogflow_v2.types import session as gcd_session
from google.cloud.dialogflow_v2.types import session_entity_type
from google.oauth2 import service_account
from google.protobuf import field_mask_pb2 as field_mask # type: ignore
from google.protobuf import struct_pb2 as struct # type: ignore
from google.rpc import status_pb2 as status # type: ignore
from google.type import latlng_pb2 as latlng # type: ignore
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return (
"foo.googleapis.com"
if ("localhost" in client.DEFAULT_ENDPOINT)
else client.DEFAULT_ENDPOINT
)
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert SessionsClient._get_default_mtls_endpoint(None) is None
assert SessionsClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
assert (
SessionsClient._get_default_mtls_endpoint(api_mtls_endpoint)
== api_mtls_endpoint
)
assert (
SessionsClient._get_default_mtls_endpoint(sandbox_endpoint)
== sandbox_mtls_endpoint
)
assert (
SessionsClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
== sandbox_mtls_endpoint
)
assert SessionsClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
@pytest.mark.parametrize("client_class", [SessionsClient, SessionsAsyncClient])
def test_sessions_client_from_service_account_file(client_class):
creds = credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_file"
) as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
client = client_class.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert client.transport._host == "dialogflow.googleapis.com:443"
def test_sessions_client_get_transport_class():
transport = SessionsClient.get_transport_class()
assert transport == transports.SessionsGrpcTransport
transport = SessionsClient.get_transport_class("grpc")
assert transport == transports.SessionsGrpcTransport
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(SessionsClient, transports.SessionsGrpcTransport, "grpc"),
(SessionsAsyncClient, transports.SessionsGrpcAsyncIOTransport, "grpc_asyncio"),
],
)
@mock.patch.object(
SessionsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SessionsClient)
)
@mock.patch.object(
SessionsAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(SessionsAsyncClient),
)
def test_sessions_client_client_options(client_class, transport_class, transport_name):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(SessionsClient, "get_transport_class") as gtc:
transport = transport_class(credentials=credentials.AnonymousCredentials())
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(SessionsClient, "get_transport_class") as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
ssl_channel_credentials=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
ssl_channel_credentials=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
ssl_channel_credentials=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class()
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
with pytest.raises(ValueError):
client = client_class()
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
ssl_channel_credentials=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,use_client_cert_env",
[
(SessionsClient, transports.SessionsGrpcTransport, "grpc", "true"),
(
SessionsAsyncClient,
transports.SessionsGrpcAsyncIOTransport,
"grpc_asyncio",
"true",
),
(SessionsClient, transports.SessionsGrpcTransport, "grpc", "false"),
(
SessionsAsyncClient,
transports.SessionsGrpcAsyncIOTransport,
"grpc_asyncio",
"false",
),
],
)
@mock.patch.object(
SessionsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SessionsClient)
)
@mock.patch.object(
SessionsAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(SessionsAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_sessions_client_mtls_env_auto(
client_class, transport_class, transport_name, use_client_cert_env
):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
options = client_options.ClientOptions(
client_cert_source=client_cert_source_callback
)
with mock.patch.object(transport_class, "__init__") as patched:
ssl_channel_creds = mock.Mock()
with mock.patch(
"grpc.ssl_channel_credentials", return_value=ssl_channel_creds
):
patched.return_value = None
client = client_class(client_options=options)
if use_client_cert_env == "false":
expected_ssl_channel_creds = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_ssl_channel_creds = ssl_channel_creds
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
ssl_channel_credentials=expected_ssl_channel_creds,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.grpc.SslCredentials.__init__", return_value=None
):
with mock.patch(
"google.auth.transport.grpc.SslCredentials.is_mtls",
new_callable=mock.PropertyMock,
) as is_mtls_mock:
with mock.patch(
"google.auth.transport.grpc.SslCredentials.ssl_credentials",
new_callable=mock.PropertyMock,
) as ssl_credentials_mock:
if use_client_cert_env == "false":
is_mtls_mock.return_value = False
ssl_credentials_mock.return_value = None
expected_host = client.DEFAULT_ENDPOINT
expected_ssl_channel_creds = None
else:
is_mtls_mock.return_value = True
ssl_credentials_mock.return_value = mock.Mock()
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_ssl_channel_creds = (
ssl_credentials_mock.return_value
)
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
ssl_channel_credentials=expected_ssl_channel_creds,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.grpc.SslCredentials.__init__", return_value=None
):
with mock.patch(
"google.auth.transport.grpc.SslCredentials.is_mtls",
new_callable=mock.PropertyMock,
) as is_mtls_mock:
is_mtls_mock.return_value = False
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
ssl_channel_credentials=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
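# Editor's note: the matrix above crosses both transport classes with the
# GOOGLE_API_USE_CLIENT_CERTIFICATE values "true"/"false"; the client should
# switch to the mTLS endpoint only when that flag is "true" *and* a client
# certificate (explicit or ADC-provided) is actually available.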
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(SessionsClient, transports.SessionsGrpcTransport, "grpc"),
(SessionsAsyncClient, transports.SessionsGrpcAsyncIOTransport, "grpc_asyncio"),
],
)
def test_sessions_client_client_options_scopes(
client_class, transport_class, transport_name
):
# Check the case scopes are provided.
options = client_options.ClientOptions(scopes=["1", "2"],)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
ssl_channel_credentials=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(SessionsClient, transports.SessionsGrpcTransport, "grpc"),
(SessionsAsyncClient, transports.SessionsGrpcAsyncIOTransport, "grpc_asyncio"),
],
)
def test_sessions_client_client_options_credentials_file(
client_class, transport_class, transport_name
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
ssl_channel_credentials=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
def test_sessions_client_client_options_from_dict():
with mock.patch(
"google.cloud.dialogflow_v2.services.sessions.transports.SessionsGrpcTransport.__init__"
) as grpc_transport:
grpc_transport.return_value = None
client = SessionsClient(client_options={"api_endpoint": "squid.clam.whelk"})
grpc_transport.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
ssl_channel_credentials=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
def test_detect_intent(
transport: str = "grpc", request_type=gcd_session.DetectIntentRequest
):
client = SessionsClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.detect_intent), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = gcd_session.DetectIntentResponse(
response_id="response_id_value", output_audio=b"output_audio_blob",
)
response = client.detect_intent(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == gcd_session.DetectIntentRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gcd_session.DetectIntentResponse)
assert response.response_id == "response_id_value"
assert response.output_audio == b"output_audio_blob"
def test_detect_intent_from_dict():
test_detect_intent(request_type=dict)
@pytest.mark.asyncio
async def test_detect_intent_async(
transport: str = "grpc_asyncio", request_type=gcd_session.DetectIntentRequest
):
client = SessionsAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.detect_intent), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gcd_session.DetectIntentResponse(
response_id="response_id_value", output_audio=b"output_audio_blob",
)
)
response = await client.detect_intent(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == gcd_session.DetectIntentRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gcd_session.DetectIntentResponse)
assert response.response_id == "response_id_value"
assert response.output_audio == b"output_audio_blob"
@pytest.mark.asyncio
async def test_detect_intent_async_from_dict():
await test_detect_intent_async(request_type=dict)
def test_detect_intent_field_headers():
client = SessionsClient(credentials=credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = gcd_session.DetectIntentRequest()
request.session = "session/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.detect_intent), "__call__") as call:
call.return_value = gcd_session.DetectIntentResponse()
client.detect_intent(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "session=session/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_detect_intent_field_headers_async():
client = SessionsAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = gcd_session.DetectIntentRequest()
request.session = "session/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.detect_intent), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gcd_session.DetectIntentResponse()
)
await client.detect_intent(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "session=session/value",) in kw["metadata"]
def test_detect_intent_flattened():
client = SessionsClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.detect_intent), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = gcd_session.DetectIntentResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.detect_intent(
session="session_value",
query_input=gcd_session.QueryInput(
audio_config=audio_config.InputAudioConfig(
audio_encoding=audio_config.AudioEncoding.AUDIO_ENCODING_LINEAR_16
)
),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].session == "session_value"
assert args[0].query_input == gcd_session.QueryInput(
audio_config=audio_config.InputAudioConfig(
audio_encoding=audio_config.AudioEncoding.AUDIO_ENCODING_LINEAR_16
)
)
def test_detect_intent_flattened_error():
client = SessionsClient(credentials=credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.detect_intent(
gcd_session.DetectIntentRequest(),
session="session_value",
query_input=gcd_session.QueryInput(
audio_config=audio_config.InputAudioConfig(
audio_encoding=audio_config.AudioEncoding.AUDIO_ENCODING_LINEAR_16
)
),
)
@pytest.mark.asyncio
async def test_detect_intent_flattened_async():
client = SessionsAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.detect_intent), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = gcd_session.DetectIntentResponse()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gcd_session.DetectIntentResponse()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.detect_intent(
session="session_value",
query_input=gcd_session.QueryInput(
audio_config=audio_config.InputAudioConfig(
audio_encoding=audio_config.AudioEncoding.AUDIO_ENCODING_LINEAR_16
)
),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].session == "session_value"
assert args[0].query_input == gcd_session.QueryInput(
audio_config=audio_config.InputAudioConfig(
audio_encoding=audio_config.AudioEncoding.AUDIO_ENCODING_LINEAR_16
)
)
@pytest.mark.asyncio
async def test_detect_intent_flattened_error_async():
client = SessionsAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.detect_intent(
gcd_session.DetectIntentRequest(),
session="session_value",
query_input=gcd_session.QueryInput(
audio_config=audio_config.InputAudioConfig(
audio_encoding=audio_config.AudioEncoding.AUDIO_ENCODING_LINEAR_16
)
),
)
def test_streaming_detect_intent(
transport: str = "grpc", request_type=session.StreamingDetectIntentRequest
):
client = SessionsClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
requests = [request]
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.streaming_detect_intent), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = iter([session.StreamingDetectIntentResponse()])
response = client.streaming_detect_intent(iter(requests))
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert next(args[0]) == request
# Establish that the response is the type that we expect.
for message in response:
assert isinstance(message, session.StreamingDetectIntentResponse)
def test_streaming_detect_intent_from_dict():
test_streaming_detect_intent(request_type=dict)
@pytest.mark.asyncio
async def test_streaming_detect_intent_async(
transport: str = "grpc_asyncio", request_type=session.StreamingDetectIntentRequest
):
client = SessionsAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
requests = [request]
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.streaming_detect_intent), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = mock.Mock(aio.StreamStreamCall, autospec=True)
call.return_value.read = mock.AsyncMock(
side_effect=[session.StreamingDetectIntentResponse()]
)
response = await client.streaming_detect_intent(iter(requests))
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert next(args[0]) == request
# Establish that the response is the type that we expect.
message = await response.read()
assert isinstance(message, session.StreamingDetectIntentResponse)
@pytest.mark.asyncio
async def test_streaming_detect_intent_async_from_dict():
await test_streaming_detect_intent_async(request_type=dict)
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.SessionsGrpcTransport(
credentials=credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = SessionsClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.SessionsGrpcTransport(
credentials=credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = SessionsClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide scopes and a transport instance.
transport = transports.SessionsGrpcTransport(
credentials=credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = SessionsClient(
client_options={"scopes": ["1", "2"]}, transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.SessionsGrpcTransport(
credentials=credentials.AnonymousCredentials(),
)
client = SessionsClient(transport=transport)
assert client.transport is transport
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.SessionsGrpcTransport(
credentials=credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
transport = transports.SessionsGrpcAsyncIOTransport(
credentials=credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
@pytest.mark.parametrize(
"transport_class",
[transports.SessionsGrpcTransport, transports.SessionsGrpcAsyncIOTransport],
)
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(auth, "default") as adc:
adc.return_value = (credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = SessionsClient(credentials=credentials.AnonymousCredentials(),)
assert isinstance(client.transport, transports.SessionsGrpcTransport,)
def test_sessions_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(exceptions.DuplicateCredentialArgs):
transport = transports.SessionsTransport(
credentials=credentials.AnonymousCredentials(),
credentials_file="credentials.json",
)
def test_sessions_base_transport():
# Instantiate the base transport.
with mock.patch(
"google.cloud.dialogflow_v2.services.sessions.transports.SessionsTransport.__init__"
) as Transport:
Transport.return_value = None
transport = transports.SessionsTransport(
credentials=credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
"detect_intent",
"streaming_detect_intent",
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
def test_sessions_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(
auth, "load_credentials_from_file"
) as load_creds, mock.patch(
"google.cloud.dialogflow_v2.services.sessions.transports.SessionsTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (credentials.AnonymousCredentials(), None)
transport = transports.SessionsTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
quota_project_id="octopus",
)
def test_sessions_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(auth, "default") as adc, mock.patch(
"google.cloud.dialogflow_v2.services.sessions.transports.SessionsTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
adc.return_value = (credentials.AnonymousCredentials(), None)
transport = transports.SessionsTransport()
adc.assert_called_once()
def test_sessions_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(auth, "default") as adc:
adc.return_value = (credentials.AnonymousCredentials(), None)
SessionsClient()
adc.assert_called_once_with(
scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
quota_project_id=None,
)
def test_sessions_transport_auth_adc():
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(auth, "default") as adc:
adc.return_value = (credentials.AnonymousCredentials(), None)
transports.SessionsGrpcTransport(
host="squid.clam.whelk", quota_project_id="octopus"
)
adc.assert_called_once_with(
scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
quota_project_id="octopus",
)
def test_sessions_host_no_port():
client = SessionsClient(
credentials=credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="dialogflow.googleapis.com"
),
)
assert client.transport._host == "dialogflow.googleapis.com:443"
def test_sessions_host_with_port():
client = SessionsClient(
credentials=credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="dialogflow.googleapis.com:8000"
),
)
assert client.transport._host == "dialogflow.googleapis.com:8000"
def test_sessions_grpc_transport_channel():
channel = grpc.insecure_channel("http://localhost/")
# Check that channel is used if provided.
transport = transports.SessionsGrpcTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
assert transport._ssl_channel_credentials is None
def test_sessions_grpc_asyncio_transport_channel():
channel = aio.insecure_channel("http://localhost/")
# Check that channel is used if provided.
transport = transports.SessionsGrpcAsyncIOTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
assert transport._ssl_channel_credentials is None
@pytest.mark.parametrize(
"transport_class",
[transports.SessionsGrpcTransport, transports.SessionsGrpcAsyncIOTransport],
)
def test_sessions_transport_channel_mtls_with_client_cert_source(transport_class):
with mock.patch(
"grpc.ssl_channel_credentials", autospec=True
) as grpc_ssl_channel_cred:
with mock.patch.object(
transport_class, "create_channel", autospec=True
) as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(auth, "default") as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
@pytest.mark.parametrize(
"transport_class",
[transports.SessionsGrpcTransport, transports.SessionsGrpcAsyncIOTransport],
)
def test_sessions_transport_channel_mtls_with_adc(transport_class):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(
transport_class, "create_channel", autospec=True
) as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
def test_context_path():
project = "squid"
session = "clam"
context = "whelk"
expected = "projects/{project}/agent/sessions/{session}/contexts/{context}".format(
project=project, session=session, context=context,
)
actual = SessionsClient.context_path(project, session, context)
assert expected == actual
def test_parse_context_path():
expected = {
"project": "octopus",
"session": "oyster",
"context": "nudibranch",
}
path = SessionsClient.context_path(**expected)
# Check that the path construction is reversible.
actual = SessionsClient.parse_context_path(path)
assert expected == actual
def test_intent_path():
project = "cuttlefish"
intent = "mussel"
expected = "projects/{project}/agent/intents/{intent}".format(
project=project, intent=intent,
)
actual = SessionsClient.intent_path(project, intent)
assert expected == actual
def test_parse_intent_path():
expected = {
"project": "winkle",
"intent": "nautilus",
}
path = SessionsClient.intent_path(**expected)
# Check that the path construction is reversible.
actual = SessionsClient.parse_intent_path(path)
assert expected == actual
def test_session_path():
project = "scallop"
session = "abalone"
expected = "projects/{project}/agent/sessions/{session}".format(
project=project, session=session,
)
actual = SessionsClient.session_path(project, session)
assert expected == actual
def test_parse_session_path():
expected = {
"project": "squid",
"session": "clam",
}
path = SessionsClient.session_path(**expected)
# Check that the path construction is reversible.
actual = SessionsClient.parse_session_path(path)
assert expected == actual
def test_session_entity_type_path():
project = "whelk"
session = "octopus"
entity_type = "oyster"
expected = "projects/{project}/agent/sessions/{session}/entityTypes/{entity_type}".format(
project=project, session=session, entity_type=entity_type,
)
actual = SessionsClient.session_entity_type_path(project, session, entity_type)
assert expected == actual
def test_parse_session_entity_type_path():
expected = {
"project": "nudibranch",
"session": "cuttlefish",
"entity_type": "mussel",
}
path = SessionsClient.session_entity_type_path(**expected)
# Check that the path construction is reversible.
actual = SessionsClient.parse_session_entity_type_path(path)
assert expected == actual
def test_common_billing_account_path():
billing_account = "winkle"
expected = "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
actual = SessionsClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "nautilus",
}
path = SessionsClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = SessionsClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "scallop"
expected = "folders/{folder}".format(folder=folder,)
actual = SessionsClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "abalone",
}
path = SessionsClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = SessionsClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "squid"
expected = "organizations/{organization}".format(organization=organization,)
actual = SessionsClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "clam",
}
path = SessionsClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = SessionsClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "whelk"
expected = "projects/{project}".format(project=project,)
actual = SessionsClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "octopus",
}
path = SessionsClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = SessionsClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "oyster"
location = "nudibranch"
expected = "projects/{project}/locations/{location}".format(
project=project, location=location,
)
actual = SessionsClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "cuttlefish",
"location": "mussel",
}
path = SessionsClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = SessionsClient.parse_common_location_path(path)
assert expected == actual
def test_client_withDEFAULT_CLIENT_INFO():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(
transports.SessionsTransport, "_prep_wrapped_messages"
) as prep:
client = SessionsClient(
credentials=credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(
transports.SessionsTransport, "_prep_wrapped_messages"
) as prep:
transport_class = SessionsClient.get_transport_class()
transport = transport_class(
credentials=credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
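# --- Editor's sketch (not part of the generated tests) ---
# The tests above exercise a mocked transport; a real call, following the
# flattened-argument shape tested in test_detect_intent_flattened, looks
# roughly like this (project/session ids are hypothetical):
#
#     from google.cloud import dialogflow_v2
#     client = dialogflow_v2.SessionsClient()
#     session = client.session_path("my-project", "my-session")
#     query_input = dialogflow_v2.QueryInput(
#         text=dialogflow_v2.TextInput(text="hi", language_code="en-US"))
#     response = client.detect_intent(session=session, query_input=query_input)
#     print(response.query_result.fulfillment_text)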
| 36.72315 | 106 | 0.677022 |
e04d14fd29d1f84173279757815dad575ccf6699 | 147 | py | Python | 16.py | abphilip-codes/Codechef_DSA | ee93243fdd1c16ce8d8163e92f0764bd5a64436c | [
"MIT"
] | 1 | 2021-11-25T13:39:49.000Z | 2021-11-25T13:39:49.000Z | 16.py | abphilip-codes/Codechef_DSA | ee93243fdd1c16ce8d8163e92f0764bd5a64436c | [
"MIT"
] | null | null | null | 16.py | abphilip-codes/Codechef_DSA | ee93243fdd1c16ce8d8163e92f0764bd5a64436c | [
"MIT"
] | 1 | 2021-07-14T17:51:24.000Z | 2021-07-14T17:51:24.000Z | # https://www.codechef.com/LRNDSA01/problems/TEST
# Brute Force Approach
while True:
n = int(input())
if(n!=42): print(n)
else: break
 | 18.375 | 49 | 0.646259 |
ec6e2bd1a9d46117dbc08f323639192cfc7f1146 | 2,130 | py | Python | tests/brainview/test_atlasviewer.py | dfsp-spirit/brainview | fdb45fdb694b098c99e7d4db4aaeb56bb5902144 | [
"MIT"
] | 3 | 2019-01-20T05:58:04.000Z | 2020-09-02T12:37:14.000Z | tests/brainview/test_atlasviewer.py | dfsp-spirit/brainview | fdb45fdb694b098c99e7d4db4aaeb56bb5902144 | [
"MIT"
] | null | null | null | tests/brainview/test_atlasviewer.py | dfsp-spirit/brainview | fdb45fdb694b098c99e7d4db4aaeb56bb5902144 | [
"MIT"
] | null | null | null | # Tests for the atlasviewer script.
#
# These tests require the package `pytest-console-scripts`.
import os
import pytest
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
TEST_DATA_DIR = os.path.join(THIS_DIR, os.pardir, 'test_data')
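# Editor's note: the `script_runner` fixture used below is provided by the
# pytest-console-scripts plugin; it invokes the installed console-script entry
# point and exposes the exit status, stdout and stderr on the returned object.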
def test_atlasviewer_help(script_runner):
ret = script_runner.run('atlasviewer', '--help')
assert ret.success
assert 'usage' in ret.stdout
assert 'View brain label data or brain annotation' in ret.stdout
assert ret.stderr == ''
def test_atlasviewer_label_cortex(script_runner):
ret = script_runner.run('atlasviewer', 'subject1', 'label', 'cortex', '-d', TEST_DATA_DIR, '-v')
assert ret.success
assert 'Verbosity' in ret.stdout
assert 'Loading label cortex for subject subject1 from subjects dir' in ret.stdout
assert 'displaying on surface white for hemisphere both' in ret.stdout
assert ret.stderr == ''
def test_atlasviewer_label_cortex_nonverbose(script_runner):
ret = script_runner.run('atlasviewer', 'subject1', 'label', 'cortex', '-d', TEST_DATA_DIR)
assert ret.success
assert 'Verbosity' not in ret.stdout
assert 'Loading label cortex for subject subject1 from subjects dir' not in ret.stdout
assert 'displaying on surface white for hemisphere both' not in ret.stdout
assert ret.stderr == ''
def test_atlasviewer_annot_aparc(script_runner):
ret = script_runner.run('atlasviewer', 'subject1', 'atlas', 'aparc', '-d', TEST_DATA_DIR, '-v')
assert ret.success
assert 'Verbosity' in ret.stdout
assert 'Loading atlas aparc for subject subject1 from subjects dir' in ret.stdout
assert 'displaying on surface white for hemisphere both' in ret.stdout
assert ret.stderr == ''
def test_atlasviewer_annot_aparc_nonverbose(script_runner):
ret = script_runner.run('atlasviewer', 'subject1', 'atlas', 'aparc', '-d', TEST_DATA_DIR)
assert ret.success
assert not 'Verbosity' in ret.stdout
assert not 'Loading atlas aparc for subject subject1 from subjects dir' in ret.stdout
assert not 'displaying on surface white for hemisphere both' in ret.stdout
assert ret.stderr == ''
| 40.188679 | 100 | 0.732864 |
774f04ecfbe56ed5980d2be94203ca83c5f51d46 | 2,669 | py | Python | chrono/refiners/en/merge_date_time.py | wanasit/chrono-python | 2e3bb44f876fc381a73050d9dda58de296306dc0 | [
"MIT"
] | 20 | 2016-07-21T11:27:46.000Z | 2022-01-30T16:12:46.000Z | chrono/refiners/en/merge_date_time.py | wanasit/chrono-python | 2e3bb44f876fc381a73050d9dda58de296306dc0 | [
"MIT"
] | 2 | 2016-11-21T05:46:19.000Z | 2019-03-23T06:41:21.000Z | chrono/refiners/en/merge_date_time.py | wanasit/chrono-python | 2e3bb44f876fc381a73050d9dda58de296306dc0 | [
"MIT"
] | 5 | 2015-06-28T07:21:31.000Z | 2020-07-28T19:54:29.000Z | #!/usr/bin/env python
# -*- coding: utf8 -*-
import re
from ..refiner import Refiner
from ..refiner import ParsedResult
from ..refiner import ParsedComponent
class ENMergeDateTimeRefiner(Refiner):
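# Editor's note: merges an adjacent date-only result (e.g. "Friday") with a
# time-only result (e.g. "at 3pm") into a single result spanning the whole
# phrase, whose start component carries both the date and the time fields.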
def refine(self, results, text, options):
if len(results) < 2: return results
merged_results = []
i = 0
while i < len(results):
# try to merge each adjacent pair; a successful merge consumes both
# results. (the previous sliding-window loop could drop a trailing
# result that immediately followed a merged pair)
if i + 1 < len(results) and \
is_able_to_merge(text, results[i], results[i + 1]):
prev_result = results[i]
curr_result = results[i + 1]
if is_time_only(curr_result) and is_date_only(prev_result):
merged_results.append(
merge_result(text, prev_result, curr_result))
i += 2
continue
if is_time_only(prev_result) and is_date_only(curr_result):
merged_results.append(
merge_result(text, curr_result, prev_result))
i += 2
continue
merged_results.append(results[i])
i += 1
return merged_results
def is_date_only(result):
return not result.start.is_certain('hour')
def is_time_only(result):
return not result.start.is_certain('day') and not result.start.is_certain('day_of_week')
def is_able_to_merge(text, result1, result2):
# anchor with ^...$ so only whitespace plus an optional connector word may
# separate the two results; unanchored, re.match on this all-optional
# pattern matched any in-between text, merging unrelated results
pattern = re.compile(r"^\s*(T|at|on|of|,)?\s*$", re.IGNORECASE)
text_between = text[result1.index + len(result1.text) : result2.index]
return bool(pattern.match(text_between))
def merge_result(text, date_result, time_result):
result = ParsedResult()
begin_index = min(date_result.index, time_result.index)
end_index = max(date_result.index + len(date_result.text), time_result.index + len(time_result.text))
result.index = begin_index
result.text = text[begin_index : end_index]
result.start = date_result.start.copy()
result.start.assign('hour', time_result.start.get('hour'))
result.start.assign('minute', time_result.start.get('minute'))
result.start.assign('second', time_result.start.get('second'))
if time_result.end or date_result.end:
time_result_end = time_result.end if time_result.end else time_result.start
date_result_end = date_result.end if date_result.end else date_result.start
result.end = date_result_end.copy()
result.end.assign('hour', time_result_end.get('hour'))
result.end.assign('minute', time_result_end.get('minute'))
result.end.assign('second', time_result_end.get('second'))
return result
| 33.3625 | 107 | 0.650806 |
40880009cf8eccebefcdc58a5d39f7380138be17 | 552 | py | Python | src/ychaos/testplan/__init__.py | eisenhowerj/ychaos | de7572e35d89eedb5d7d2ad6a8e1fda52179eccc | [
"Apache-2.0"
] | 8 | 2021-07-21T15:37:48.000Z | 2022-03-03T14:43:09.000Z | src/ychaos/testplan/__init__.py | eisenhowerj/ychaos | de7572e35d89eedb5d7d2ad6a8e1fda52179eccc | [
"Apache-2.0"
] | 102 | 2021-07-20T16:08:29.000Z | 2022-03-25T07:28:37.000Z | src/ychaos/testplan/__init__.py | eisenhowerj/ychaos | de7572e35d89eedb5d7d2ad6a8e1fda52179eccc | [
"Apache-2.0"
] | 8 | 2021-07-20T13:37:46.000Z | 2022-02-18T01:44:52.000Z | # Copyright 2021, Yahoo
# Licensed under the terms of the Apache 2.0 license. See the LICENSE file in the project root for terms
from enum import Enum
from typing import Any, Dict
from pydantic import BaseModel
class SystemState(Enum):
STEADY = "STEADY"
CHAOS = "CHAOS"
RECOVERED = "RECOVERED"
class SchemaModel(BaseModel):
class Config:
@staticmethod
def schema_extra(schema: Dict[str, Any], model) -> None:
for prop in schema.get("properties", {}).values():
prop.pop("title", None)
| 25.090909 | 105 | 0.663043 |
2d137edf48a4b769eb0d2f8c99aee125b00f47d5 | 16,409 | py | Python | tests/test_feedexport.py | kooy/scrapy | 776129a9513e2b6ab6f7e8cda1dd3de66cbbff44 | [
"BSD-3-Clause"
] | 1 | 2017-03-04T20:21:43.000Z | 2017-03-04T20:21:43.000Z | tests/test_feedexport.py | kooy/scrapy | 776129a9513e2b6ab6f7e8cda1dd3de66cbbff44 | [
"BSD-3-Clause"
] | null | null | null | tests/test_feedexport.py | kooy/scrapy | 776129a9513e2b6ab6f7e8cda1dd3de66cbbff44 | [
"BSD-3-Clause"
] | 1 | 2021-01-24T17:57:21.000Z | 2021-01-24T17:57:21.000Z | from __future__ import absolute_import
import os
import csv
import json
from io import BytesIO
import tempfile
import shutil
from six.moves.urllib.parse import urlparse
from zope.interface.verify import verifyObject
from twisted.trial import unittest
from twisted.internet import defer
from scrapy.crawler import CrawlerRunner
from scrapy.settings import Settings
from tests.mockserver import MockServer
from w3lib.url import path_to_file_uri
import scrapy
from scrapy.extensions.feedexport import (
IFeedStorage, FileFeedStorage, FTPFeedStorage,
S3FeedStorage, StdoutFeedStorage,
BlockingFeedStorage)
from scrapy.utils.test import assert_aws_environ, get_s3_content_and_delete, get_crawler
from scrapy.utils.python import to_native_str
class FileFeedStorageTest(unittest.TestCase):
def test_store_file_uri(self):
path = os.path.abspath(self.mktemp())
uri = path_to_file_uri(path)
return self._assert_stores(FileFeedStorage(uri), path)
def test_store_file_uri_makedirs(self):
path = os.path.abspath(self.mktemp())
path = os.path.join(path, 'more', 'paths', 'file.txt')
uri = path_to_file_uri(path)
return self._assert_stores(FileFeedStorage(uri), path)
def test_store_direct_path(self):
path = os.path.abspath(self.mktemp())
return self._assert_stores(FileFeedStorage(path), path)
def test_store_direct_path_relative(self):
path = self.mktemp()
return self._assert_stores(FileFeedStorage(path), path)
def test_interface(self):
path = self.mktemp()
st = FileFeedStorage(path)
verifyObject(IFeedStorage, st)
@defer.inlineCallbacks
def _assert_stores(self, storage, path):
spider = scrapy.Spider("default")
file = storage.open(spider)
file.write(b"content")
yield storage.store(file)
self.assertTrue(os.path.exists(path))
try:
with open(path, 'rb') as fp:
self.assertEqual(fp.read(), b"content")
finally:
os.unlink(path)
class FTPFeedStorageTest(unittest.TestCase):
def test_store(self):
uri = os.environ.get('FEEDTEST_FTP_URI')
path = os.environ.get('FEEDTEST_FTP_PATH')
if not (uri and path):
raise unittest.SkipTest("No FTP server available for testing")
st = FTPFeedStorage(uri)
verifyObject(IFeedStorage, st)
return self._assert_stores(st, path)
@defer.inlineCallbacks
def _assert_stores(self, storage, path):
spider = scrapy.Spider("default")
file = storage.open(spider)
file.write(b"content")
yield storage.store(file)
self.assertTrue(os.path.exists(path))
try:
with open(path, 'rb') as fp:
self.assertEqual(fp.read(), b"content")
# again, to check s3 objects are overwritten
yield storage.store(BytesIO(b"new content"))
with open(path, 'rb') as fp:
self.assertEqual(fp.read(), b"new content")
finally:
os.unlink(path)
class BlockingFeedStorageTest(unittest.TestCase):
def get_test_spider(self, settings=None):
class TestSpider(scrapy.Spider):
name = 'test_spider'
crawler = get_crawler(settings_dict=settings)
spider = TestSpider.from_crawler(crawler)
return spider
def test_default_temp_dir(self):
b = BlockingFeedStorage()
tmp = b.open(self.get_test_spider())
tmp_path = os.path.dirname(tmp.name)
self.assertEqual(tmp_path, tempfile.gettempdir())
def test_temp_file(self):
b = BlockingFeedStorage()
tests_path = os.path.dirname(os.path.abspath(__file__))
spider = self.get_test_spider({'FEED_TEMPDIR': tests_path})
tmp = b.open(spider)
tmp_path = os.path.dirname(tmp.name)
self.assertEqual(tmp_path, tests_path)
def test_invalid_folder(self):
b = BlockingFeedStorage()
tests_path = os.path.dirname(os.path.abspath(__file__))
invalid_path = os.path.join(tests_path, 'invalid_path')
spider = self.get_test_spider({'FEED_TEMPDIR': invalid_path})
self.assertRaises(OSError, b.open, spider=spider)
class S3FeedStorageTest(unittest.TestCase):
@defer.inlineCallbacks
def test_store(self):
assert_aws_environ()
uri = os.environ.get('S3_TEST_FILE_URI')
if not uri:
raise unittest.SkipTest("No S3 URI available for testing")
storage = S3FeedStorage(uri)
verifyObject(IFeedStorage, storage)
file = storage.open(scrapy.Spider("default"))
expected_content = b"content: \xe2\x98\x83"
file.write(expected_content)
yield storage.store(file)
u = urlparse(uri)
content = get_s3_content_and_delete(u.hostname, u.path[1:])
self.assertEqual(content, expected_content)
class StdoutFeedStorageTest(unittest.TestCase):
@defer.inlineCallbacks
def test_store(self):
out = BytesIO()
storage = StdoutFeedStorage('stdout:', _stdout=out)
file = storage.open(scrapy.Spider("default"))
file.write(b"content")
yield storage.store(file)
self.assertEqual(out.getvalue(), b"content")
class FeedExportTest(unittest.TestCase):
class MyItem(scrapy.Item):
foo = scrapy.Field()
egg = scrapy.Field()
baz = scrapy.Field()
@defer.inlineCallbacks
def run_and_export(self, spider_cls, settings=None):
""" Run spider with specified settings; return exported data. """
tmpdir = tempfile.mkdtemp()
res_name = tmpdir + '/res'
defaults = {
'FEED_URI': 'file://' + res_name,
'FEED_FORMAT': 'csv',
}
defaults.update(settings or {})
try:
with MockServer() as s:
runner = CrawlerRunner(Settings(defaults))
yield runner.crawl(spider_cls)
with open(res_name, 'rb') as f:
defer.returnValue(f.read())
finally:
shutil.rmtree(tmpdir)
@defer.inlineCallbacks
def exported_data(self, items, settings):
"""
Return exported data which a spider yielding ``items`` would return.
"""
class TestSpider(scrapy.Spider):
name = 'testspider'
start_urls = ['http://localhost:8998/']
def parse(self, response):
for item in items:
yield item
data = yield self.run_and_export(TestSpider, settings)
defer.returnValue(data)
@defer.inlineCallbacks
def exported_no_data(self, settings):
"""
Return exported data which a spider yielding no ``items`` would return.
"""
class TestSpider(scrapy.Spider):
name = 'testspider'
start_urls = ['http://localhost:8998/']
def parse(self, response):
pass
data = yield self.run_and_export(TestSpider, settings)
defer.returnValue(data)
@defer.inlineCallbacks
def assertExportedCsv(self, items, header, rows, settings=None, ordered=True):
settings = settings or {}
settings.update({'FEED_FORMAT': 'csv'})
data = yield self.exported_data(items, settings)
reader = csv.DictReader(to_native_str(data).splitlines())
got_rows = list(reader)
if ordered:
self.assertEqual(reader.fieldnames, header)
else:
self.assertEqual(set(reader.fieldnames), set(header))
self.assertEqual(rows, got_rows)
@defer.inlineCallbacks
def assertExportedJsonLines(self, items, rows, settings=None):
settings = settings or {}
settings.update({'FEED_FORMAT': 'jl'})
data = yield self.exported_data(items, settings)
parsed = [json.loads(to_native_str(line)) for line in data.splitlines()]
rows = [{k: v for k, v in row.items() if v} for row in rows]
self.assertEqual(rows, parsed)
@defer.inlineCallbacks
def assertExportedXml(self, items, rows, settings=None):
settings = settings or {}
settings.update({'FEED_FORMAT': 'xml'})
data = yield self.exported_data(items, settings)
rows = [{k: v for k, v in row.items() if v} for row in rows]
import lxml.etree
root = lxml.etree.fromstring(data)
got_rows = [{e.tag: e.text for e in it} for it in root.findall('item')]
self.assertEqual(rows, got_rows)
def _load_until_eof(self, data, load_func):
bytes_output = BytesIO(data)
result = []
while True:
try:
result.append(load_func(bytes_output))
except EOFError:
break
return result
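# Usage sketch for the helper above (illustrative, not part of the original
# suite): two records serialized back-to-back are read until EOFError ends
# the loop.
#   data = pickle.dumps({'a': 1}) + pickle.dumps({'b': 2})
#   self._load_until_eof(data, load_func=pickle.load)  # -> [{'a': 1}, {'b': 2}]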
@defer.inlineCallbacks
def assertExportedPickle(self, items, rows, settings=None):
settings = settings or {}
settings.update({'FEED_FORMAT': 'pickle'})
data = yield self.exported_data(items, settings)
expected = [{k: v for k, v in row.items() if v} for row in rows]
import pickle
result = self._load_until_eof(data, load_func=pickle.load)
self.assertEqual(expected, result)
@defer.inlineCallbacks
def assertExportedMarshal(self, items, rows, settings=None):
settings = settings or {}
settings.update({'FEED_FORMAT': 'marshal'})
data = yield self.exported_data(items, settings)
expected = [{k: v for k, v in row.items() if v} for row in rows]
import marshal
result = self._load_until_eof(data, load_func=marshal.load)
self.assertEqual(expected, result)
@defer.inlineCallbacks
def assertExported(self, items, header, rows, settings=None, ordered=True):
yield self.assertExportedCsv(items, header, rows, settings, ordered)
yield self.assertExportedJsonLines(items, rows, settings)
yield self.assertExportedXml(items, rows, settings)
yield self.assertExportedPickle(items, rows, settings)
@defer.inlineCallbacks
def test_export_items(self):
# feed exporters use field names from Item
items = [
self.MyItem({'foo': 'bar1', 'egg': 'spam1'}),
self.MyItem({'foo': 'bar2', 'egg': 'spam2', 'baz': 'quux2'}),
]
rows = [
{'egg': 'spam1', 'foo': 'bar1', 'baz': ''},
{'egg': 'spam2', 'foo': 'bar2', 'baz': 'quux2'}
]
header = self.MyItem.fields.keys()
yield self.assertExported(items, header, rows, ordered=False)
@defer.inlineCallbacks
def test_export_no_items_not_store_empty(self):
formats = ('json',
'jsonlines',
'xml',
'csv',)
for fmt in formats:
settings = {'FEED_FORMAT': fmt}
data = yield self.exported_no_data(settings)
self.assertEqual(data, b'')
@defer.inlineCallbacks
def test_export_no_items_store_empty(self):
formats = (
('json', b'[\n\n]'),
('jsonlines', b''),
('xml', b'<?xml version="1.0" encoding="utf-8"?>\n<items></items>'),
('csv', b''),
)
for fmt, expctd in formats:
settings = {'FEED_FORMAT': fmt, 'FEED_STORE_EMPTY': True}
data = yield self.exported_no_data(settings)
self.assertEqual(data, expctd)
@defer.inlineCallbacks
def test_export_multiple_item_classes(self):
class MyItem2(scrapy.Item):
foo = scrapy.Field()
hello = scrapy.Field()
items = [
self.MyItem({'foo': 'bar1', 'egg': 'spam1'}),
MyItem2({'hello': 'world2', 'foo': 'bar2'}),
self.MyItem({'foo': 'bar3', 'egg': 'spam3', 'baz': 'quux3'}),
{'hello': 'world4', 'egg': 'spam4'},
]
# by default, Scrapy uses fields of the first Item for CSV and
# all fields for JSON Lines
header = self.MyItem.fields.keys()
rows_csv = [
{'egg': 'spam1', 'foo': 'bar1', 'baz': ''},
{'egg': '', 'foo': 'bar2', 'baz': ''},
{'egg': 'spam3', 'foo': 'bar3', 'baz': 'quux3'},
{'egg': 'spam4', 'foo': '', 'baz': ''},
]
rows_jl = [dict(row) for row in items]
yield self.assertExportedCsv(items, header, rows_csv, ordered=False)
yield self.assertExportedJsonLines(items, rows_jl)
# edge case: FEED_EXPORT_FIELDS==[] means the same as the default None
settings = {'FEED_EXPORT_FIELDS': []}
yield self.assertExportedCsv(items, header, rows_csv, settings=settings, ordered=False)
yield self.assertExportedJsonLines(items, rows_jl, settings)
# it is possible to override fields using FEED_EXPORT_FIELDS
header = ["foo", "baz", "hello"]
settings = {'FEED_EXPORT_FIELDS': header}
rows = [
{'foo': 'bar1', 'baz': '', 'hello': ''},
{'foo': 'bar2', 'baz': '', 'hello': 'world2'},
{'foo': 'bar3', 'baz': 'quux3', 'hello': ''},
{'foo': '', 'baz': '', 'hello': 'world4'},
]
yield self.assertExported(items, header, rows,
settings=settings, ordered=True)
@defer.inlineCallbacks
def test_export_dicts(self):
# When dicts are used, only keys from the first row are used as
# a header for CSV, and all fields are used for JSON Lines.
items = [
{'foo': 'bar', 'egg': 'spam'},
{'foo': 'bar', 'egg': 'spam', 'baz': 'quux'},
]
rows_csv = [
{'egg': 'spam', 'foo': 'bar'},
{'egg': 'spam', 'foo': 'bar'}
]
rows_jl = items
yield self.assertExportedCsv(items, ['egg', 'foo'], rows_csv, ordered=False)
yield self.assertExportedJsonLines(items, rows_jl)
@defer.inlineCallbacks
def test_export_feed_export_fields(self):
# FEED_EXPORT_FIELDS option allows to order export fields
# and to select a subset of fields to export, both for Items and dicts.
for item_cls in [self.MyItem, dict]:
items = [
item_cls({'foo': 'bar1', 'egg': 'spam1'}),
item_cls({'foo': 'bar2', 'egg': 'spam2', 'baz': 'quux2'}),
]
# export all columns
settings = {'FEED_EXPORT_FIELDS': 'foo,baz,egg'}
rows = [
{'egg': 'spam1', 'foo': 'bar1', 'baz': ''},
{'egg': 'spam2', 'foo': 'bar2', 'baz': 'quux2'}
]
yield self.assertExported(items, ['foo', 'baz', 'egg'], rows,
settings=settings, ordered=True)
# export a subset of columns
settings = {'FEED_EXPORT_FIELDS': 'egg,baz'}
rows = [
{'egg': 'spam1', 'baz': ''},
{'egg': 'spam2', 'baz': 'quux2'}
]
yield self.assertExported(items, ['egg', 'baz'], rows,
settings=settings, ordered=True)
@defer.inlineCallbacks
def test_export_encoding(self):
items = [{'foo': u'Test\xd6'}]
header = ['foo']
formats = {
'json': u'[\n{"foo": "Test\\u00d6"}\n]'.encode('utf-8'),
'jsonlines': u'{"foo": "Test\\u00d6"}\n'.encode('utf-8'),
'xml': u'<?xml version="1.0" encoding="utf-8"?>\n<items><item><foo>Test\xd6</foo></item></items>'.encode('utf-8'),
'csv': u'foo\r\nTest\xd6\r\n'.encode('utf-8'),
}
for fmt in formats:
settings = {'FEED_FORMAT': fmt}
data = yield self.exported_data(items, settings)
self.assertEqual(formats[fmt], data)
formats = {
'json': u'[\n{"foo": "Test\xd6"}\n]'.encode('latin-1'),
'jsonlines': u'{"foo": "Test\xd6"}\n'.encode('latin-1'),
'xml': u'<?xml version="1.0" encoding="latin-1"?>\n<items><item><foo>Test\xd6</foo></item></items>'.encode('latin-1'),
'csv': u'foo\r\nTest\xd6\r\n'.encode('latin-1'),
}
for fmt in formats:
settings = {'FEED_FORMAT': fmt, 'FEED_EXPORT_ENCODING': 'latin-1'}
data = yield self.exported_data(items, settings)
self.assertEqual(formats[fmt], data)
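# For reference, the same settings exercised by these tests drive real crawls;
# a hedged command-line sketch (the spider name is an assumption):
#   scrapy crawl myspider -s FEED_URI=items.csv -s FEED_FORMAT=csv \
#       -s FEED_EXPORT_FIELDS=foo,baz -s FEED_EXPORT_ENCODING=latin-1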
| 36.464444 | 130 | 0.589372 |
6ac72ee8f0686714f026f863150b9a539fbb86e4 | 3,512 | py | Python | moto/cognitoidentity/responses.py | andormarkus/moto | 67cda6d7d6f42118ccd7e2170e7ff0a1f92fa6a6 | [
"Apache-2.0"
] | null | null | null | moto/cognitoidentity/responses.py | andormarkus/moto | 67cda6d7d6f42118ccd7e2170e7ff0a1f92fa6a6 | [
"Apache-2.0"
] | null | null | null | moto/cognitoidentity/responses.py | andormarkus/moto | 67cda6d7d6f42118ccd7e2170e7ff0a1f92fa6a6 | [
"Apache-2.0"
] | null | null | null | from moto.core.responses import BaseResponse
from .models import cognitoidentity_backends
from .utils import get_random_identity_id
class CognitoIdentityResponse(BaseResponse):
@property
def backend(self):
return cognitoidentity_backends[self.region]
def create_identity_pool(self):
identity_pool_name = self._get_param("IdentityPoolName")
allow_unauthenticated_identities = self._get_param(
"AllowUnauthenticatedIdentities"
)
supported_login_providers = self._get_param("SupportedLoginProviders")
developer_provider_name = self._get_param("DeveloperProviderName")
open_id_connect_provider_arns = self._get_param("OpenIdConnectProviderARNs")
cognito_identity_providers = self._get_param("CognitoIdentityProviders")
saml_provider_arns = self._get_param("SamlProviderARNs")
pool_tags = self._get_param("IdentityPoolTags")
return self.backend.create_identity_pool(
identity_pool_name=identity_pool_name,
allow_unauthenticated_identities=allow_unauthenticated_identities,
supported_login_providers=supported_login_providers,
developer_provider_name=developer_provider_name,
open_id_connect_provider_arns=open_id_connect_provider_arns,
cognito_identity_providers=cognito_identity_providers,
saml_provider_arns=saml_provider_arns,
tags=pool_tags,
)
def update_identity_pool(self):
pool_id = self._get_param("IdentityPoolId")
pool_name = self._get_param("IdentityPoolName")
allow_unauthenticated = self._get_bool_param("AllowUnauthenticatedIdentities")
login_providers = self._get_param("SupportedLoginProviders")
provider_name = self._get_param("DeveloperProviderName")
provider_arns = self._get_param("OpenIdConnectProviderARNs")
identity_providers = self._get_param("CognitoIdentityProviders")
saml_providers = self._get_param("SamlProviderARNs")
pool_tags = self._get_param("IdentityPoolTags")
return self.backend.update_identity_pool(
identity_pool_id=pool_id,
identity_pool_name=pool_name,
allow_unauthenticated=allow_unauthenticated,
login_providers=login_providers,
provider_name=provider_name,
provider_arns=provider_arns,
identity_providers=identity_providers,
saml_providers=saml_providers,
tags=pool_tags,
)
def get_id(self):
return self.backend.get_id(identity_pool_id=self._get_param("IdentityPoolId"))
def describe_identity_pool(self):
return self.backend.describe_identity_pool(self._get_param("IdentityPoolId"))
def get_credentials_for_identity(self):
return self.backend.get_credentials_for_identity(self._get_param("IdentityId"))
def get_open_id_token_for_developer_identity(self):
return self.backend.get_open_id_token_for_developer_identity(
self._get_param("IdentityId") or get_random_identity_id(self.region)
)
def get_open_id_token(self):
return self.backend.get_open_id_token(
self._get_param("IdentityId") or get_random_identity_id(self.region)
)
def list_identities(self):
return self.backend.list_identities(
self._get_param("IdentityPoolId") or get_random_identity_id(self.region)
)
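# Usage sketch for the endpoints above (boto3/moto identifiers outside this
# file are assumptions based on their public APIs):
#
#   import boto3
#   from moto import mock_cognitoidentity
#
#   @mock_cognitoidentity
#   def test_create_identity_pool():
#       client = boto3.client("cognito-identity", region_name="us-east-1")
#       pool = client.create_identity_pool(
#           IdentityPoolName="test_pool",
#           AllowUnauthenticatedIdentities=False,
#       )
#       assert "IdentityPoolId" in pool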
| 42.829268 | 87 | 0.725228 |
ab392f461e782fba348ea0cb6ee3efebe4f42526 | 2,181 | py | Python | grappelli/dashboard/templatetags/grp_dashboard_tags.py | fragaria/django-grappelli | 3bac1d1a63260d6df0bfa15ef7c23aef5e3e9b49 | [
"BSD-3-Clause"
] | null | null | null | grappelli/dashboard/templatetags/grp_dashboard_tags.py | fragaria/django-grappelli | 3bac1d1a63260d6df0bfa15ef7c23aef5e3e9b49 | [
"BSD-3-Clause"
] | null | null | null | grappelli/dashboard/templatetags/grp_dashboard_tags.py | fragaria/django-grappelli | 3bac1d1a63260d6df0bfa15ef7c23aef5e3e9b49 | [
"BSD-3-Clause"
] | null | null | null | # coding: utf-8
"""
Dashboard template tags, the following dashboard tags are available:
* ``{% grp_render_dashboard %}``
* ``{% grp_render_dashboard_module %}``
To load the dashboard tags: ``{% load grp_dashboard_tags %}``.
"""
# DJANGO IMPORTS
from django import template
from django.core.urlresolvers import reverse
# GRAPPELLI IMPORTS
from grappelli.dashboard.utils import get_admin_site_name, get_index_dashboard
register = template.Library()
tag_func = register.inclusion_tag('grappelli/dashboard/dummy.html', takes_context=True)
def grp_render_dashboard(context, location='index', dashboard=None):
"""
Template tag that renders the dashboard. It takes two optional arguments:
``location``
The location of the dashboard; it can be 'index' (for the admin index
dashboard) or 'app_index' (for the app index dashboard). The default
value is 'index'.
``dashboard``
An instance of ``Dashboard``, if not given, the dashboard is retrieved
with the ``get_index_dashboard`` or ``get_app_index_dashboard``
functions, depending on the ``location`` argument.
"""
if dashboard is None:
dashboard = get_index_dashboard(context)
dashboard.init_with_context(context)
context.update({
'template': dashboard.template,
'dashboard': dashboard,
'admin_url': reverse('%s:index' % get_admin_site_name(context)),
})
return context
grp_render_dashboard = tag_func(grp_render_dashboard)
def grp_render_dashboard_module(context, module, index=None, subindex=None):
"""
Template tag that renders a given dashboard module. It takes a
``DashboardModule`` instance as its first parameter and an integer ``index``
as its second parameter, which is the index of the module in the dashboard;
the optional ``subindex`` is passed through to the template as well.
"""
module.init_with_context(context)
context.update({
'template': module.template,
'module': module,
'index': index,
'subindex': subindex,
'admin_url': reverse('%s:index' % get_admin_site_name(context)),
})
return context
grp_render_dashboard_module = tag_func(grp_render_dashboard_module)
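# Template usage, per the module docstring (Django template syntax shown as
# comments):
#
#   {% load grp_dashboard_tags %}
#   {% grp_render_dashboard %}
#   {% grp_render_dashboard_module module index %}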
| 31.157143 | 87 | 0.695552 |
def094ccaabe2da0a71e6ad70a1f0a5126e7e21e | 4,327 | py | Python | contrib/seeds/generate-seeds.py | wealthsilo/WealthSilo | 318df3155e8ede3e2c34fd4b02bf4a7ee8d53e65 | [
"MIT"
] | null | null | null | contrib/seeds/generate-seeds.py | wealthsilo/WealthSilo | 318df3155e8ede3e2c34fd4b02bf4a7ee8d53e65 | [
"MIT"
] | null | null | null | contrib/seeds/generate-seeds.py | wealthsilo/WealthSilo | 318df3155e8ede3e2c34fd4b02bf4a7ee8d53e65 | [
"MIT"
] | 1 | 2018-08-10T23:57:27.000Z | 2018-08-10T23:57:27.000Z | #!/usr/bin/env python3
# Copyright (c) 2014-2017 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Script to generate list of seed nodes for chainparams.cpp.
This script expects two text files in the directory that is passed as an
argument:
nodes_main.txt
nodes_test.txt
These files must consist of lines in the format
<ip>
<ip>:<port>
[<ipv6>]
[<ipv6>]:<port>
<onion>.onion
0xDDBBCCAA (IPv4 little-endian old pnSeeds format)
The output will be two data structures with the peers in binary format:
static SeedSpec6 pnSeed6_main[]={
...
}
static SeedSpec6 pnSeed6_test[]={
...
}
These should be pasted into `src/chainparamsseeds.h`.
'''
from base64 import b32decode
from binascii import a2b_hex
import sys, os
import re
# ipv4 in ipv6 prefix
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
# tor-specific ipv6 prefix
pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43])
def name_to_ipv6(addr):
if len(addr)>6 and addr.endswith('.onion'):
vchAddr = b32decode(addr[0:-6], True)
if len(vchAddr) != 16-len(pchOnionCat):
raise ValueError('Invalid onion %s' % addr)
return pchOnionCat + vchAddr
elif '.' in addr: # IPv4
return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
elif ':' in addr: # IPv6
sub = [[], []] # prefix, suffix
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
continue
x += 1 # :: skips to suffix
assert(x < 2)
else: # two bytes per component
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
elif addr.startswith('0x'): # IPv4-in-little-endian
return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
else:
raise ValueError('Could not parse address %s' % addr)
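# Examples of the mapping above (values follow directly from the branches):
#   name_to_ipv6('127.0.0.1')  -> pchIPv4 + bytearray([127, 0, 0, 1])  # 16 bytes
#   name_to_ipv6('::1')        -> bytearray(15 * [0x00] + [0x01])
#   name_to_ipv6('0x0100007f') -> pchIPv4 + bytearray([0x7f, 0x00, 0x00, 0x01])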
def parse_spec(s, defaultport):
match = re.match(r'\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
if match: # ipv6
host = match.group(1)
port = match.group(2)
elif s.count(':') > 1: # ipv6, no port
host = s
port = ''
else:
(host,_,port) = s.partition(':')
if not port:
port = defaultport
else:
port = int(port)
host = name_to_ipv6(host)
return (host,port)
def process_nodes(g, f, structname, defaultport):
g.write('static SeedSpec6 %s[] = {\n' % structname)
first = True
for line in f:
comment = line.find('#')
if comment != -1:
line = line[0:comment]
line = line.strip()
if not line:
continue
if not first:
g.write(',\n')
first = False
(host,port) = parse_spec(line, defaultport)
hoststr = ','.join(('0x%02x' % b) for b in host)
g.write(' {{%s}, %i}' % (hoststr, port))
g.write('\n};\n')
def main():
if len(sys.argv)<2:
print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
exit(1)
g = sys.stdout
indir = sys.argv[1]
g.write('#ifndef BITCOIN_CHAINPARAMSSEEDS_H\n')
g.write('#define BITCOIN_CHAINPARAMSSEEDS_H\n')
g.write('/**\n')
g.write(' * List of fixed seed nodes for the bitcoin network\n')
g.write(' * AUTOGENERATED by contrib/seeds/generate-seeds.py\n')
g.write(' *\n')
g.write(' * Each line contains a 16-byte IPv6 address and a port.\n')
g.write(' * IPv4 as well as onion addresses are wrapped inside a IPv6 address accordingly.\n')
g.write(' */\n')
with open(os.path.join(indir,'nodes_main.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_main', 45595)
g.write('\n')
with open(os.path.join(indir,'nodes_test.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_test', 5520)
g.write('#endif // BITCOIN_CHAINPARAMSSEEDS_H\n')
if __name__ == '__main__':
main()
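# Typical invocation (per the module docstring; paths are illustrative):
#   python3 contrib/seeds/generate-seeds.py contrib/seeds > src/chainparamsseeds.h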
| 31.355072 | 98 | 0.581465 |
c467220833b41e073acaf6043b8b6d2213aa3bf8 | 208 | py | Python | python/testData/quickFixes/PyRemoveParameterQuickFixTest/topLevelOverloadsAndImplementation.py | truthiswill/intellij-community | fff88cfb0dc168eea18ecb745d3e5b93f57b0b95 | [
"Apache-2.0"
] | 2 | 2019-04-28T07:48:50.000Z | 2020-12-11T14:18:08.000Z | python/testData/quickFixes/PyRemoveParameterQuickFixTest/topLevelOverloadsAndImplementation.py | truthiswill/intellij-community | fff88cfb0dc168eea18ecb745d3e5b93f57b0b95 | [
"Apache-2.0"
] | 173 | 2018-07-05T13:59:39.000Z | 2018-08-09T01:12:03.000Z | python/testData/quickFixes/PyRemoveParameterQuickFixTest/topLevelOverloadsAndImplementation.py | truthiswill/intellij-community | fff88cfb0dc168eea18ecb745d3e5b93f57b0b95 | [
"Apache-2.0"
] | 2 | 2020-03-15T08:57:37.000Z | 2020-04-07T04:48:14.000Z | from typing import overload
@overload
def foo() -> None:
pass
@overload
def foo(value: int) -> str:
pass
@overload
def foo(value: str) -> str:
pass
def foo(va<caret>lue=None):
return None | 12.235294 | 27 | 0.644231 |
e59255febc78ce9a7a46cbbb7a50761046cca30b | 2,667 | py | Python | custom_components/vapix/doors.py | jadson179/vapix | 152bd7d929a9d8dac0b5f35b02609bd46d7696fe | [
"MIT"
] | null | null | null | custom_components/vapix/doors.py | jadson179/vapix | 152bd7d929a9d8dac0b5f35b02609bd46d7696fe | [
"MIT"
] | 6 | 2021-02-15T21:47:25.000Z | 2021-06-22T11:48:41.000Z | custom_components/vapix/doors.py | jadson179/vapix | 152bd7d929a9d8dac0b5f35b02609bd46d7696fe | [
"MIT"
] | null | null | null | from requests import post
from requests.auth import HTTPDigestAuth
from homeassistant.core import HomeAssistant
import json
from homeassistant.core import ServiceCall
def open_remote_door(call: ServiceCall):
ip = call.data.get("ip", "")
username = call.data.get("username", "")
password = call.data.get("password", "")
doorid = call.data.get("doorid", "")
headers = {
"Content-Type": "application/json",
}
payload = {"tdc:AccessDoor": {"Token": doorid}}
post(
"http://{ip}/vapix/doorcontrol".format(ip=ip),
data=json.dumps(payload),
headers=headers,
auth=HTTPDigestAuth(username, password)
)
def access(call: ServiceCall):
ip = call.data.get("ip", "")
username = call.data.get("username", "")
password = call.data.get("password", "")
doorid = call.data.get("doorid", "")
headers = {
"Content-Type": "application/json",
}
payload = {"tdc:AccessDoor": {"Token": doorid}}
post(
"http://{ip}/vapix/doorcontrol".format(ip=ip),
data=json.dumps(payload),
headers=headers,
auth=HTTPDigestAuth(username, password)
)
def double_lock(call: ServiceCall):
ip = call.data.get("ip", "")
username = call.data.get("username", "")
password = call.data.get("password", "")
doorid = call.data.get("doorid", "")
headers = {
"Content-Type": "application/json",
}
payload = {"tdc:DoubleLockDoor": {"Token": doorid}}
post(
"http://{ip}/vapix/doorcontrol".format(ip=ip),
data=json.dumps(payload),
headers=headers,
auth=HTTPDigestAuth(username, password)
)
def unlock(call: ServiceCall):
ip = call.data.get("ip", "")
username = call.data.get("username", "")
password = call.data.get("password", "")
doorid = call.data.get("doorid", "")
headers = {
"Content-Type": "application/json",
}
payload = {"tdc:UnlockDoor": {"Token": doorid}}
post(
"http://{ip}/vapix/doorcontrol".format(ip=ip),
data=json.dumps(payload),
headers=headers,
auth=HTTPDigestAuth(username, password)
)
def lock(call: ServiceCall):
ip = call.data.get("ip", "")
username = call.data.get("username", "")
password = call.data.get("password", "")
doorid = call.data.get("doorid", "")
headers = {
"Content-Type": "application/json",
}
payload = {"tdc:LockDoor": {"Token": doorid}}
post(
"http://{ip}/vapix/doorcontrol".format(ip=ip),
data=json.dumps(payload),
headers=headers,
auth=HTTPDigestAuth(username, password)
)
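# Editor's note: open_remote_door() and access() issue the identical
# "tdc:AccessDoor" request. A hedged refactor sketch that factors out the
# shared POST (the helper name is an assumption, not part of VAPIX):
def _door_action(call: ServiceCall, action: str):
    """Send a single VAPIX door-control action for the door in `call`."""
    payload = {action: {"Token": call.data.get("doorid", "")}}
    post(
        "http://{ip}/vapix/doorcontrol".format(ip=call.data.get("ip", "")),
        data=json.dumps(payload),
        headers={"Content-Type": "application/json"},
        auth=HTTPDigestAuth(call.data.get("username", ""),
                            call.data.get("password", "")),
    )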
| 25.160377 | 56 | 0.592801 |
673abf999d12db3c2144b3e2c6a38ae8091b218b | 8,532 | py | Python | examples/mnli_example/main.distill.py | mariushes/TextBrewer | 182397e80e08c4c794edd17e8406852474ffa157 | [
"Apache-2.0"
] | null | null | null | examples/mnli_example/main.distill.py | mariushes/TextBrewer | 182397e80e08c4c794edd17e8406852474ffa157 | [
"Apache-2.0"
] | null | null | null | examples/mnli_example/main.distill.py | mariushes/TextBrewer | 182397e80e08c4c794edd17e8406852474ffa157 | [
"Apache-2.0"
] | null | null | null | import logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%Y/%m/%d %H:%M:%S',
level=logging.INFO,
)
logger = logging.getLogger("Main")
import os,random
import numpy as np
import torch
from utils_glue import output_modes, processors
from transformers import BertConfig, AdamW, get_linear_schedule_with_warmup, BertTokenizer
import config
from utils import divide_parameters, load_and_cache_examples
from modeling import BertForGLUESimple, BertForGLUESimpleAdaptorTrain, BertForGLUESimpleAdaptor
from textbrewer import DistillationConfig, TrainingConfig, GeneralDistiller
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler, DistributedSampler
from tqdm import tqdm
from utils_glue import compute_metrics
from functools import partial
import re
from predict_function import predict
from parse import parse_model_config, MODEL_CLASSES
def args_check(args):
if os.path.exists(args.output_dir) and os.listdir(args.output_dir):
logger.warning("Output directory () already exists and is not empty.")
if args.gradient_accumulation_steps < 1:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
args.gradient_accumulation_steps))
if not args.do_train and not args.do_predict:
raise ValueError("At least one of `do_train` or `do_predict` must be True.")
if args.local_rank == -1 or args.no_cuda:
if not args.no_cuda and not torch.cuda.is_available():
raise ValueError("No CUDA available!")
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
n_gpu = torch.cuda.device_count() if not args.no_cuda else 0
else:
device = torch.device("cuda", args.local_rank)
n_gpu = 1
torch.distributed.init_process_group(backend='nccl')
logger.info("device %s n_gpu %d distributed training %r", device, n_gpu, bool(args.local_rank != -1))
args.n_gpu = n_gpu
args.device = device
return device, n_gpu
def main():
#parse arguments
config.parse()
args = config.args
for k,v in vars(args).items():
logger.info(f"{k}:{v}")
#set seeds
torch.manual_seed(args.random_seed)
torch.cuda.manual_seed_all(args.random_seed)
np.random.seed(args.random_seed)
random.seed(args.random_seed)
#arguments check
device, n_gpu = args_check(args)
os.makedirs(args.output_dir, exist_ok=True)
forward_batch_size = int(args.train_batch_size / args.gradient_accumulation_steps)
args.forward_batch_size = forward_batch_size
#load config
teachers_and_student = parse_model_config(args.model_config_json)
#Prepare GLUE task
processor = processors[args.task_name]()
args.output_mode = output_modes[args.task_name]
label_list = processor.get_labels()
num_labels = len(label_list)
#read data
train_dataset = None
eval_datasets = None
num_train_steps = None
tokenizer_S = teachers_and_student['student']['tokenizer']
prefix_S = teachers_and_student['student']['prefix']
if args.do_train:
train_dataset = load_and_cache_examples(
args, args.task_name,tokenizer_S, prefix=prefix_S,evaluate=False)
if args.do_predict:
eval_datasets = []
eval_task_names = ("mnli", "mnli-mm") if args.task_name == "mnli" else (args.task_name,)
for eval_task in eval_task_names:
eval_datasets.append(load_and_cache_examples(args, eval_task, tokenizer_S, prefix=prefix_S, evaluate=True))
logger.info("Data loaded")
#Build Model and load checkpoint
if args.do_train:
if True:
teacher = teachers_and_student['teachers'][0]
model_type_T = teacher['model_type']
model_config_T = teacher['config']
checkpoint_T = teacher['checkpoint']
_,_,model_class_T = MODEL_CLASSES[model_type_T]
model_T = model_class_T(model_config_T, num_labels=num_labels)
state_dict_T = torch.load(checkpoint_T,map_location='cpu')
missing_keys, un_keys = model_T.load_state_dict(state_dict_T,strict=True)
logger.info(f"Teacher Model {model_type_T} loaded")
#model_T = torch.hub.load('huggingface/pytorch-transformers', 'modelForSequenceClassification', 'gchhablani/bert-base-cased-finetuned-mnli', output_hidden_states=True)
model_T.to(device)
student = teachers_and_student['student']
model_type_S = student['model_type']
model_config_S = student['config']
checkpoint_S = student['checkpoint']
_,_,model_class_S = MODEL_CLASSES[model_type_S]
model_S = model_class_S(model_config_S, num_labels=num_labels)
if checkpoint_S is not None:
state_dict_S = torch.load(checkpoint_S, map_location='cpu')
missing_keys, un_keys = model_S.load_state_dict(state_dict_S,strict=False)
logger.info(f"missing keys:{missing_keys}")
logger.info(f"unexpected keys:{un_keys}")
else:
logger.warning("Initializing student randomly")
logger.info("Student Model loaded")
model_S.to(device)
if args.local_rank != -1 or n_gpu > 1:
if args.local_rank != -1:
raise NotImplementedError
elif n_gpu > 1:
if args.do_train:
model_T = torch.nn.DataParallel(model_T) #,output_device=n_gpu-1)
model_S = torch.nn.DataParallel(model_S) #,output_device=n_gpu-1)
if args.do_train:
#parameters
params = list(model_S.named_parameters())
all_trainable_params = divide_parameters(params, lr=args.learning_rate)
logger.info("Length of all_trainable_params: %d", len(all_trainable_params))
if args.local_rank == -1:
train_sampler = RandomSampler(train_dataset)
else:
raise NotImplementedError
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.forward_batch_size,drop_last=True)
num_train_steps = int(len(train_dataloader)//args.gradient_accumulation_steps * args.num_train_epochs)
########## DISTILLATION ###########
train_config = TrainingConfig(
gradient_accumulation_steps = args.gradient_accumulation_steps,
ckpt_frequency = args.ckpt_frequency,
log_dir = args.output_dir,
output_dir = args.output_dir,
fp16 = args.fp16,
device = args.device)
from matches import matches
intermediate_matches = None
if isinstance(args.matches,(list,tuple)):
intermediate_matches = []
for match in args.matches:
intermediate_matches += matches[match]
logger.info(f"{intermediate_matches}")
distill_config = DistillationConfig(
temperature=args.temperature,
intermediate_matches=intermediate_matches)
logger.info(f"{train_config}")
logger.info(f"{distill_config}")
adaptor_T = BertForGLUESimpleAdaptor
adaptor_S = BertForGLUESimpleAdaptor
distiller = GeneralDistiller(train_config = train_config,
distill_config = distill_config,
model_T = model_T, model_S = model_S,
adaptor_T = adaptor_T,
adaptor_S = adaptor_S)
optimizer = AdamW(all_trainable_params,lr=args.learning_rate)
scheduler_class = get_linear_schedule_with_warmup
scheduler_args = {'num_warmup_steps': int(args.warmup_proportion*num_train_steps),
'num_training_steps': num_train_steps}
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Forward batch size = %d", forward_batch_size)
logger.info(" Num backward steps = %d", num_train_steps)
callback_func = partial(predict, eval_datasets=eval_datasets, args=args)
with distiller:
distiller.train(optimizer, scheduler_class=scheduler_class, scheduler_args=scheduler_args,
dataloader=train_dataloader, num_epochs=args.num_train_epochs,
callback=callback_func, max_grad_norm=1)
if not args.do_train and args.do_predict:
res = predict(model_S, eval_datasets, step=0, args=args)
print(res)
if __name__ == "__main__":
main()
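# For reference: a TextBrewer adaptor is a callable that maps (batch,
# model_outputs) to a dict of tensors the distiller consumes. A minimal
# sketch using TextBrewer's documented key convention (the output layout is
# an assumption; the concrete BertForGLUESimpleAdaptor lives in modeling.py):
#
#   def simple_adaptor(batch, model_outputs):
#       logits, hidden_states = model_outputs[0], model_outputs[1]
#       return {'logits': logits, 'hidden': hidden_states}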
| 42.237624 | 179 | 0.680497 |
cc361938c9c7cb7763411bada91158fe8e10c01d | 3,743 | py | Python | test/test_parser/test_convert.py | skirpichev/Mathics | 318e06dea8f1c70758a50cb2f95c9900150e3a68 | [
"Apache-2.0"
] | 1,920 | 2015-01-06T17:56:26.000Z | 2022-03-24T14:33:29.000Z | test/test_parser/test_convert.py | skirpichev/Mathics | 318e06dea8f1c70758a50cb2f95c9900150e3a68 | [
"Apache-2.0"
] | 868 | 2015-01-04T06:19:40.000Z | 2022-03-14T13:39:38.000Z | test/test_parser/test_convert.py | skirpichev/Mathics | 318e06dea8f1c70758a50cb2f95c9900150e3a68 | [
"Apache-2.0"
] | 240 | 2015-01-16T13:31:26.000Z | 2022-03-12T12:52:46.000Z | import unittest
import random
import sys
from mathics_scanner import (
IncompleteSyntaxError,
InvalidSyntaxError,
ScanError,
SingleLineFeeder,
)
from mathics.core.definitions import Definitions
from mathics.core.parser import parse
from mathics.core.expression import (
Symbol,
Integer,
Integer0,
Integer1,
Expression,
Real,
Rational,
String,
)
definitions = Definitions(add_builtin=True)
class ConvertTests(unittest.TestCase):
def parse(self, code):
return parse(definitions, SingleLineFeeder(code))
def check(self, expr1, expr2):
if isinstance(expr1, str):
expr1 = self.parse(expr1)
if isinstance(expr2, str):
expr2 = self.parse(expr2)
if expr1 is None:
assert expr2 is None
else:
assert expr1.sameQ(expr2)
def scan_error(self, string):
self.assertRaises(ScanError, self.parse, string)
def incomplete_error(self, string):
self.assertRaises(IncompleteSyntaxError, self.parse, string)
def invalid_error(self, string):
self.assertRaises(InvalidSyntaxError, self.parse, string)
def testSymbol(self):
self.check("xX", Symbol("Global`xX"))
self.check("context`name", Symbol("context`name"))
self.check("`name", Symbol("Global`name"))
self.check("`context`name", Symbol("Global`context`name"))
def testInteger(self):
self.check("0", Integer0)
self.check("1", Integer1)
self.check("-1", Integer(-1))
self.check("8^^23", Integer(19))
self.check("10*^3", Integer(10000))
self.check("10*^-3", Rational(1, 100))
self.check("8^^23*^2", Integer(1216))
n = random.randint(-sys.maxsize, sys.maxsize)
self.check(str(n), Integer(n))
n = random.randint(sys.maxsize, sys.maxsize * sys.maxsize)
self.check(str(n), Integer(n))
def testReal(self):
self.check("1.5", Real("1.5"))
self.check("1.5`", Real("1.5"))
self.check("0.0", Real(0))
self.check("-1.5`", Real("-1.5"))
self.check("0.00000000000000000", "0.")
self.check("0.000000000000000000`", "0.")
self.check("0.000000000000000000", "0.``18")
def testString(self):
self.check(r'"abc"', String("abc"))
self.incomplete_error(r'"abc')
self.check(r'"abc(*def*)"', String("abc(*def*)"))
self.check(r'"a\"b\\c"', String(r'a"b\c'))
self.incomplete_error(r'"\"')
self.invalid_error(r'\""')
def testAccuracy(self):
self.scan_error("1.5``")
self.check("1.0``20", Real("1.0", p=20))
@unittest.expectedFailure
def testLowAccuracy(self):
self.check("1.4``0", Real(0))
self.check("1.4``-20", Real(0))
def testPrecision(self):
self.check("1.`20", Real(1, p=20))
self.check("1.00000000000000000000000`", Real(1))
self.check("1.00000000000000000000000`30", Real(1, p=30))
@unittest.expectedFailure
def testLowPrecision(self):
self.check("1.4`1", Real("1", p=1))
self.check("1.4`0", Real(0, p=0))
self.check("1.4`-5", Real(0, p=0))
def testDerivative(self):
f = Symbol("Global`f")
self.check("f'", Expression(Expression("Derivative", Integer1), f))
self.check("f''", Expression(Expression("Derivative", Integer(2)), f))
self.check(
"(f'')'''",
Expression(
Expression("Derivative", Integer(3)),
Expression(Expression("Derivative", Integer(2)), f),
),
)
self.check("Derivative[f]", Expression("Derivative", f))
self.check("Derivative[1][f]'", "(f')'")
| 29.706349 | 78 | 0.585627 |
3d368f7e32e71ab2cde5159066d780d058e3c1b6 | 22,512 | py | Python | fd/vectorisation.py | ZakariaELHAJOUY/field-delineation | 2bd13d09b1fb107603fe5ccb862584e01aa5b067 | [
"MIT"
] | 53 | 2021-01-20T16:32:16.000Z | 2022-03-30T21:42:12.000Z | fd/vectorisation.py | ZakariaELHAJOUY/field-delineation | 2bd13d09b1fb107603fe5ccb862584e01aa5b067 | [
"MIT"
] | 5 | 2021-02-13T17:41:53.000Z | 2022-03-29T14:45:11.000Z | fd/vectorisation.py | ZakariaELHAJOUY/field-delineation | 2bd13d09b1fb107603fe5ccb862584e01aa5b067 | [
"MIT"
] | 21 | 2021-02-19T16:15:43.000Z | 2022-03-23T09:21:37.000Z | #
# Copyright (c) Sinergise, 2019 -- 2021.
#
# This file belongs to subproject "field-delineation" of project NIVA (www.niva4cap.eu).
# All rights reserved.
#
# This source code is licensed under the MIT license found in the LICENSE
# file in the root directory of this source tree.
#
import os
import time
import logging
from glob import glob
import copy
from functools import partial
from typing import List, Tuple
from dataclasses import dataclass
import rasterio
import numpy as np
import pandas as pd
import geopandas as gpd
from geopandas.tools import sjoin
from fs.copy import copy_dir
import pandarallel
from shapely.geometry import Polygon
from shapely.ops import unary_union
from lxml import etree
from sentinelhub import CRS
from .utils import BaseConfig, multiprocess, prepare_filesystem
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.DEBUG)
@dataclass
class VectorisationConfig(BaseConfig):
tiffs_folder: str
time_intervals: List[str]
utms: List[str]
shape: Tuple[int, int]
buffer: Tuple[int, int]
weights_file: str
vrt_dir: str
predictions_dir: str
contours_dir: str
max_workers: int = 4
chunk_size: int = 500
chunk_overlap: int = 10
threshold: float = 0.6
cleanup: bool = True
skip_existing: bool = True
rows_merging: bool = True
@dataclass
class MergeUTMsConfig(BaseConfig):
time_intervals: List[str]
utms: List[str]
contours_dir: str
resulting_crs: str
max_area: float = None
simplify_tolerance: float = 2.5
n_workers: int = 34
average_function = """
import numpy as np
def average(in_ar, out_ar, xoff, yoff, xsize, ysize, raster_xsize, raster_ysize, buf_radius, gt, **kwargs):
p, w = np.split(np.array(in_ar), 2, axis=0)
w_sum = np.sum(w, axis=0)
p_sum = np.sum(p, axis=0)
v = np.sum(p*w, axis=0)
out_ar[:] = np.clip(np.where(w_sum==0, p_sum, v/w_sum), a_min=0., a_max=1.)
"""
def p_simplify(r, tolerance=2.5):
""" Helper function to parallelise simplification of geometries """
return r.geometry.simplify(tolerance)
def p_union(r):
""" Helper function to parallelise union of geometries """
return r.l_geom.union(r.r_geom)
def get_weights(shape: Tuple[int, int], buffer: Tuple[int, int], low: float = 0, high: float = 1) -> np.ndarray:
""" Create weights array
Create a numpy array of the given shape whose values ramp linearly from `low` to `high` over
a 2*buffer margin at each edge, and are 1 elsewhere.
"""
weight = np.ones(shape)
weight[..., :2 * buffer[0]] = np.tile(np.linspace(low, high, 2 * buffer[0]), shape[0]).reshape(
(shape[0], 2 * buffer[0]))
weight[..., -2 * buffer[0]:] = np.tile(np.linspace(high, low, 2 * buffer[0]), shape[0]).reshape(
(shape[0], 2 * buffer[0]))
weight[:2 * buffer[1], ...] = weight[:2 * buffer[1], ...] * np.repeat(np.linspace(low, high, 2 * buffer[1]),
shape[1]).reshape(
(2 * buffer[1], shape[1]))
weight[-2 * buffer[1]:, ...] = weight[-2 * buffer[1]:, ...] * np.repeat(np.linspace(high, low, 2 * buffer[1]),
shape[1]).reshape(
(2 * buffer[1], shape[1]))
return weight.astype(np.float32)
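# Sanity-check sketch for the weighting scheme above: values ramp from 0 to 1
# across the 2*buffer margin and stay 1 in the interior.
#   w = get_weights(shape=(500, 500), buffer=(10, 10))
#   assert w[250, 250] == 1.0 and w[250, 0] == 0.0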
def write_vrt(files: List[str], weights_file: str, out_vrt: str, function: str = average_function):
""" Write virtual raster
Function that will first build a temp.vrt for the input files, and then modify it for purposes of spatial merging
of overlaps using the provided function
"""
# build a vrt from list of input files
gdal_str = f'gdalbuildvrt temp.vrt -b 1 {" ".join(files)}'
os.system(gdal_str)
# fix the vrt
root = etree.parse('temp.vrt').getroot()
vrtrasterband = root.find('VRTRasterBand')
rasterbandchildren = list(vrtrasterband)
root.remove(vrtrasterband)
dict_attr = {'dataType': 'Float32', 'band': '1', 'subClass': 'VRTDerivedRasterBand'}
raster_band_tag = etree.SubElement(root, 'VRTRasterBand', dict_attr)
# Add children tags to the derivedRasterBand tag
pix_func_tag = etree.SubElement(raster_band_tag, 'PixelFunctionType')
pix_func_tag.text = 'average'
pix_func_tag2 = etree.SubElement(raster_band_tag, 'PixelFunctionLanguage')
pix_func_tag2.text = 'Python'
pix_func_code = etree.SubElement(raster_band_tag, 'PixelFunctionCode')
pix_func_code.text = etree.CDATA(function)
new_sources = []
for child in rasterbandchildren:
raster_band_tag.append(child)
if child.tag == 'ComplexSource':
new_source = copy.deepcopy(child)
new_source.find('SourceFilename').text = weights_file
new_sources.append(new_source)
for new_source in new_sources:
raster_band_tag.append(new_source)
os.remove('temp.vrt')
with open(out_vrt, 'w') as out:
out.writelines(etree.tounicode(root, pretty_print=True))
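# Note: evaluating the embedded Python pixel function requires GDAL's
# python-VRT support, which is why downstream gdal_translate calls pass
# `--config GDAL_VRT_ENABLE_PYTHON YES`.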
def run_contour(col: int, row: int, size: int, vrt_file: str, threshold: float = 0.6,
contours_dir: str = '.', cleanup: bool = True) -> Tuple[str, bool, str]:
""" Will create a (small) tiff file over a srcwin (row, col, size, size) and run gdal_contour on it. """
file = f'merged_{row}_{col}_{size}_{size}'
try:
gdal_str = f'gdal_translate --config GDAL_VRT_ENABLE_PYTHON YES -srcwin {col} {row} {size} {size} {vrt_file} {file}.tiff'
os.system(gdal_str)
gdal_str = f'gdal_contour -of gpkg {file}.tiff {contours_dir}/{file}.gpkg -i {threshold} -amin amin -amax amax -p'
os.system(gdal_str)
if cleanup:
os.remove(f'{file}.tiff')
return f'{contours_dir}/{file}.gpkg', True, None
except Exception as exc:
return f'{contours_dir}/{file}.gpkg', False, exc
def runner(arg: List):
"""Function that wraps run_contour to be used with sg_utils.postprocessing"""
return run_contour(*arg)
def unpack_contours(df_filename: str, threshold: float = 0.6) -> gpd.GeoDataFrame:
""" Convert multipolygon contour row above given threshold into multiple Polygon rows. """
df = gpd.read_file(df_filename)
if len(df) <= 2:
if len(df[df.amax > threshold]):
return gpd.GeoDataFrame(geometry=[geom for geom in df[df.amax > threshold].iloc[0].geometry], crs=df.crs)
else:
return gpd.GeoDataFrame(geometry=[], crs=df.crs)
raise ValueError(
f"gdal_contour dataframe {df_filename} has {len(df)} contours, "
f"but should have maximal 2 entries (one below and/or one above threshold)!")
def split_intersecting(df: gpd.GeoDataFrame, overlap: Polygon) -> Tuple[gpd.GeoDataFrame, gpd.GeoDataFrame]:
""" Find entries that overlap with a given polygon """
index = df.sindex
possible_matches_index = list(index.intersection(overlap.bounds))
possible_matches = df.iloc[possible_matches_index]
precise_matches = possible_matches.intersects(overlap).index
if len(precise_matches):
return df[~df.index.isin(precise_matches)].copy(), df[df.index.isin(precise_matches)].copy()
else:
return df, gpd.GeoDataFrame(geometry=[], crs=df.crs)
def merge_intersecting(df1: gpd.GeoDataFrame, df2: gpd.GeoDataFrame) -> gpd.GeoDataFrame:
""" Merge two dataframes of geometries into one """
multi = unary_union(list(df1.geometry) + list(df2.geometry))
if multi.is_empty:
return gpd.GeoDataFrame(geometry=[], crs=df1.crs)
if multi.geom_type == 'Polygon':
return gpd.GeoDataFrame(geometry=[multi], crs=df1.crs)
return gpd.GeoDataFrame(geometry=[g for g in multi.geoms], crs=df1.crs)
def concat_consecutive(merged: gpd.GeoDataFrame, previous: gpd.GeoDataFrame, current: gpd.GeoDataFrame,
current_offset: Tuple[int, int], overlap_size: Tuple[int, int] = (10, 500),
direction: Tuple[int, int] = (490, 0), transform=None) -> Tuple[gpd.GeoDataFrame,
gpd.GeoDataFrame]:
list_dfs = []
if merged is not None:
list_dfs = [merged]
if not (len(previous) or len(current)):
if merged is not None:
return merged, gpd.GeoDataFrame(geometry=[], crs=merged.crs)
else:
return merged, gpd.GeoDataFrame(geometry=[])
x, y = current_offset
a, b = overlap_size
overlap_poly = Polygon.from_bounds(*(transform * (x, y)), *(transform * (x + a, y + b)))
if len(previous) == 0:
return merged, current
if len(current) == 0:
merged = gpd.GeoDataFrame(pd.concat([merged, previous]), crs=previous.crs)
return merged, gpd.GeoDataFrame(geometry=[], crs=merged.crs)
previous_non, previous_int = split_intersecting(previous, overlap_poly)
current_non, current_int = split_intersecting(current, overlap_poly)
intersecting = merge_intersecting(previous_int, current_int)
if len(intersecting):
# check if intersecting "touches" the "right edge", if so, add it to current_non
x = x + direction[0]
y = y + direction[1]
overlap_poly_end = Polygon.from_bounds(*(transform * (x, y)), *(transform * (x + a, y + b)))
intersecting_ok, intersecting_next = split_intersecting(intersecting, overlap_poly_end)
merged = gpd.GeoDataFrame(pd.concat(list_dfs + [previous_non, intersecting_ok]), crs=previous.crs)
intersecting_next = gpd.GeoDataFrame(pd.concat([intersecting_next, current_non]), crs=previous.crs)
return merged, intersecting_next
return gpd.GeoDataFrame(pd.concat(list_dfs + [previous_non]), crs=previous.crs), current_non
def _process_row(row: int, vrt_file: str, vrt_dim: Tuple, contours_dir: str = '.', size: int = 500, buff: int = 10,
threshold: float = 0.6, cleanup: bool = True, transform=None, skip_existing: bool = True) \
-> Tuple[str, bool, str]:
merged_file = f'{contours_dir}/merged_row_{row}.gpkg'
if skip_existing and os.path.exists(merged_file):
return merged_file, True, 'Loaded existing file ...'
try:
col = 0
merged = None
prev_name, finished, exc = run_contour(col, row, size, vrt_file, threshold, contours_dir, cleanup)
if not finished:
return merged_file, finished, exc
prev = unpack_contours(prev_name, threshold=threshold)
if cleanup:
os.remove(prev_name)
while col <= (vrt_dim[0] - size):
col = col + size - buff
offset = col, row
cur_name, finished, exc = run_contour(col, row, size, vrt_file, threshold, contours_dir, cleanup)
if not finished:
return merged_file, finished, exc
cur = unpack_contours(cur_name, threshold=threshold)
merged, prev = concat_consecutive(merged, prev, cur, offset, (buff, size), (size - buff, 0), transform)
if cleanup:
os.remove(cur_name)
merged = gpd.GeoDataFrame(pd.concat([merged, prev]), crs=prev.crs)
merged.to_file(merged_file, driver='GPKG')
return merged_file, True, None
except Exception as exc:
return merged_file, False, exc
def merge_rows(rows: List[str], vrt_file: str, size: int = 500, buffer: int = 10) -> gpd.GeoDataFrame:
with rasterio.open(vrt_file) as src:
meta = src.meta
vrt_dim = meta['width'], meta['height']
transform = meta['transform']
merged = None
prev_name = rows[0]
prev = gpd.read_file(prev_name)
for ridx, cur_name in enumerate(rows[1:], start=1):
cur = gpd.read_file(cur_name)
merged, prev = concat_consecutive(merged, prev, cur, (0, ridx * (size - buffer)), (vrt_dim[0], buffer),
(0, size - buffer), transform)
merged = gpd.GeoDataFrame(pd.concat([merged, prev]), crs=prev.crs)
return merged
def spatial_merge_contours(vrt_file: str, contours_dir: str = '.', size: int = 500, buffer: int = 10,
threshold: float = 0.6, cleanup: bool = True, skip_existing: bool = True,
rows_merging: bool = True, max_workers: int = 4) -> gpd.GeoDataFrame:
results = process_rows(vrt_file=vrt_file, contours_dir=contours_dir, size=size, buffer=buffer, threshold=threshold,
cleanup=cleanup, skip_existing=skip_existing, max_workers=max_workers)
failed = [(file, excp) for file, finished, excp in results if not finished]
if len(failed):
LOGGER.warning('Some rows failed:')
LOGGER.warning('\n'.join([f'{file}: {excp}' for file, excp in failed]))
return None
if rows_merging:
rows = [file for file, _, _ in results]
merged = merge_rows(rows, vrt_file=vrt_file, size=size, buffer=buffer)
if cleanup:
for file in rows:
os.remove(file)
return merged
return None
def process_rows(vrt_file: str, contours_dir: str = '.', size: int = 500, buffer: int = 10,
threshold: float = 0.6, cleanup: bool = True, skip_existing: bool = True,
max_workers: int = 4) -> Tuple[str, bool, str]:
with rasterio.open(vrt_file) as src:
meta = src.meta
vrt_dim = meta['width'], meta['height']
transform = meta['transform']
partial_process_row = partial(_process_row, vrt_file=vrt_file, vrt_dim=vrt_dim, contours_dir=contours_dir,
size=size, buff=buffer, threshold=threshold, cleanup=cleanup,
skip_existing=skip_existing,
transform=transform)
rows = list(range(0, vrt_dim[1], size - buffer))
return multiprocess(partial_process_row, rows, max_workers=max_workers)
def merging_rows(row_dict: dict, skip_existing: bool = True) -> str:
""" merge row files into a single file per utm """
start = time.time()
merged_contours_file = f'{row_dict["contours_dir"]}/merged_{row_dict["time_interval"]}_{row_dict["utm"]}.gpkg'
if skip_existing and os.path.exists(merged_contours_file):
return merged_contours_file
merged = merge_rows(rows=row_dict['rows'], vrt_file=row_dict['vrt_file'],
size=row_dict['chunk_size'], buffer=row_dict['chunk_overlap'])
merged.to_file(merged_contours_file, driver='GPKG')
LOGGER.info(f'Merging rows and writing results for {row_dict["time_interval"]}/{row_dict["utm"]} done'
f' in {(time.time() - start) / 60} min!\n\n')
return merged_contours_file
def run_vectorisation(config: VectorisationConfig) -> List[str]:
""" Run vectorisation process on entire AOI for the given time intervals """
filesystem = prepare_filesystem(config)
LOGGER.info(f'Copy tiff files locally to {config.predictions_dir}')
for time_interval in config.time_intervals:
if not os.path.exists(f'{config.predictions_dir}/{time_interval}'):
if not filesystem.exists(f'{config.tiffs_folder}/{time_interval}/'):
filesystem.makedirs(f'{config.tiffs_folder}/{time_interval}/')
copy_dir(filesystem, f'{config.tiffs_folder}/{time_interval}/',
f'{config.predictions_dir}/', f'{time_interval}')
LOGGER.info(f'Move files to utm folders')
for time_interval in config.time_intervals:
for utm in config.utms:
utm_dir = f'{config.predictions_dir}/{time_interval}/utm{utm}'
os.makedirs(utm_dir, exist_ok=True)
tiffs_to_move = glob(f'{config.predictions_dir}/{time_interval}/*-{utm}.tiff')
for tiff in tiffs_to_move:
tiff_name = os.path.basename(tiff)
os.rename(tiff, f'{utm_dir}/{tiff_name}')
LOGGER.info(f'Create weights file {config.weights_file}')
with rasterio.open(config.weights_file, 'w', driver='gTIFF', width=config.shape[0], height=config.shape[1], count=1,
dtype=np.float32) as dst:
dst.write_band(1, get_weights(config.shape, config.buffer))
rows = []
for time_interval in config.time_intervals:
for utm in config.utms:
start = time.time()
LOGGER.info(f'Running contours for {time_interval}/{utm}!')
contours_dir = f'{config.contours_dir}/{time_interval}/utm{utm}/'
LOGGER.info(f'Create contour folder {contours_dir}')
os.makedirs(contours_dir, exist_ok=True)
predictions_dir = f'{config.predictions_dir}/{time_interval}/utm{utm}/'
tifs = glob(f'{predictions_dir}*.tiff')
output_vrt = f'{config.vrt_dir}/vrt_{time_interval}_{utm}.vrt'
write_vrt(tifs, config.weights_file, output_vrt)
results = process_rows(output_vrt, contours_dir,
max_workers=config.max_workers,
size=config.chunk_size,
buffer=config.chunk_overlap,
threshold=config.threshold,
cleanup=config.cleanup,
skip_existing=config.skip_existing)
failed = [(file, excp) for file, finished, excp in results if not finished]
if len(failed):
LOGGER.warning('Some rows failed:')
LOGGER.warning('\n'.join([f'{file}: {excp}' for file, excp in failed]))
# raise Exception(f'{len(failed)} rows failed! ')
LOGGER.warning(f'{len(failed)} rows failed! ')
rows.append({'time_interval': time_interval,
'utm': utm,
'vrt_file': output_vrt,
'rows': [file for file, finished, _ in results if finished],
'chunk_size': config.chunk_size,
'chunk_overlap': config.chunk_overlap,
'contours_dir': config.contours_dir
})
LOGGER.info(f'Row contours processing for {time_interval}/{utm} done in {(time.time() - start) / 60} min!\n\n')
list_of_merged_files = multiprocess(merging_rows, rows, max_workers=config.max_workers)
return list_of_merged_files
def utm_zone_merging(config: MergeUTMsConfig, overlap_df: gpd.GeoDataFrame, zones: gpd.GeoDataFrame,
parallel: bool = False):
"""
Function to perform UTM zone merging. Currently supports merging of exactly 2 UTM zones.
This is somewhat of a concept; the code above (computing the overlap) still has to be run before this one.
"""
assert len(config.utms) == 2, 'The function supports merging of 2 UTMs only at the moment'
assert CRS(config.resulting_crs).pyproj_crs().axis_info[0].unit_name == 'metre', \
'The resulting CRS should have axis units in metres.'
if parallel:
pandarallel.pandarallel.initialize(nb_workers=config.n_workers, progress_bar=True)
for time_window in config.time_intervals:
LOGGER.info(f'merging utms for {time_window} ...')
merged_dfs = [gpd.read_file(f'{config.contours_dir}/merged_{time_window}_{utm}.gpkg')
for utm in config.utms]
# to speed up some processing, remove the biggest fields beforehand
LOGGER.info(f'\tfilter vectors by area ...')
if config.max_area:
merged_dfs = [merged_df[merged_df.geometry.area < config.max_area] for merged_df in merged_dfs]
LOGGER.info(f'\tsplitting away non-overlapping zones ...')
non_overlapping_utms, overlapping_utms = [], []
for merged_df, utm in zip(merged_dfs, config.utms):
non_over, over = split_intersecting(merged_df, overlap_df.to_crs(epsg=int(utm)).iloc[0].geometry)
zone = zones[zones['crs'] == utm].to_crs(epsg=int(utm)).iloc[0].geometry
over['distance'] = over.geometry.centroid.distance(zone)
over.to_crs(epsg=4326, inplace=True)
non_overlapping_utms.append(non_over)
overlapping_utms.append(over)
prefixes = ['l', 'r']
for overlapping_utm, prefix in zip(overlapping_utms, prefixes):
overlapping_utm[f'{prefix}_geom'] = overlapping_utm.geometry
overlapping_utm[f'{prefix}_index'] = overlapping_utm.index
LOGGER.info(f'\tfinding overlapping geometries with sjoin ...')
overlaps = sjoin(overlapping_utms[0], overlapping_utms[1], how='inner', op='intersects')
reminder_utms = [overlapping_utm[~overlapping_utm[f'{prefix}_index'].isin(
overlaps[f'{prefix}_index'])][['geometry']].copy()
for overlapping_utm, prefix in zip(overlapping_utms, prefixes)]
LOGGER.info(f'\trunning union of {len(overlaps)} overlapping geometries ...')
if parallel:
overlaps['geometry'] = overlaps.parallel_apply(p_union, axis=1)
else:
overlaps['geometry'] = overlaps.apply(lambda r: r.l_geom.union(r.r_geom), axis=1)
overlaps = overlaps[~(overlaps.is_empty | overlaps.geometry.area.isna())]
LOGGER.info(f'\tcreate dataframe of overlaps ...')
unified_geoms = unary_union(list(overlaps.geometry)).geoms
merged_overlaps = gpd.GeoDataFrame(geometry=[geom for geom in unified_geoms],
crs=overlaps.crs)
merged_overlaps.to_crs(config.resulting_crs, inplace=True)
LOGGER.info(f'\tmerging results ...')
for gdf in non_overlapping_utms + reminder_utms:
gdf.to_crs(config.resulting_crs, inplace=True)
gdfs_to_merge = non_overlapping_utms + reminder_utms + [merged_overlaps]
delineated_fields = gpd.GeoDataFrame(pd.concat(gdfs_to_merge), crs=config.resulting_crs)
delineated_fields = delineated_fields[delineated_fields.geometry.area < config.max_area]
LOGGER.info(f'\tsimplifying geometries ...')
if parallel:
partial_fn = partial(p_simplify, tolerance=config.simplify_tolerance)
delineated_fields['geometry'] = delineated_fields.parallel_apply(partial_fn, axis=1)
else:
delineated_fields['geometry'] = delineated_fields.geometry.simplify(config.simplify_tolerance)
LOGGER.info(f'\twriting output ...')
delineated_fields.to_file(f'{config.contours_dir}/delineated_fields_{time_window}.gpkg', driver='GPKG')
| 42.798479 | 129 | 0.640903 |
b54cc793ba5e8b11114599a3cb8bc1b0deea9a11 | 4,221 | py | Python | experiment_estimate_opt.py | xyguo/OnlineKZMedian | aa66be0406f7597d123c14bf72450dce432f965d | [
"MIT"
] | null | null | null | experiment_estimate_opt.py | xyguo/OnlineKZMedian | aa66be0406f7597d123c14bf72450dce432f965d | [
"MIT"
] | null | null | null | experiment_estimate_opt.py | xyguo/OnlineKZMedian | aa66be0406f7597d123c14bf72450dce432f965d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
This code is for estimating the offline OPT on the data set.
"""
import numpy as np
from datetime import datetime
from sklearn.metrics import pairwise_distances
from time import time
from utils import compute_cost, kz_means, kzmedian_cost_, coreset
from utils import gaussian_mixture, add_outliers, get_realworld_data
# Parameters
n_clients = 10000
n_available_facilities = 5000
n_clusters_range = [10, 50, 100]
n_outliers = 200
F_status = 'dynamic'
# F_status = 'static'
z_status = 'static'
# z_status = 'dynamic'
if z_status == 'static':
n_outliers_func = lambda _: n_outliers
else:
n_outliers_func = lambda x: int(x / n_clients * n_outliers)
n_true_outliers = 0
random_state = None
# data_name = 'gmm'
# data_name = 'power'
data_name = 'shuttle'
# data_name = 'letter'
# data_name = 'covertype'
# data_name = 'skin'
now = datetime.now()
starting_time = now.strftime("%b-%d")
if data_name != 'gmm':
print("Read in data set {} {} shuffle ... ".format(data_name, 'and' if random_state is not None else 'without'))
X = get_realworld_data(data_name)
if random_state is not None:
np.random.seed(random_state)
np.random.shuffle(X)
# take the first n_clients data points as the client set
print("Take the first {} data points (with dimension {}) as client set ... "
.format(n_clients, X.shape[1]))
C = X[:n_clients]
C = add_outliers(C, n_outliers=n_true_outliers, dist_factor=150)
else:
print("Create synthesized GMM data of size {} and shuffle it ... ".format(n_clients))
n_clusters = 5
C = gaussian_mixture(n_samples=n_clients, n_clusters=n_clusters, n_outliers=n_true_outliers, n_features=5,
outliers_dist_factor=50)
n_features = C.shape[1]
print("\nCreate {} potential facility locations via pre-clustering or coreset ... ".format(n_available_facilities))
t1_start = time()
if F_status == 'dynamic':
F = C.copy()
else:
F, _, _ = coreset(C, size=n_available_facilities, n_seeds=100, n_outliers=0)
pre_clustering_time = time() - t1_start
print("Pre-clustering takes {0:.2f} secs".format(pre_clustering_time))
if n_clients * n_available_facilities > 2e7:
dist_mat = None
else:
print("\nPre-compute distance matrix ... ")
t2_start = time()
dist_mat = pairwise_distances(C, C)
dist_mat_time = time() - t2_start
print("Computing distance matrix takes {0:.2f} secs".format(dist_mat_time))
# result
opt_window_size = 50
results_var_k = {
'data_name': data_name,
'opt_cost': [],
'n_features': n_features,
'n_clients': n_clients,
'n_clusters_range': n_clusters_range,
'n_outliers': n_outliers,
'n_artificial_outliers': n_true_outliers,
'random_seed': random_state,
'opt_window_size': opt_window_size,
'opt_sample_point': []
}
print("\nOn dataset {}, try different k with n={}, {} z={}, z'={}"
.format(data_name, n_clients, z_status, n_outliers, n_true_outliers))
for k in n_clusters_range:
steps = np.arange(k + n_outliers, len(C), opt_window_size)
results_var_k['opt_sample_point'].append(steps)
print("\n===\n k={}, window size={}".format(k, opt_window_size))
# Run!
t3_start = time()
estimated_opt = []
for n in steps:
kzm_costs = []
arrived_C = C[:n]
for i in range(5):
z = n_outliers_func(n)
offline_kzmeans_centers = kz_means(arrived_C, n_clusters=k, n_outliers=z)
c = compute_cost(arrived_C, offline_kzmeans_centers,
cost_func=kzmedian_cost_, remove_outliers=z)
kzm_costs.append(c)
offline_kzmeans_cost = min(kzm_costs)
print("\n--- estimated OPT[0:{}] (with z={}) = min({}) = {}"
.format(n, z, kzm_costs, offline_kzmeans_cost))
estimated_opt.append(offline_kzmeans_cost)
results_var_k['opt_cost'].append(np.array(estimated_opt))
kzmeans_time = time() - t3_start
print("--- Done, takes {0:.2f} secs for k={1:d}".format(kzmeans_time, k))
filename = "kmeans--_for_dataset_{}_{}_z-star_{}_opt-wd_{}_rseed_{}_z_{}_{}"\
.format(data_name, n_clients, n_true_outliers, opt_window_size, random_state, z_status, starting_time)
np.savez(filename, **results_var_k)
| 35.771186 | 116 | 0.680171 |
5db4d02afc2830d573341a78e81469b6de5b7896 | 10,927 | py | Python | venv/Lib/site-packages/websocket/_http.py | GuilhermeJC13/storIA | eeecbe9030426f70c6aa73ca0ce8382860c8495c | [
"MIT"
] | 4 | 2021-07-27T23:39:02.000Z | 2021-09-23T04:17:08.000Z | venv/Lib/site-packages/websocket/_http.py | GuilhermeJC13/storIA | eeecbe9030426f70c6aa73ca0ce8382860c8495c | [
"MIT"
] | 12 | 2021-04-11T19:46:06.000Z | 2021-06-18T16:08:37.000Z | venv/Lib/site-packages/websocket/_http.py | GuilhermeJC13/storIA | eeecbe9030426f70c6aa73ca0ce8382860c8495c | [
"MIT"
] | 3 | 2021-07-27T17:33:58.000Z | 2021-07-29T12:46:59.000Z | """
websocket - WebSocket client library for Python
Copyright (C) 2010 Hiroki Ohtani(liris)
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""
import errno
import os
import socket
import sys
from ._exceptions import *
from ._logging import *
from ._socket import *
from ._ssl_compat import *
from ._url import *
from base64 import encodebytes as base64encode
__all__ = ["proxy_info", "connect", "read_headers"]
try:
import socks
ProxyConnectionError = socks.ProxyConnectionError
HAS_PYSOCKS = True
except:
class ProxyConnectionError(BaseException):
pass
HAS_PYSOCKS = False
class proxy_info(object):
def __init__(self, **options):
self.type = options.get("proxy_type") or "http"
if not(self.type in ['http', 'socks4', 'socks5', 'socks5h']):
raise ValueError("proxy_type must be 'http', 'socks4', 'socks5' or 'socks5h'")
self.host = options.get("http_proxy_host", None)
if self.host:
self.port = options.get("http_proxy_port", 0)
self.auth = options.get("http_proxy_auth", None)
self.no_proxy = options.get("http_no_proxy", None)
else:
self.port = 0
self.auth = None
self.no_proxy = None
def _open_proxied_socket(url, options, proxy):
hostname, port, resource, is_secure = parse_url(url)
if not HAS_PYSOCKS:
raise WebSocketException("PySocks module not found.")
ptype = socks.SOCKS5
rdns = False
if proxy.type == "socks4":
ptype = socks.SOCKS4
if proxy.type == "http":
ptype = socks.HTTP
if proxy.type[-1] == "h":
rdns = True
sock = socks.create_connection(
(hostname, port),
proxy_type=ptype,
proxy_addr=proxy.host,
proxy_port=int(proxy.port),
proxy_rdns=rdns,
proxy_username=proxy.auth[0] if proxy.auth else None,
proxy_password=proxy.auth[1] if proxy.auth else None,
timeout=options.timeout,
socket_options=DEFAULT_SOCKET_OPTION + options.sockopt
)
if is_secure and HAVE_SSL:
sock = _ssl_socket(sock, options.sslopt, hostname)
elif is_secure:
raise WebSocketException("SSL not available.")
return sock, (hostname, port, resource)
def connect(url, options, proxy, socket):
if proxy.host and not socket and not (proxy.type == 'http'):
return _open_proxied_socket(url, options, proxy)
hostname, port, resource, is_secure = parse_url(url)
if socket:
return socket, (hostname, port, resource)
addrinfo_list, need_tunnel, auth = _get_addrinfo_list(
hostname, port, is_secure, proxy)
if not addrinfo_list:
raise WebSocketException(
"Host not found.: " + hostname + ":" + str(port))
sock = None
try:
sock = _open_socket(addrinfo_list, options.sockopt, options.timeout)
if need_tunnel:
sock = _tunnel(sock, hostname, port, auth)
if is_secure:
if HAVE_SSL:
sock = _ssl_socket(sock, options.sslopt, hostname)
else:
raise WebSocketException("SSL not available.")
return sock, (hostname, port, resource)
except:
if sock:
sock.close()
raise
def _get_addrinfo_list(hostname, port, is_secure, proxy):
phost, pport, pauth = get_proxy_info(
hostname, is_secure, proxy.host, proxy.port, proxy.auth, proxy.no_proxy)
try:
# when running on windows 10, getaddrinfo without socktype returns a socktype 0.
# This generates an error exception: `_on_error: exception Socket type must be stream or datagram, not 0`
# or `OSError: [Errno 22] Invalid argument` when creating socket. Force the socket type to SOCK_STREAM.
if not phost:
addrinfo_list = socket.getaddrinfo(
hostname, port, 0, socket.SOCK_STREAM, socket.SOL_TCP)
return addrinfo_list, False, None
else:
pport = pport and pport or 80
# when running on windows 10, the getaddrinfo used above
# returns a socktype 0. This generates an error exception:
# _on_error: exception Socket type must be stream or datagram, not 0
# Force the socket type to SOCK_STREAM
addrinfo_list = socket.getaddrinfo(phost, pport, 0, socket.SOCK_STREAM, socket.SOL_TCP)
return addrinfo_list, True, pauth
except socket.gaierror as e:
raise WebSocketAddressException(e)
def _open_socket(addrinfo_list, sockopt, timeout):
err = None
for addrinfo in addrinfo_list:
family, socktype, proto = addrinfo[:3]
sock = socket.socket(family, socktype, proto)
sock.settimeout(timeout)
for opts in DEFAULT_SOCKET_OPTION:
sock.setsockopt(*opts)
for opts in sockopt:
sock.setsockopt(*opts)
address = addrinfo[4]
err = None
while not err:
try:
sock.connect(address)
except ProxyConnectionError as error:
err = WebSocketProxyException(str(error))
err.remote_ip = str(address[0])
continue
except socket.error as error:
error.remote_ip = str(address[0])
try:
eConnRefused = (errno.ECONNREFUSED, errno.WSAECONNREFUSED)
except:
eConnRefused = (errno.ECONNREFUSED, )
if error.errno == errno.EINTR:
continue
elif error.errno in eConnRefused:
err = error
continue
else:
if sock:
sock.close()
raise error
else:
break
else:
continue
break
else:
if err:
raise err
return sock
def _wrap_sni_socket(sock, sslopt, hostname, check_hostname):
context = ssl.SSLContext(sslopt.get('ssl_version', ssl.PROTOCOL_TLS))
if sslopt.get('cert_reqs', ssl.CERT_NONE) != ssl.CERT_NONE:
cafile = sslopt.get('ca_certs', None)
capath = sslopt.get('ca_cert_path', None)
if cafile or capath:
context.load_verify_locations(cafile=cafile, capath=capath)
elif hasattr(context, 'load_default_certs'):
context.load_default_certs(ssl.Purpose.SERVER_AUTH)
if sslopt.get('certfile', None):
context.load_cert_chain(
sslopt['certfile'],
sslopt.get('keyfile', None),
sslopt.get('password', None),
)
# see
# https://github.com/liris/websocket-client/commit/b96a2e8fa765753e82eea531adb19716b52ca3ca#commitcomment-10803153
context.verify_mode = sslopt['cert_reqs']
if HAVE_CONTEXT_CHECK_HOSTNAME:
context.check_hostname = check_hostname
if 'ciphers' in sslopt:
context.set_ciphers(sslopt['ciphers'])
if 'cert_chain' in sslopt:
certfile, keyfile, password = sslopt['cert_chain']
context.load_cert_chain(certfile, keyfile, password)
if 'ecdh_curve' in sslopt:
context.set_ecdh_curve(sslopt['ecdh_curve'])
return context.wrap_socket(
sock,
do_handshake_on_connect=sslopt.get('do_handshake_on_connect', True),
suppress_ragged_eofs=sslopt.get('suppress_ragged_eofs', True),
server_hostname=hostname,
)
def _ssl_socket(sock, user_sslopt, hostname):
sslopt = dict(cert_reqs=ssl.CERT_REQUIRED)
sslopt.update(user_sslopt)
certPath = os.environ.get('WEBSOCKET_CLIENT_CA_BUNDLE')
if certPath and os.path.isfile(certPath) \
and user_sslopt.get('ca_certs', None) is None:
sslopt['ca_certs'] = certPath
elif certPath and os.path.isdir(certPath) \
and user_sslopt.get('ca_cert_path', None) is None:
sslopt['ca_cert_path'] = certPath
if sslopt.get('server_hostname', None):
hostname = sslopt['server_hostname']
check_hostname = sslopt["cert_reqs"] != ssl.CERT_NONE and sslopt.pop(
'check_hostname', True)
sock = _wrap_sni_socket(sock, sslopt, hostname, check_hostname)
if not HAVE_CONTEXT_CHECK_HOSTNAME and check_hostname:
match_hostname(sock.getpeercert(), hostname)
return sock
def _tunnel(sock, host, port, auth):
debug("Connecting proxy...")
connect_header = "CONNECT %s:%d HTTP/1.1\r\n" % (host, port)
connect_header += "Host: %s:%d\r\n" % (host, port)
# TODO: support digest auth.
if auth and auth[0]:
auth_str = auth[0]
if auth[1]:
auth_str += ":" + auth[1]
encoded_str = base64encode(auth_str.encode()).strip().decode().replace('\n', '')
connect_header += "Proxy-Authorization: Basic %s\r\n" % encoded_str
connect_header += "\r\n"
dump("request header", connect_header)
send(sock, connect_header)
try:
status, resp_headers, status_message = read_headers(sock)
except Exception as e:
raise WebSocketProxyException(str(e))
if status != 200:
raise WebSocketProxyException(
"failed CONNECT via proxy status: %r" % status)
return sock
def read_headers(sock):
status = None
status_message = None
headers = {}
trace("--- response header ---")
while True:
line = recv_line(sock)
line = line.decode('utf-8').strip()
if not line:
break
trace(line)
if not status:
status_info = line.split(" ", 2)
status = int(status_info[1])
if len(status_info) > 2:
status_message = status_info[2]
else:
kv = line.split(":", 1)
if len(kv) == 2:
key, value = kv
if key.lower() == "set-cookie" and headers.get("set-cookie"):
headers["set-cookie"] = headers.get("set-cookie") + "; " + value.strip()
else:
headers[key.lower()] = value.strip()
else:
raise WebSocketException("Invalid header")
trace("-----------------------")
return status, headers, status_message
| 33.621538 | 118 | 0.619932 |
321a4e34e6018707364075d6c8f72522fe86c0bf | 793 | py | Python | aiogram/__init__.py | Latand/aiogram | 78aee861bb77601691916feb5a9c8efbdb65956b | [
"MIT"
] | 3 | 2020-12-06T16:55:53.000Z | 2021-11-19T19:25:57.000Z | aiogram/__init__.py | Latand/aiogram | 78aee861bb77601691916feb5a9c8efbdb65956b | [
"MIT"
] | null | null | null | aiogram/__init__.py | Latand/aiogram | 78aee861bb77601691916feb5a9c8efbdb65956b | [
"MIT"
] | 2 | 2020-12-30T09:51:30.000Z | 2021-11-10T16:50:28.000Z | import asyncio
import os
from . import bot
from . import contrib
from . import dispatcher
from . import types
from . import utils
from .bot import Bot
from .dispatcher import Dispatcher
from .dispatcher import filters
from .dispatcher import middlewares
from .utils import exceptions, executor, helper, markdown as md
try:
import uvloop
except ImportError:
uvloop = None
else:
if 'DISABLE_UVLOOP' not in os.environ:
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
__all__ = [
'Bot',
'Dispatcher',
'__api_version__',
'__version__',
'bot',
'contrib',
'dispatcher',
'exceptions',
'executor',
'filters',
'helper',
'md',
'middlewares',
'types',
'utils'
]
__version__ = '2.0.2.dev1'
__api_version__ = '4.1'
| 18.44186 | 63 | 0.675914 |
ebde7a07b9aea1f8999e65cc76f6ea31f99617a2 | 4,400 | py | Python | tests/enumerator_test.py | jnice-81/dace | 5211794a2d17b7189037ac485ab0b292fb02aa0d | [
"BSD-3-Clause"
] | 227 | 2019-03-15T23:39:06.000Z | 2022-03-30T07:49:08.000Z | tests/enumerator_test.py | jnice-81/dace | 5211794a2d17b7189037ac485ab0b292fb02aa0d | [
"BSD-3-Clause"
] | 834 | 2019-07-31T22:49:31.000Z | 2022-03-28T14:01:32.000Z | tests/enumerator_test.py | jnice-81/dace | 5211794a2d17b7189037ac485ab0b292fb02aa0d | [
"BSD-3-Clause"
] | 64 | 2019-03-19T05:40:37.000Z | 2022-03-11T15:02:42.000Z | # Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.
from dace.transformation.estimator.enumeration.brute_force_enumerator import BruteForceEnumerator
from dace.transformation.estimator.enumeration.connected_enumerator import ConnectedEnumerator
import dace
import numpy as np
import pytest
from dace.transformation.estimator import GreedyEnumerator
from dace.transformation.subgraph.composite import CompositeFusion
from dace.sdfg.graph import SubgraphView
from dace.transformation.subgraph.reduce_expansion import ReduceExpansion
W = dace.symbol('W')
H = dace.symbol('H')
B = dace.symbol('B')
@dace.program
def p1(in1: dace.float32[W, H, B], in2: dace.float32[W, H],
out: dace.float32[W, H]):
tmp1 = np.ndarray([W, H, B], dtype=dace.float32)
for i, j, k in dace.map[0:W, 0:H, 0:B]:
with dace.tasklet:
a << in1[i, j, k]
b << in2[i, j]
c >> tmp1[i, j, k]
c = a + b * 2
tmp2 = np.ndarray([W, H, B], dtype=dace.float32)
#tmp3 = np.ndarray([W, H], dtype=dace.float32)
for i, j, k in dace.map[0:W, 0:H, 0:B]:
with dace.tasklet:
a << tmp1[i, j, k]
c >> tmp2[i, j, k]
c = 3 * a
tmp3 = dace.reduce(lambda x, y: x + y, tmp1, axis=2, identity=0)
tmp4 = dace.reduce(lambda x, y: x + y, tmp2, axis=2, identity=0)
for i, j in dace.map[0:W, 0:H]:
with dace.tasklet:
a << tmp3[i, j]
b << tmp4[i, j]
c >> out[i, j]
c = a * 2 + b * 3 + 1
@pytest.mark.parametrize(["map_splits"], [[True], [False]])
def test_greedy(map_splits):
# Test diamond graph structure and ensure topologically correct enumeration
w = 30
h = 30
b = 20
A1 = np.random.rand(w, h, b).astype(np.float32)
A2 = np.random.rand(w, h).astype(np.float32)
ret = np.zeros([w, h], dtype=np.float32)
sdfg = p1.to_sdfg()
sdfg.apply_strict_transformations()
graph = sdfg.nodes()[0]
sdfg.apply_transformations_repeated(ReduceExpansion)
subgraph = SubgraphView(graph, graph.nodes())
composite = CompositeFusion(subgraph)
composite.expansion_split = map_splits
cf = lambda sdfg, subgraph: composite.can_be_applied(sdfg, subgraph)
enum = GreedyEnumerator(sdfg, graph, subgraph, cf)
result = enum.list()
if map_splits:
assert len(result) == 1
else:
assert len(result) == 2
@pytest.mark.parametrize(["map_splits"], [[True], [False]])
def test_connected(map_splits):
# Test diamond graph structure and ensure topologically correct enumeration
w = 30
h = 30
b = 20
A1 = np.random.rand(w, h, b).astype(np.float32)
A2 = np.random.rand(w, h).astype(np.float32)
ret = np.zeros([w, h], dtype=np.float32)
sdfg = p1.to_sdfg()
sdfg.apply_strict_transformations()
graph = sdfg.nodes()[0]
sdfg.apply_transformations_repeated(ReduceExpansion)
subgraph = SubgraphView(graph, graph.nodes())
composite = CompositeFusion(subgraph)
composite.expansion_split = map_splits
cf = lambda sdfg, subgraph: composite.can_be_applied(sdfg, subgraph)
enum = ConnectedEnumerator(sdfg, graph, subgraph, cf)
result = enum.list()
if map_splits:
assert len(result) == 14
else:
assert len(result) == 4
@pytest.mark.parametrize(["map_splits"], [[True], [False]])
def test_brute_force(map_splits):
# Test diamond graph structure and ensure topologically correct enumeration
w = 30
h = 30
b = 20
A1 = np.random.rand(w, h, b).astype(np.float32)
A2 = np.random.rand(w, h).astype(np.float32)
ret = np.zeros([w, h], dtype=np.float32)
sdfg = p1.to_sdfg()
sdfg.apply_strict_transformations()
graph = sdfg.nodes()[0]
sdfg.apply_transformations_repeated(ReduceExpansion)
subgraph = SubgraphView(graph, graph.nodes())
composite = CompositeFusion(subgraph)
composite.expansion_split = map_splits
cf = lambda sdfg, subgraph: composite.can_be_applied(sdfg, subgraph)
enum = BruteForceEnumerator(sdfg, graph, subgraph, cf)
result = enum.list()
if map_splits:
assert len(result) == 15
else:
assert len(result) == 5
if __name__ == "__main__":
test_greedy(True)
test_greedy(False)
test_connected(True)
test_connected(False)
test_brute_force(True)
test_brute_force(False)
| 30.344828 | 97 | 0.651591 |
e31ed9df7907a79212a5dedff080bbea0e55dd8a | 2,851 | py | Python | app/views.py | Yashwantbokadia/blockchainproject | 74269e1cf86c42a159243842d0b1ce13454f3f49 | [
"Apache-2.0"
] | null | null | null | app/views.py | Yashwantbokadia/blockchainproject | 74269e1cf86c42a159243842d0b1ce13454f3f49 | [
"Apache-2.0"
] | null | null | null | app/views.py | Yashwantbokadia/blockchainproject | 74269e1cf86c42a159243842d0b1ce13454f3f49 | [
"Apache-2.0"
] | null | null | null | import datetime
import json
from hashlib import sha256
import requests
from flask import render_template, redirect, request
from app import app
# The node with which our application interacts; there can be multiple
# such nodes as well.
CONNECTED_NODE_ADDRESS = "http://0.0.0.0:8000"
posts = []
senderHash = None  # set at sign-in: sha256 of (name + password)
def fetch_posts():
"""
Function to fetch the chain from a blockchain node, parse the
data and store it locally.
"""
get_chain_address = "{}/chain".format(CONNECTED_NODE_ADDRESS)
response = requests.get(get_chain_address)
if response.status_code == 200:
content = []
chain = json.loads(response.content)
for block in chain["chain"]:
print(block)
tx = {"index": block["index"], "senderHash":block["senderHash"], "reciver":block["reciver"], "amount":block["amount"], "hash":block["previous_hash"], "timestamp":block["timestamp"]}
content.append(tx)
global posts
"""posts = sorted(content, key=lambda k: k['timestamp'],
reverse=True)"""
posts = content
@app.route('/signin')#SignIn Page
def signinPage():
return render_template('signin.html')
@app.route('/')  # Root page: renders the index with the current chain
def root():
fetch_posts()
return render_template('index.html',
title='BLocky',
posts=posts,
senderHash = senderHash,
node_address=CONNECTED_NODE_ADDRESS,
readable_time=timestamp_to_string)
@app.route('/index', methods=['POST'])#Index(Main) Page
def index():
name = request.form["name"]
password = request.form["pasd"]
global senderHash
senderHash = sha256((name+password).encode()).hexdigest()
fetch_posts()
return render_template('index.html',
title='BLocky',
posts=posts,
senderHash = senderHash,
node_address=CONNECTED_NODE_ADDRESS,
readable_time=timestamp_to_string)
@app.route('/submit', methods=['POST'])#When hit submit in Index page
def submit_textarea():
"""
Endpoint to create a new transaction via our application.
"""
print(request.form["amount"])
amount = request.form["amount"]
reciver = request.form["reciver"]
post_object = {
'amount': amount,
'senderHash': senderHash,
'reciver': reciver
}
print(post_object)
# Submit a transaction
new_tx_address = "{}/new_transaction".format(CONNECTED_NODE_ADDRESS)
requests.post(new_tx_address,
json=post_object,
headers={'Content-type': 'application/json'})
return redirect('/')
def timestamp_to_string(epoch_time):
return datetime.datetime.fromtimestamp(epoch_time).strftime('%H:%M')
| 29.697917 | 193 | 0.612767 |
6d83ba7cfcd9411b4348c91ecf661bc58d85ea10 | 656 | py | Python | src/scripts/output.py | wnjustdoit/devops-py | 54dd722a577c4b3ecda45aa85c067130fd292ab9 | [
"Apache-2.0"
] | null | null | null | src/scripts/output.py | wnjustdoit/devops-py | 54dd722a577c4b3ecda45aa85c067130fd292ab9 | [
"Apache-2.0"
] | 6 | 2021-04-08T20:46:56.000Z | 2022-01-13T01:52:06.000Z | src/scripts/output.py | wnjustdoit/devops-py | 54dd722a577c4b3ecda45aa85c067130fd292ab9 | [
"Apache-2.0"
] | null | null | null | # !/usr/bin/python3
import sys
def output_all(msg, resp=0, exit=False):
print(msg if resp == 0 else f'{msg}, status_code: {resp}')
if exit:
sys.exit(resp)
else:
sys.stdout.flush()
def output_strict(msg, resp):
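    # strict mode: a nonzero status is fatal and terminates the process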
if resp != 0:
output_error(msg, resp)
else:
output_std(msg)
def output_relaxed(msg, resp):
if resp != 0:
output_warn(msg, resp)
else:
output_std(msg)
def output_error(msg, resp=-1, exit=True):
output_all(f'ERROR: {msg}', resp, exit)
def output_warn(msg, resp=-1):
output_all(f'WARN: {msg}', resp)
def output_std(msg):
output_all(f'INFO: {msg}')
| 17.263158 | 62 | 0.599085 |
8ce40385b0814109bcc27fce85199ea3a1db93ae | 6,672 | py | Python | days/__init__.py | Kurocon/AdventOfCode2016 | 357e55025c763baa9d838b70025f968ca5e92ed7 | [
"BSD-3-Clause"
] | null | null | null | days/__init__.py | Kurocon/AdventOfCode2016 | 357e55025c763baa9d838b70025f968ca5e92ed7 | [
"BSD-3-Clause"
] | null | null | null | days/__init__.py | Kurocon/AdventOfCode2016 | 357e55025c763baa9d838b70025f968ca5e92ed7 | [
"BSD-3-Clause"
] | null | null | null | import os
import glob
import sys
import time
import traceback
from typing import Generator
import requests
from aocdays import AOCDays
modules = filter(lambda x: not os.path.basename(x).startswith('_'), glob.glob(os.path.dirname(__file__) + "/*.py"))
__all__ = [os.path.basename(f)[:-3] for f in modules]
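# class decorator: registers the decorated AOCDay subclass for the given day number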
def day(day_number):
def day_decorator(cls):
if not str(cls.__module__).replace("days.", "").startswith("_"):
AOCDays.get_instance().add_day(day_number, cls)
return cls
return day_decorator
class AOCDay:
creator = "Kevin"
year = 2019
day_number = 0
session_token = ""
input_filename = ""
output_filename = ""
input_data = None
# Set to True to always print debug, or a combination of "1", "2", and "c" to print debug for part 1, 2, or common.
# E.g. the value "1c" will print debug for part 1 and common.
print_debug = False
_running_common = False
_running_part1 = False
_running_part2 = False
def __init__(self, year, day_number, session_token):
self.year = year
self.day_number = day_number
self.session_token = session_token
self.input_filename = os.path.join(os.path.dirname(__file__),
"../inputs/day{}_{}".format(self.day_number, "input"))
if self.creator == "Kevin":
self.output_filename = os.path.join(os.path.dirname(__file__),
"../outputs/day{}_{}".format(self.day_number, "output"))
else:
self.output_filename = os.path.join(os.path.dirname(__file__),
"../outputs/day{}_{}_{}".format(self.day_number, "output", self.creator))
def log(self, msg):
print(msg)
def debug(self, msg):
if self.print_debug == True or (
type(self.print_debug) == str and (
(self._running_common and "c" in self.print_debug) or
(self._running_part1 and "1" in self.print_debug) or
(self._running_part2 and "2" in self.print_debug)
)
):
print(msg)
def error(self, msg):
print(msg, file=sys.stderr)
def download_input(self):
if os.path.isfile(self.input_filename):
return
print("Could not find input data for day {}, please wait while I download it...".format(self.day_number))
input_url = "https://adventofcode.com/{}/day/{}/input".format(self.year, self.day_number)
result = requests.get(input_url, cookies={'session': self.session_token})
if result.status_code == 200:
self.input_data = result.text
with open(self.input_filename, 'w') as f:
f.write(result.text)
else:
raise ConnectionError("Could not connect to AoC website to download input data. "
"Error code {}: {}".format(result.status_code, result.text))
def load_input(self):
if self.input_filename:
with open(self.input_filename, 'r') as f:
self.input_data = [x.replace("\n", "") for x in f.readlines()]
if len(self.input_data) == 1:
self.input_data = self.input_data[0]
def run(self):
self.download_input()
self.load_input()
if os.path.isfile(self.output_filename):
os.remove(self.output_filename)
with open(self.output_filename, 'w') as output_file:
def dprint(thing):
print(thing, file=output_file)
print(thing)
input_data = self.input_data
start_time = time.time()
exception_or_output = False
test_exception = False
try:
test = self.test(input_data)
if test:
dprint("== Tests Output ==")
exception_or_output = True
for x in test:
dprint(x)
except Exception as e:
dprint("== Tests Error ==")
dprint(''.join(traceback.format_exception(None, e, e.__traceback__)))
exception_or_output = True
test_exception = True
if exception_or_output:
dprint("== Tests ran in {:.3f} ms ==".format((time.time() - start_time) * 1000))
dprint("")
if test_exception:
dprint("== NOT RUNNING PARTS BECAUSE OF TEST ERRORS ==")
else:
self._running_common = True
start_time = time.time()
common = self.common(input_data)
if common:
dprint("== Common ==")
for x in common:
dprint(x)
dprint("")
self._running_common = False
dprint("== Part 1 ==")
self._running_part1 = True
part1 = self.part1(input_data)
printed = False
if part1:
for x in part1:
if not printed:
printed = True
dprint(x)
if not printed:
dprint("(no output)")
dprint("== Ran in {:.3f} ms ==".format((time.time() - start_time) * 1000))
self._running_part1 = False
dprint("")
self._running_common = True
start_time = time.time()
common = self.common(input_data)
if common:
dprint("== Common ==")
for x in common:
dprint(x)
dprint("")
self._running_common = False
dprint("== Part 2 ==")
self._running_part2 = True
part2 = self.part2(input_data)
printed = False
if part2:
for x in part2:
if not printed:
printed = True
dprint(x)
if not printed:
dprint("(no output)")
dprint("== Ran in {:.3f} ms ==".format((time.time() - start_time) * 1000))
self._running_part2 = False
dprint("")
def test(self, input_data) -> Generator:
pass
def common(self, input_data) -> Generator:
pass
def part1(self, input_data) -> Generator:
pass
def part2(self, input_data) -> Generator:
pass
| 35.115789 | 121 | 0.508693 |
cbe773635576609b3f14606058b02d5345306388 | 103 | py | Python | Apps/StudentHousing/apps.py | ResidenciApp/ResidenciAppServidor | 7cc8c66bc07d198be37232fdc74d64227d14ce3d | [
"MIT"
] | 2 | 2019-10-31T03:21:07.000Z | 2019-12-11T16:25:33.000Z | Apps/StudentHousing/apps.py | ResidenciApp/ResidenciAppServidor | 7cc8c66bc07d198be37232fdc74d64227d14ce3d | [
"MIT"
] | 6 | 2021-03-19T02:43:26.000Z | 2022-02-10T10:42:00.000Z | Apps/StudentHousing/apps.py | lmbaeza/ResidenciAppServidor | 7cc8c66bc07d198be37232fdc74d64227d14ce3d | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class StudenthousingConfig(AppConfig):
name = 'StudentHousing'
| 17.166667 | 38 | 0.786408 |
3b5f58326a6aa6c38b6d39cfb38cbc9cfef91eda | 1,997 | py | Python | ingenico/direct/sdk/domain/get_hosted_tokenization_response.py | Ingenico/direct-sdk-python3 | d2b30b8e8afb307153a1f19ac4c054d5344449ce | [
"Apache-2.0"
] | null | null | null | ingenico/direct/sdk/domain/get_hosted_tokenization_response.py | Ingenico/direct-sdk-python3 | d2b30b8e8afb307153a1f19ac4c054d5344449ce | [
"Apache-2.0"
] | 1 | 2021-03-30T12:55:39.000Z | 2021-04-08T08:23:27.000Z | ingenico/direct/sdk/domain/get_hosted_tokenization_response.py | Ingenico/direct-sdk-python3 | d2b30b8e8afb307153a1f19ac4c054d5344449ce | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
#
# This class was auto-generated from the API references found at
# https://support.direct.ingenico.com/documentation/api/reference/
#
from ingenico.direct.sdk.data_object import DataObject
from ingenico.direct.sdk.domain.token_response import TokenResponse
class GetHostedTokenizationResponse(DataObject):
__token = None
__token_status = None
@property
def token(self) -> TokenResponse:
"""
Type: :class:`ingenico.direct.sdk.domain.token_response.TokenResponse`
"""
return self.__token
@token.setter
def token(self, value: TokenResponse):
self.__token = value
@property
def token_status(self) -> str:
"""
| This is the status of the token in the hosted tokenization session. Possible values are:
| * UNCHANGED - The token has not changed
| * CREATED - The token has been created
| * UPDATED - The token has been updated
Type: str
"""
return self.__token_status
@token_status.setter
def token_status(self, value: str):
self.__token_status = value
def to_dictionary(self):
dictionary = super(GetHostedTokenizationResponse, self).to_dictionary()
if self.token is not None:
dictionary['token'] = self.token.to_dictionary()
if self.token_status is not None:
dictionary['tokenStatus'] = self.token_status
return dictionary
def from_dictionary(self, dictionary):
super(GetHostedTokenizationResponse, self).from_dictionary(dictionary)
if 'token' in dictionary:
if not isinstance(dictionary['token'], dict):
raise TypeError('value \'{}\' is not a dictionary'.format(dictionary['token']))
value = TokenResponse()
self.token = value.from_dictionary(dictionary['token'])
if 'tokenStatus' in dictionary:
self.token_status = dictionary['tokenStatus']
return self
| 33.283333 | 98 | 0.654982 |
c89c1d461602400c605b6bd931a137353ceae761 | 7,013 | py | Python | reclist/metrics/distance_metrics.py | nsbits/reclist | 2a41e1de5c36626f8081468c89b96630d6127315 | [
"MIT"
] | 183 | 2021-11-16T13:29:58.000Z | 2022-03-28T08:31:52.000Z | reclist/metrics/distance_metrics.py | nsbits/reclist | 2a41e1de5c36626f8081468c89b96630d6127315 | [
"MIT"
] | 1 | 2021-11-30T13:33:42.000Z | 2021-12-05T15:29:50.000Z | reclist/metrics/distance_metrics.py | nsbits/reclist | 2a41e1de5c36626f8081468c89b96630d6127315 | [
"MIT"
] | 8 | 2021-12-10T22:50:32.000Z | 2022-03-27T22:09:14.000Z | from scipy.spatial.distance import cosine
import matplotlib.pyplot as plt
from reclist.current import current
import os
import json
import networkx as nx
from networkx.algorithms.shortest_paths.generic import shortest_path
from statistics import mean
from reclist.metrics.standard_metrics import sample_misses_at_k, sample_hits_at_k
import numpy as np
def error_by_cosine_distance(model, y_test, y_preds, k=3, bins=25, debug=False):
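    # how far (cosine distance in the model's embedding space) the top wrong
    # prediction lands from the true label, aggregated over sampled misses at k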
if not(hasattr(model.__class__, 'get_vector') and callable(getattr(model.__class__, 'get_vector'))):
error_msg = "Error : Model {} does not support retrieval of vector embeddings".format(model.__class__)
print(error_msg)
return error_msg
misses = sample_misses_at_k(y_preds, y_test, k=k, size=-1)
cos_distances = []
for m in misses:
if m['Y_PRED']:
vector_test = model.get_vector(m['Y_TEST'][0])
vector_pred = model.get_vector(m['Y_PRED'][0])
if vector_pred and vector_test:
cos_dist = cosine(vector_pred, vector_test)
cos_distances.append(cos_dist)
histogram = np.histogram(cos_distances, bins=bins, density=False)
# cast to list
histogram = (histogram[0].tolist(), histogram[1].tolist())
# debug / viz
if debug:
plt.hist(cos_distances, bins=bins)
plt.title('dist over cosine distance prod space')
plt.savefig(os.path.join(current.report_path,
'plots',
'distance_to_predictions.png'))
plt.clf()
# plt.show()
return {'mean': np.mean(cos_distances), 'histogram': histogram}
def distance_to_query(model, x_test, y_test, y_preds, k=3, bins=25, debug=False):
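    # for each miss, compare the distance from the query item to the true label
    # against the mean distance from the query to the predicted items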
if not(hasattr(model.__class__, 'get_vector') and callable(getattr(model.__class__, 'get_vector'))):
error_msg = "Error : Model {} does not support retrieval of vector embeddings".format(model.__class__)
print(error_msg)
return error_msg
misses = sample_misses_at_k(y_preds, y_test, x_test=x_test, k=k, size=-1)
x_to_y_cos = []
x_to_p_cos = []
for m in misses:
if m['Y_PRED']:
vector_x = model.get_vector(m['X_TEST'][0])
vector_y = model.get_vector(m['Y_TEST'][0])
vectors_p = [model.get_vector(_) for _ in m['Y_PRED']]
            c_dists = []
if not vector_x or not vector_y:
continue
x_to_y_cos.append(cosine(vector_x, vector_y))
for v_p in vectors_p:
if not v_p:
continue
cos_dist = cosine(v_p, vector_x)
if cos_dist:
c_dists.append(cos_dist)
if c_dists:
x_to_p_cos.append(mean(c_dists))
h_xy = np.histogram(x_to_y_cos, bins=bins, density=False)
h_xp = np.histogram(x_to_p_cos, bins=bins, density=False)
h_xy = (h_xy[0].tolist(), h_xy[1].tolist())
h_xp = (h_xp[0].tolist(), h_xp[1].tolist())
# debug / viz
if debug:
plt.hist(x_to_y_cos, bins=bins, alpha=0.5)
plt.hist(x_to_p_cos, bins=bins, alpha=0.5)
plt.title('distribution of distance to input')
plt.legend(['Distance from Input to Label',
                    'Distance from Input to Prediction'],
loc='upper right')
# plt.show()
plt.savefig(os.path.join(current.report_path,
'plots',
'distance_to_query.png'))
plt.clf()
return {
'histogram_x_to_y': h_xy,
'histogram_x_to_p': h_xp,
'raw_distances_x_to_y': x_to_y_cos,
'raw_distances_x_to_p': x_to_p_cos,
}
def shortest_path_length():
pass
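# expand a category path like ['a', 'b'] into cumulative node ids ['_a', '_a_b'],
# so every level of the taxonomy tree gets a unique graph-node name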
get_nodes = lambda nodes, ancestors="": [] if not nodes else ['_'.join([ancestors, nodes[0]])] + \
get_nodes(nodes[1:], '_'.join([ancestors, nodes[0]]))
def graph_distance_test(y_test, y_preds, product_data, k=3):
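    # builds the category taxonomy as a graph and measures how many edges separate
    # the leaf categories of the true item and the top (wrong) prediction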
path_lengths = []
misses = sample_misses_at_k(y_preds, y_test, k=k, size=-1)
for _y, _y_p in zip([_['Y_TEST'] for _ in misses],
[_['Y_PRED'] for _ in misses]):
if not _y_p:
continue
_y_sku = _y[0]
_y_p_sku = _y_p[0]
if _y_sku not in product_data or _y_p_sku not in product_data:
continue
if not product_data[_y_sku]['CATEGORIES'] or not product_data[_y_p_sku]['CATEGORIES']:
continue
# extract graph information
catA = json.loads(product_data[_y_sku]['CATEGORIES'])
catB = json.loads(product_data[_y_p_sku]['CATEGORIES'])
catA_nodes = [get_nodes(c) for c in catA]
catB_nodes = [get_nodes(c) for c in catB]
all_nodes = list(set([n for c in catA_nodes + catB_nodes for n in c]))
all_edges = [(n1, n2) for c in catA_nodes + catB_nodes for n1, n2 in zip(c[:-1], c[1:])]
all_edges = list(set(all_edges))
# build graph
G = nx.Graph()
G.add_nodes_from(all_nodes)
G.add_edges_from(all_edges)
# get leaves
cat1_leaves = [c[-1] for c in catA_nodes]
cat2_leaves = [c[-1] for c in catB_nodes]
all_paths = [shortest_path(G, c1_l, c2_l) for c1_l in cat1_leaves for c2_l in cat2_leaves]
s_path = min(all_paths, key=len)
s_path_len = len(s_path) - 1
path_lengths.append(s_path_len)
histogram = np.histogram(path_lengths, bins=np.arange(0, max(path_lengths)))
histogram = (histogram[0].tolist(), histogram[1].tolist())
return {'mean': mean(path_lengths), 'hist': histogram}
def generic_cosine_distance(embeddings: dict,
type_fn,
y_test,
y_preds,
k=10,
bins=25,
debug=False):
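    # same cosine-distance-on-misses idea, but over caller-supplied embeddings
    # keyed by type_fn (e.g. a brand or category lookup) instead of the model's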
misses = sample_misses_at_k(y_preds, y_test, k=k, size=-1)
cos_distances = []
for m in misses:
if m['Y_TEST'] and m['Y_PRED'] and type_fn(m['Y_TEST'][0]) and type_fn(m['Y_PRED'][0]):
vector_test = embeddings.get(type_fn(m['Y_TEST'][0]), None)
vector_pred = embeddings.get(type_fn(m['Y_PRED'][0]), None)
if vector_pred and vector_test:
cos_dist = cosine(vector_pred, vector_test)
cos_distances.append(cos_dist)
# TODO: Maybe sample some examples from the bins
histogram = np.histogram(cos_distances, bins=bins, density=False)
# cast to list
histogram = (histogram[0].tolist(), histogram[1].tolist())
# debug / viz
if debug:
plt.hist(cos_distances, bins=bins)
plt.title('cosine distance misses')
plt.savefig(os.path.join(current.report_path,
'plots',
'cosine_distance_over_type.png'))
plt.clf()
return {'mean': np.mean(cos_distances), 'histogram': histogram}
| 38.961111 | 114 | 0.592043 |
86fc6630d873bdd718740d1fe235fe1954e7cf67 | 461 | py | Python | evotor/query_manager/urls.py | trukanduk/evotor_hackathon | 5aeec1886c9ca5dbb2d08d535885701062464fb0 | [
"MIT"
] | null | null | null | evotor/query_manager/urls.py | trukanduk/evotor_hackathon | 5aeec1886c9ca5dbb2d08d535885701062464fb0 | [
"MIT"
] | null | null | null | evotor/query_manager/urls.py | trukanduk/evotor_hackathon | 5aeec1886c9ca5dbb2d08d535885701062464fb0 | [
"MIT"
] | null | null | null | from django.conf.urls import url
import query_manager.views
urlpatterns = [
url(r'^json/(?P<model_name>[a-zA-Z0-9_]{1,})/$', query_manager.views.json_query_view),
url(r'^html/(?P<model_name>[a-zA-Z0-9_]{1,})/$', query_manager.views.html_query_view),
url(r'^update/(?P<model_name>[a-zA-Z0-9_]{1,})/$', query_manager.views.json_update_query_view),
url(r'^insert/(?P<model_name>[a-zA-Z0-9_]{1,})/$', query_manager.views.json_insert_query_view),
]
| 46.1 | 99 | 0.694143 |
34d205a97755830046bd8dea8104ad7f85e8ee84 | 2,015 | py | Python | lib/surface/emulators/datastore/__init__.py | eyalev/gcloud | 421ee63a0a6d90a097e8530d53a6df5b905a0205 | [
"Apache-2.0"
] | null | null | null | lib/surface/emulators/datastore/__init__.py | eyalev/gcloud | 421ee63a0a6d90a097e8530d53a6df5b905a0205 | [
"Apache-2.0"
] | null | null | null | lib/surface/emulators/datastore/__init__.py | eyalev/gcloud | 421ee63a0a6d90a097e8530d53a6df5b905a0205 | [
"Apache-2.0"
] | 2 | 2020-11-04T03:08:21.000Z | 2020-11-05T08:14:41.000Z | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The gcloud datastore emulator group."""
from googlecloudsdk.api_lib.emulators import datastore_util
from googlecloudsdk.api_lib.emulators import util
from googlecloudsdk.calliope import base
class Datastore(base.Group):
"""Manage your local datastore emulator.
This set of commands allows you to start and use a local datastore emulator.
"""
detailed_help = {
'DESCRIPTION': '{description}',
'EXAMPLES': """\
To start a local datastore emulator, run:
$ {command} start
""",
}
@staticmethod
def Args(parser):
parser.add_argument(
'--data-dir',
required=False,
help='The directory to be used to store/retrieve data/config for an'
' emulator run.')
parser.add_argument(
'--legacy',
default=True,
action='store_true',
help='Set to use the legacy emulator which supports Cloud Datastore'
' API v1beta2.')
def Filter(self, context, args):
util.CheckIfJava7IsInstalled(datastore_util.DATASTORE_TITLE)
if args.legacy:
util.EnsureComponentIsInstalled('gcd-emulator',
datastore_util.DATASTORE_TITLE)
else:
util.EnsureComponentIsInstalled('cloud-datastore-emulator',
datastore_util.DATASTORE_TITLE)
if not args.data_dir:
args.data_dir = datastore_util.GetDataDir()
| 33.583333 | 78 | 0.679901 |
780c656171bb67df04542fa31f6cef8b9e56f7c7 | 560 | py | Python | setup.py | dreamplatform/dream-profile | 6f9067db124f07aabd409b5f80d01db03655b85c | [
"BSD-3-Clause"
] | null | null | null | setup.py | dreamplatform/dream-profile | 6f9067db124f07aabd409b5f80d01db03655b85c | [
"BSD-3-Clause"
] | null | null | null | setup.py | dreamplatform/dream-profile | 6f9067db124f07aabd409b5f80d01db03655b85c | [
"BSD-3-Clause"
] | null | null | null |
from setuptools import setup, find_packages
def get_version():
try:
import subprocess
p = subprocess.Popen('hg id -t', shell=True, stdout=subprocess.PIPE)
tag = p.stdout.read()[1:].strip()
return tag
except:
return 'dev'
setup(
name = "dream-profile",
version = get_version(),
license = 'Modified BSD',
description = "Dream platform User profile",
author = 'Haltu',
packages = find_packages(),
include_package_data = True,
zip_safe = False,
install_requires = [
]
)
| 21.538462 | 76 | 0.610714 |
7a130d0369cc14b6836d65dab3129fd8a64abefc | 22,952 | py | Python | main_decoder.py | okwrtdsh/3D-ResNets-PyTorch | f36a32ea8b283524d1d102937c49689b1f475b5f | [
"MIT"
] | null | null | null | main_decoder.py | okwrtdsh/3D-ResNets-PyTorch | f36a32ea8b283524d1d102937c49689b1f475b5f | [
"MIT"
] | null | null | null | main_decoder.py | okwrtdsh/3D-ResNets-PyTorch | f36a32ea8b283524d1d102937c49689b1f475b5f | [
"MIT"
] | null | null | null | import os
os.environ['PYTHONHASHSEED'] = '0'
import random
random.seed(12345)
import numpy as np
np.random.seed(42)
import torch
torch.manual_seed(123)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(123)
torch.backends.cudnn.enabled = True
from torch.backends import cudnn
cudnn.benchmark = True
import json
import numpy as np # noqa
import torch
from torch import nn
from torch import optim
from torch.optim import lr_scheduler
from opts import parse_opts
from model_decoder import generate_model
from mean import get_mean, get_std
from decoder_transforms import (
Compose, Normalize, Scale, CenterCrop, CornerCrop, MultiScaleCornerCrop,
MultiScaleRandomCrop, RandomHorizontalFlip, ToTensor, RGB2Gray, LowResolution)
from temporal_transforms import LoopPadding, TemporalRandomCrop, TemporalCenterCrop
from spatio_temporal_transforms import Coded, Averaged, OneFrame, ToTemporal, ToRepeat
from target_transforms import ClassLabel, VideoID
from target_transforms import Compose as TargetCompos # noqa
from dataset_decoder import get_training_set, get_validation_set, get_test_set
from utils import Logger
# from train_decoder import train_epoch
# from validation_decoder import val_epoch
import test
import re
###########################################################
import sys
import math
import torch
from torch.autograd import Variable
import time
import os
import numpy as np
from utils import AverageMeter, calculate_accuracy, save_gif, accuracy
from models.binarized_modules import binarizef
def train_epoch(epoch, data_loader, model, criterion_decoder, criterion_clf, optimizer, opt,
epoch_logger, batch_logger, device):
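    # joint loss: frame-reconstruction MSE + opt.alpha * classification cross-entropy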
print('train at epoch {}'.format(epoch))
model.train()
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
loss_mse = AverageMeter()
loss_ce = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
end_time = time.time()
for i, (inputs, targets, target_labels) in enumerate(data_loader):
data_time.update(time.time() - end_time)
inputs = inputs.to(device)
targets = targets.to(device)
target_labels = target_labels.to(device)
outputs, outputs_clf = model(inputs)
loss1 = criterion_decoder(outputs, targets)
loss2 = criterion_clf(outputs_clf, target_labels)
loss = loss1 + loss2 * opt.alpha
prec1, prec5 = accuracy(outputs_clf.data, target_labels, topk=(1, 5))
top1.update(prec1, inputs.size(0))
top5.update(prec5, inputs.size(0))
losses.update(loss.item(), inputs.size(0))
loss_mse.update(loss1.item(), inputs.size(0))
loss_ce.update(loss2.item(), inputs.size(0))
optimizer.zero_grad()
loss.backward()
optimizer.step()
batch_time.update(time.time() - end_time)
end_time = time.time()
batch_logger.log({
'epoch': epoch,
'batch': i + 1,
'iter': (epoch - 1) * len(data_loader) + (i + 1),
'loss': losses.val,
'lr': optimizer.param_groups[0]['lr']
})
sys.stdout.flush()
sys.stdout.write('\rEpoch: [{0}][{1}/{2}]\t'
'Time {batch_time.sum:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.sum:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'MSE: {mse.val:.4f} ({mse.avg:.4f})\t'
'PSNR: {psnr_val:.4f} ({psnr_avg:.4f})\t'
'CE: {ce.val:.4f} ({ce.avg:.4f})\t'
'Acc@1: {top1.val:.4f} ({top1.avg:.4f})\t'
'Acc@5: {top5.val:.4f} ({top5.avg:.4f})\t'
'\t\t'.format(
epoch,
i + 1,
len(data_loader),
batch_time=batch_time,
data_time=data_time,
loss=losses,
mse=loss_mse,
ce=loss_ce,
top1=top1,
top5=top5,
psnr_val=10 * math.log10(1 / loss_mse.val),
psnr_avg=10 * math.log10(1 / loss_mse.avg),
))
sys.stdout.flush()
print('\n[Train] Epoch{0}\t'
'Time: {batch_time.sum:.3f} ({batch_time.avg:.3f})\t'
'Data: {data_time.sum:.3f} ({data_time.avg:.3f})\t'
'Loss: {loss.avg:.4f}\n'
'MSE: {mse.avg:.4f}\t'
'PSNR: {psnr_avg:.4f}\t'
'CE: {ce.avg:.4f}\t'
'Acc@1: {top1.avg:.4f}\t'
'Acc@5: {top5.avg:.4f}\t'
'\t\t'.format(
epoch,
batch_time=batch_time,
data_time=data_time,
loss=losses,
mse=loss_mse,
ce=loss_ce,
top1=top1,
top5=top5,
psnr_avg=10 * math.log10(1 / loss_mse.avg),
))
print()
epoch_logger.log({
'epoch': epoch,
'loss': losses.avg,
'mse': loss_mse.avg,
'psnr': 10 * math.log10(1 / loss_mse.avg),
'ce': loss_ce.avg,
'top1': top1.avg,
'top5': top5.avg,
'lr': optimizer.param_groups[0]['lr'],
'batch_time': batch_time.sum,
'data_time': data_time.sum,
})
if 'exp' in opt.model and not opt.load_path:
mask = binarizef(
list(model.parameters())[0]
).add_(1).div_(2).to('cpu').detach().numpy()
print('max', mask.max())
print('min', mask.min())
mask = mask.reshape((opt.sample_duration, 8, 8, 1)).astype(np.uint8)
assert mask.shape == (opt.sample_duration, 8, 8, 1)
# save_file_path = os.path.join(opt.result_path,
# 'mask_{}.npy'.format(epoch))
# np.save(save_file_path, mask)
save_file_path = os.path.join(opt.result_path,
'mask_{}.gif'.format(epoch))
save_gif(mask, save_file_path, vmax=1, vmin=0)
if epoch % opt.checkpoint == 0:
save_file_path = os.path.join(opt.result_path,
'save_{}.pth'.format(epoch))
states = {
'epoch': epoch + 1,
'arch': opt.arch,
'state_dict': model.state_dict(),
'optimizer': optimizer.state_dict(),
}
torch.save(states, save_file_path)
###########################################################
import torch
from torch.autograd import Variable
import time
import sys
import numpy as np
from utils import AverageMeter, calculate_accuracy, accuracy
def val_epoch(epoch, data_loader, model, criterion_decoder, criterion_clf, opt, logger, device):
print('validation at epoch {}'.format(epoch))
model.eval()
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
loss_mse = AverageMeter()
loss_ce = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
end_time = time.time()
for i, (inputs, targets, target_labels) in enumerate(data_loader):
data_time.update(time.time() - end_time)
inputs = inputs.to(device)
targets = targets.to(device)
target_labels = target_labels.to(device)
outputs, outputs_clf = model(inputs)
loss1 = criterion_decoder(outputs, targets)
loss2 = criterion_clf(outputs_clf, target_labels)
loss = loss1 + loss2 * opt.alpha
losses.update(loss.item(), inputs.size(0))
loss_mse.update(loss1.item(), inputs.size(0))
loss_ce.update(loss2.item(), inputs.size(0))
prec1, prec5 = accuracy(outputs_clf.data, target_labels, topk=(1, 5))
top1.update(prec1, inputs.size(0))
top5.update(prec5, inputs.size(0))
if i == 0:
outputs = outputs.cpu().detach().numpy().reshape(-1, 16, 112, 112, 1)
for j, output in enumerate(outputs):
if j % 3 == 0 and j < 10:
save_gif_path = os.path.join(opt.result_path, 'val_%005d_sample%02d.gif' % (epoch, j))
save_gif((np.clip(output, 0, 1) * 255).reshape(16, 112, 112, 1).astype(np.uint8), save_gif_path, vmax=255, vmin=0, interval=2000/16)
batch_time.update(time.time() - end_time)
end_time = time.time()
sys.stdout.flush()
sys.stdout.write('\rEpoch: [{0}][{1}/{2}]\t'
'Time {batch_time.sum:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.sum:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'MSE: {mse.val:.4f} ({mse.avg:.4f})\t'
'PSNR: {psnr_val:.4f} ({psnr_avg:.4f})\t'
'CE: {ce.val:.4f} ({ce.avg:.4f})\t'
'Acc@1: {top1.val:.4f} ({top1.avg:.4f})\t'
'Acc@5: {top5.val:.4f} ({top5.avg:.4f})\t'
'\t\t'.format(
epoch,
i + 1,
len(data_loader),
batch_time=batch_time,
data_time=data_time,
loss=losses,
mse=loss_mse,
ce=loss_ce,
top1=top1,
top5=top5,
psnr_val=10 * math.log10(1 / loss_mse.val),
psnr_avg=10 * math.log10(1 / loss_mse.avg),
))
sys.stdout.flush()
print('\n[Val] Epoch{0}\t'
'Time: {batch_time.sum:.3f} ({batch_time.avg:.3f})\t'
'Data: {data_time.sum:.3f} ({data_time.avg:.3f})\t'
'Loss: {loss.avg:.4f}\n'
'MSE: {mse.avg:.4f}\t'
'PSNR: {psnr_avg:.4f}\t'
'CE: {ce.avg:.4f}\t'
'Acc@1: {top1.avg:.4f}\t'
'Acc@5: {top5.avg:.4f}\t'
'\t\t'.format(
epoch,
batch_time=batch_time,
data_time=data_time,
loss=losses,
mse=loss_mse,
ce=loss_ce,
top1=top1,
top5=top5,
psnr_avg=10 * math.log10(1 / loss_mse.avg),
))
print()
logger.log({
'epoch': epoch,
'loss': losses.avg,
'mse': loss_mse.avg,
'psnr': 10 * math.log10(1 / loss_mse.avg),
'ce': loss_ce.avg,
'top1': top1.avg,
'top5': top5.avg,
})
return losses.avg
###########################################################
import numpy as np
from glob import glob
import cv2
import math
from utils import save_gif
def psnr(img1, img2, vmax=1):
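    # peak signal-to-noise ratio in dB; identical images are capped at 100 dB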
mse = np.mean((img1 - img2) ** 2)
if mse == 0:
return 100
return 10 * math.log10(vmax * vmax / mse)
def eval_epoch(epoch, model, opt, device):
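    # reconstruct a fixed set of held-out clips, report per-clip PSNR and save GIFs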
model.eval()
psnrs = []
size = opt.spatial_compress_size
GT = []
DATA = []
paths = sorted(glob('../EVAL_mat/EVAL14/*.npy'))
for i, path in enumerate(paths):
video = np.load(path).reshape(16, 256, 256)
        video = np.array([cv2.resize(img, dsize=(112, 112)).astype(np.uint8) for img in video])
GT.append(video.astype(np.float32) / 255)
DATA.append(np.array([cv2.resize(img, dsize=None, fx=1/size, fy=1/size).astype(np.uint8) for img in video]).astype(np.float32) / 255)
GT = np.array(GT).astype(np.float32)
DATA = np.array(DATA).astype(np.float32)
reconstructed = []
with torch.no_grad():
for i, path in enumerate(paths):
data = torch.from_numpy(DATA[i].reshape(1, 1, 16, 112//size, 112//size)).to(device).float()
output, _ = model(data)
output = output.cpu().detach().numpy().reshape(1, 16, 112, 112)
reconstructed.append(output)
for i, path in enumerate(paths):
p = psnr(GT[i], np.clip(reconstructed[i], 0, 1), vmax=1)
print(os.path.basename(path).replace('.npy', ':'), p)
save_gif_path = os.path.join(opt.result_path, ('eval_%005d_' % epoch) + os.path.basename(path).replace('.npy', '.gif'))
save_gif((np.clip(reconstructed[i], 0, 1) * 255).reshape(16, 112, 112, 1).astype(np.uint8), save_gif_path, vmax=255, vmin=0, interval=2000/16)
psnrs.append(p)
print(np.mean(psnrs))
###########################################################
if __name__ == '__main__':
opt = parse_opts()
if opt.root_path != '':
#opt.video_path = os.path.join(opt.root_path, opt.video_path)
opt.annotation_path = os.path.join(opt.root_path, opt.annotation_path)
opt.result_path = os.path.join(opt.root_path, opt.result_path)
if opt.resume_path:
opt.resume_path = os.path.join(opt.root_path, opt.resume_path)
if opt.pretrain_path:
opt.pretrain_path = os.path.join(opt.root_path, opt.pretrain_path)
opt.scales = [opt.initial_scale]
for i in range(1, opt.n_scales):
opt.scales.append(opt.scales[-1] * opt.scale_step)
opt.arch = '{}-{}'.format(opt.model, opt.model_depth)
opt.mean = get_mean(opt.norm_value, dataset=opt.mean_dataset)
opt.std = get_std(opt.norm_value)
print(opt)
with open(os.path.join(opt.result_path, 'opts.json'), 'w') as opt_file:
json.dump(vars(opt), opt_file, indent=2)
torch.manual_seed(opt.manual_seed)
model, parameters = generate_model(opt)
print(model)
from torch.backends import cudnn
cudnn.benchmark = True
criterion_decoder = nn.MSELoss()
criterion_clf = nn.CrossEntropyLoss()
device = torch.device("cpu" if opt.no_cuda else "cuda")
if not opt.no_cuda:
criterion_decoder = criterion_decoder.to(device)
criterion_clf = criterion_clf.to(device)
if opt.no_mean_norm and not opt.std_norm:
norm_method = Normalize([0, 0, 0], [1, 1, 1])
elif not opt.std_norm:
norm_method = Normalize(opt.mean, [1, 1, 1])
else:
norm_method = Normalize(opt.mean, opt.std)
if not opt.no_train:
assert opt.train_crop in ['random', 'corner', 'center']
if opt.train_crop == 'random':
crop_method = MultiScaleRandomCrop(opt.scales, opt.sample_size)
elif opt.train_crop == 'corner':
crop_method = MultiScaleCornerCrop(opt.scales, opt.sample_size)
elif opt.train_crop == 'center':
crop_method = MultiScaleCornerCrop(
opt.scales, opt.sample_size, crop_positions=['c'])
common_temporal_transform = TemporalRandomCrop(opt.sample_duration)
common_spatial_transform = Compose([
crop_method,
RandomHorizontalFlip(),
RGB2Gray(),
])
target_spatial_transform = Compose([
ToTensor(opt.norm_value), norm_method,
])
input_spatial_transform = Compose([
LowResolution(opt.spatial_compress_size, use_cv2=opt.use_cv2),
ToTensor(opt.norm_value), norm_method,
])
target_label_transform = ClassLabel()
training_data = get_training_set(
opt, common_temporal_transform, common_spatial_transform,
target_spatial_transform, input_spatial_transform, target_label_transform)
train_loader = torch.utils.data.DataLoader(
training_data,
batch_size=opt.batch_size,
shuffle=True,
num_workers=opt.n_threads,
pin_memory=True)
train_logger = Logger(
os.path.join(opt.result_path, 'train.log'),
['epoch', 'loss', 'mse', 'psnr', 'ce', 'top1', 'top5', 'lr', 'batch_time', 'data_time'])
train_batch_logger = Logger(
os.path.join(opt.result_path, 'train_batch.log'),
['epoch', 'batch', 'iter', 'loss', 'lr'])
sv_rgx = re.compile(r'.*\.weight\d+\..*')
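    # matches parameter names containing a numbered weight (e.g. '.weight0.'); those get a separate lr scale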
if opt.nesterov:
dampening = 0
else:
dampening = opt.dampening
    # build the per-parameter-group list once; it is shared by all optimizer choices
    params = []
    for i, ((name, _), p) in enumerate(zip(model.named_parameters(), parameters)):
        if 'exp' in name:
            print('{}*: {}({})'.format(i, name, p.shape))
            params.append({
                "params": p,
                "lr": opt.learning_rate * opt.lr_exp_rate})
        elif sv_rgx.match(name):
            print('{}**: {}({})'.format(i, name, p.shape))
            params.append({
                "params": p,
                "lr": opt.learning_rate * 64 * opt.lr_pt_rate})
        else:
            print('{}: {}({})'.format(i, name, p.shape))
            params.append({"params": p})
    if opt.optimizer == 'sgd':
        optimizer = optim.SGD(
            params,
            lr=opt.learning_rate,
            momentum=opt.momentum,
            dampening=dampening,
            weight_decay=opt.weight_decay,
            nesterov=opt.nesterov)
    elif opt.optimizer == 'adam':
        optimizer = optim.Adam(
            params,
            lr=opt.learning_rate)
    elif opt.optimizer == 'rmsprop':
        optimizer = optim.RMSprop(
            params,
            lr=opt.learning_rate)
scheduler = lr_scheduler.ReduceLROnPlateau(
optimizer, 'min', patience=opt.lr_patience)
if not opt.no_val:
common_temporal_transform = TemporalCenterCrop(opt.sample_duration)
common_spatial_transform = Compose([
Scale(opt.sample_size),
CenterCrop(opt.sample_size),
RGB2Gray(),
])
target_spatial_transform = Compose([
ToTensor(opt.norm_value), norm_method,
])
input_spatial_transform = Compose([
LowResolution(opt.spatial_compress_size, use_cv2=opt.use_cv2),
ToTensor(opt.norm_value), norm_method,
])
target_label_transform = ClassLabel()
validation_data = get_validation_set(
opt, common_temporal_transform, common_spatial_transform,
target_spatial_transform, input_spatial_transform, target_label_transform)
val_loader = torch.utils.data.DataLoader(
validation_data,
batch_size=opt.batch_size,
shuffle=False,
num_workers=opt.n_threads,
pin_memory=True)
val_logger = Logger(
os.path.join(opt.result_path, 'val.log'),
['epoch', 'loss', 'mse', 'psnr', 'ce', 'top1', 'top5'])
if opt.resume_path:
print('loading checkpoint {}'.format(opt.resume_path))
checkpoint = torch.load(opt.resume_path)
assert opt.arch == checkpoint['arch']
opt.begin_epoch = checkpoint['epoch']
model.load_state_dict(checkpoint['state_dict'])
if not opt.no_train:
optimizer.load_state_dict(checkpoint['optimizer'])
if opt.fixed_mask:
check = False
for i, (name, param) in enumerate(model.named_parameters()):
if name == 'module.exp.weight':
print(name, 'FIXED!')
param.requires_grad = False
check = True
        if not check:
            raise RuntimeError('module.exp.weight not found; cannot freeze the mask')
model.to(device)
print('run')
for i in range(opt.begin_epoch, opt.n_epochs + 1):
if not opt.no_train:
train_epoch(i, train_loader, model, criterion_decoder, criterion_clf, optimizer, opt,
train_logger, train_batch_logger, device)
if not opt.no_val:
validation_loss = val_epoch(i, val_loader, model, criterion_decoder, criterion_clf, opt,
val_logger, device)
if i % 5 == 0:
eval_epoch(i, model, opt, device)
if not opt.no_train and not opt.no_val:
scheduler.step(validation_loss)
if opt.test:
common_temporal_transform = LoopPadding(opt.sample_duration)
common_spatial_transform = Compose([
Scale(int(opt.sample_size / opt.scale_in_test)),
CornerCrop(opt.sample_size, opt.crop_position_in_test),
RGB2Gray(),
])
target_spatial_transform = Compose([
ToTensor(opt.norm_value), norm_method,
])
input_spatial_transform = Compose([
LowResolution(opt.spatial_compress_size, use_cv2=opt.use_cv2),
ToTensor(opt.norm_value), norm_method,
])
target_label_transform = VideoID()
spatial_transform = Compose([
Scale(int(opt.sample_size / opt.scale_in_test)),
CornerCrop(opt.sample_size, opt.crop_position_in_test),
RGB2Gray(),
ToTensor(opt.norm_value), norm_method,
])
test_data = get_test_set(
opt, common_temporal_transform, common_spatial_transform,
target_spatial_transform, input_spatial_transform, target_label_transform)
test_loader = torch.utils.data.DataLoader(
test_data,
batch_size=opt.batch_size,
shuffle=False,
num_workers=opt.n_threads,
pin_memory=True)
test.test(test_loader, model, opt, test_data.class_names, device)
| 38.253333 | 152 | 0.547229 |
527754431164a6dd17227d9119b53df9ea5e1d06 | 9,403 | py | Python | CalibCalorimetry/HcalPlugins/python/Hcal_Conditions_forGlobalTag_cff.py | pasmuss/cmssw | 566f40c323beef46134485a45ea53349f59ae534 | [
"Apache-2.0"
] | null | null | null | CalibCalorimetry/HcalPlugins/python/Hcal_Conditions_forGlobalTag_cff.py | pasmuss/cmssw | 566f40c323beef46134485a45ea53349f59ae534 | [
"Apache-2.0"
] | null | null | null | CalibCalorimetry/HcalPlugins/python/Hcal_Conditions_forGlobalTag_cff.py | pasmuss/cmssw | 566f40c323beef46134485a45ea53349f59ae534 | [
"Apache-2.0"
] | null | null | null | import FWCore.ParameterSet.Config as cms
## Recalibration Parameters
from DataFormats.HcalCalibObjects.HFRecalibrationParameters_cff import *
hcal_db_producer = cms.ESProducer("HcalDbProducer",
dump = cms.untracked.vstring(''),
file = cms.untracked.string('')
)
#------------------------- HARDCODED conditions
es_hardcode = cms.ESSource("HcalHardcodeCalibrations",
toGet = cms.untracked.vstring('GainWidths'),
iLumi = cms.double(-1.), # for Upgrade: fb-1
HBRecalibration = cms.bool(False), # True for Upgrade
HBreCalibCutoff = cms.double(20.), # if above is True
HBmeanenergies = cms.FileInPath("CalibCalorimetry/HcalPlugins/data/meanenergiesHB.txt"),
HERecalibration = cms.bool(False), # True for Upgrade
HEreCalibCutoff = cms.double(20.), # if above is True
HEmeanenergies = cms.FileInPath("CalibCalorimetry/HcalPlugins/data/meanenergiesHE.txt"),
HFRecalibration = cms.bool(False), # True for Upgrade
HFRecalParameterBlock = HFRecalParameterBlock,
GainWidthsForTrigPrims = cms.bool(False), # True Upgrade
useHBUpgrade = cms.bool(False),
useHEUpgrade = cms.bool(False),
useHFUpgrade = cms.bool(False),
useHOUpgrade = cms.bool(True),
testHFQIE10 = cms.bool(False),
testHEPlan1 = cms.bool(False),
killHE = cms.bool(False),
useLayer0Weight = cms.bool(False),
hb = cms.PSet(
pedestal = cms.double(3.285),
pedestalWidth = cms.double(0.809),
gain = cms.vdouble(0.19),
gainWidth = cms.vdouble(0.0),
qieType = cms.int32(0),
qieOffset = cms.vdouble(-0.49,1.8,7.2,37.9),
qieSlope = cms.vdouble(0.912,0.917,0.922,0.923),
mcShape = cms.int32(125),
recoShape = cms.int32(105),
photoelectronsToAnalog = cms.double(0.3305),
darkCurrent = cms.vdouble(0.0),
doRadiationDamage = cms.bool(False),
),
he = cms.PSet(
pedestal = cms.double(3.163),
pedestalWidth = cms.double(0.9698),
gain = cms.vdouble(0.23),
gainWidth = cms.vdouble(0),
qieType = cms.int32(0),
qieOffset = cms.vdouble(-0.38,2.0,7.6,39.6),
qieSlope = cms.vdouble(0.912,0.916,0.920,0.922),
mcShape = cms.int32(125),
recoShape = cms.int32(105),
photoelectronsToAnalog = cms.double(0.3305),
darkCurrent = cms.vdouble(0.0),
doRadiationDamage = cms.bool(False),
),
hf = cms.PSet(
pedestal = cms.double(9.354),
pedestalWidth = cms.double(2.516),
gain = cms.vdouble(0.14,0.135),
gainWidth = cms.vdouble(0.0,0.0),
qieType = cms.int32(0),
qieOffset = cms.vdouble(-0.87,1.4,7.8,-29.6),
qieSlope = cms.vdouble(0.359,0.358,0.360,0.367),
mcShape = cms.int32(301),
recoShape = cms.int32(301),
photoelectronsToAnalog = cms.double(0.0),
darkCurrent = cms.vdouble(0.0),
doRadiationDamage = cms.bool(False),
),
ho = cms.PSet(
pedestal = cms.double(12.06),
pedestalWidth = cms.double(0.6285),
gain = cms.vdouble(0.0060,0.0087),
gainWidth = cms.vdouble(0.0,0.0),
qieType = cms.int32(0),
qieOffset = cms.vdouble(-0.44,1.4,7.1,38.5),
qieSlope = cms.vdouble(0.907,0.915,0.920,0.921),
mcShape = cms.int32(201),
recoShape = cms.int32(201),
photoelectronsToAnalog = cms.double(4.0),
darkCurrent = cms.vdouble(0.0),
doRadiationDamage = cms.bool(False),
),
hbUpgrade = cms.PSet(
pedestal = cms.double(17.3),
pedestalWidth = cms.double(1.5),
        gain = cms.vdouble(1/2276.), # 1/(51.72 pe/GeV * 44.0 fC/pe) = 1/2276 GeV/fC
gainWidth = cms.vdouble(0),
qieType = cms.int32(2),
qieOffset = cms.vdouble(0.,0.,0.,0.),
qieSlope = cms.vdouble(0.05376,0.05376,0.05376,0.05376), #1/(3.1*6) where 6 is shunt factor
mcShape = cms.int32(203),
recoShape = cms.int32(203),
photoelectronsToAnalog = cms.double(44.0),
darkCurrent = cms.vdouble(0.01,0.015),
doRadiationDamage = cms.bool(True),
radiationDamage = cms.PSet(
temperatureBase = cms.double(20),
temperatureNew = cms.double(-5),
intlumiOffset = cms.double(150),
depVsTemp = cms.double(0.0631),
intlumiToNeutrons = cms.double(3.67e8),
depVsNeutrons = cms.vdouble(5.69e-11,7.90e-11),
),
),
heUpgrade = cms.PSet(
pedestal = cms.double(17.3),
pedestalWidth = cms.double(1.5),
        gain = cms.vdouble(1/2276.), # 1/(51.72 pe/GeV * 44.0 fC/pe) = 1/2276 GeV/fC
gainWidth = cms.vdouble(0),
qieType = cms.int32(2),
qieOffset = cms.vdouble(0.,0.,0.,0.),
qieSlope = cms.vdouble(0.05376,0.05376,0.05376,0.05376), #1/(3.1*6) where 6 is shunt factor
mcShape = cms.int32(203),
recoShape = cms.int32(203),
photoelectronsToAnalog = cms.double(44.0),
darkCurrent = cms.vdouble(0.01,0.015),
doRadiationDamage = cms.bool(True),
radiationDamage = cms.PSet(
temperatureBase = cms.double(20),
temperatureNew = cms.double(5),
intlumiOffset = cms.double(75),
depVsTemp = cms.double(0.0631),
intlumiToNeutrons = cms.double(2.92e7),
depVsNeutrons = cms.vdouble(5.69e-11,7.90e-11),
),
),
hfUpgrade = cms.PSet(
pedestal = cms.double(13.33),
pedestalWidth = cms.double(3.33),
gain = cms.vdouble(0.14,0.135),
gainWidth = cms.vdouble(0.0,0.0),
qieType = cms.int32(1),
qieOffset = cms.vdouble(0.0697,-0.7405,12.38,-671.9),
qieSlope = cms.vdouble(0.297,0.298,0.298,0.313),
mcShape = cms.int32(301),
recoShape = cms.int32(301),
photoelectronsToAnalog = cms.double(0.0),
darkCurrent = cms.vdouble(0.0),
doRadiationDamage = cms.bool(False),
),
# types (in order): HcalHOZecotek, HcalHOHamamatsu, HcalHEHamamatsu1, HcalHEHamamatsu2, HcalHBHamamatsu1, HcalHBHamamatsu2, HcalHPD
SiPMCharacteristics = cms.VPSet(
cms.PSet( pixels = cms.int32(36000), crosstalk = cms.double(0.0), nonlin1 = cms.double(1.0), nonlin2 = cms.double(0.0), nonlin3 = cms.double(0.0) ),
cms.PSet( pixels = cms.int32(2500), crosstalk = cms.double(0.0), nonlin1 = cms.double(1.0), nonlin2 = cms.double(0.0), nonlin3 = cms.double(0.0) ),
cms.PSet( pixels = cms.int32(27370), crosstalk = cms.double(0.17), nonlin1 = cms.double(1.00985), nonlin2 = cms.double(7.84089E-6), nonlin3 = cms.double(2.86282E-10) ),
cms.PSet( pixels = cms.int32(38018), crosstalk = cms.double(0.196), nonlin1 = cms.double(1.00546), nonlin2 = cms.double(6.40239E-6), nonlin3 = cms.double(1.27011E-10) ),
cms.PSet( pixels = cms.int32(27370), crosstalk = cms.double(0.17), nonlin1 = cms.double(1.00985), nonlin2 = cms.double(7.84089E-6), nonlin3 = cms.double(2.86282E-10) ),
cms.PSet( pixels = cms.int32(38018), crosstalk = cms.double(0.196), nonlin1 = cms.double(1.00546), nonlin2 = cms.double(6.40239E-6), nonlin3 = cms.double(1.27011E-10) ),
cms.PSet( pixels = cms.int32(0), crosstalk = cms.double(0.0), nonlin1 = cms.double(1.0), nonlin2 = cms.double(0.0), nonlin3 = cms.double(0.0) ),
),
)
es_prefer_hcalHardcode = cms.ESPrefer("HcalHardcodeCalibrations", "es_hardcode")
from Configuration.Eras.Modifier_hcalHardcodeConditions_cff import hcalHardcodeConditions
hcalHardcodeConditions.toModify( es_hardcode,
toGet = cms.untracked.vstring(
'GainWidths',
'MCParams',
'RecoParams',
'RespCorrs',
'QIEData',
'QIETypes',
'Gains',
'Pedestals',
'PedestalWidths',
'ChannelQuality',
'ZSThresholds',
'TimeCorrs',
'LUTCorrs',
'LutMetadata',
'L1TriggerObjects',
'PFCorrs',
'ElectronicsMap',
'FrontEndMap',
'CovarianceMatrices',
'SiPMParameters',
'SiPMCharacteristics',
'TPChannelParameters',
'TPParameters',
'FlagHFDigiTimeParams'
),
GainWidthsForTrigPrims = cms.bool(True)
)
from Configuration.Eras.Modifier_run2_HCAL_2017_cff import run2_HCAL_2017
from Configuration.Eras.Modifier_run2_HF_2017_cff import run2_HF_2017
from Configuration.Eras.Modifier_run2_HE_2017_cff import run2_HE_2017
from Configuration.Eras.Modifier_run2_HEPlan1_2017_cff import run2_HEPlan1_2017
from Configuration.Eras.Modifier_run3_HB_cff import run3_HB
run2_HCAL_2017.toModify( es_hardcode, useLayer0Weight = cms.bool(True) )
run2_HF_2017.toModify( es_hardcode, useHFUpgrade = cms.bool(True) )
run2_HE_2017.toModify( es_hardcode, useHEUpgrade = cms.bool(True), HEreCalibCutoff = cms.double(100.0) )
run2_HEPlan1_2017.toModify( es_hardcode, testHEPlan1 = cms.bool(True), useHEUpgrade = cms.bool(False), HEreCalibCutoff = cms.double(20.0) )
run3_HB.toModify( es_hardcode, useHBUpgrade = cms.bool(True), HBreCalibCutoff = cms.double(100.0) )
from Configuration.Eras.Modifier_phase2_hgcal_cff import phase2_hgcal
phase2_hgcal.toModify( es_hardcode, killHE = cms.bool(True) )
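## Illustrative usage (a sketch): in a cmsRun configuration this fragment
## is typically pulled in with process.load(...), which registers the
## hardcoded conditions together with the es_prefer statement above:
##
##   process.load('CalibCalorimetry.HcalPlugins.Hcal_Conditions_forGlobalTag_cff')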
| 45.425121 | 177 | 0.618526 |
796e16b3465b75c30746beb085526ab3cb413525 | 33,850 | py | Python | server/src/weblab/core/coordinator/redis/priority_queue_scheduler.py | zstars/weblabdeusto | 09bd9d93d483671bca67ee5c70a9c412eb5d352f | [
"BSD-2-Clause"
] | null | null | null | server/src/weblab/core/coordinator/redis/priority_queue_scheduler.py | zstars/weblabdeusto | 09bd9d93d483671bca67ee5c70a9c412eb5d352f | [
"BSD-2-Clause"
] | null | null | null | server/src/weblab/core/coordinator/redis/priority_queue_scheduler.py | zstars/weblabdeusto | 09bd9d93d483671bca67ee5c70a9c412eb5d352f | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
#-*-*- encoding: utf-8 -*-*-
#
# Copyright (C) 2005 onwards University of Deusto
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
# This software consists of contributions made by many individuals,
# listed below:
#
# Author: Pablo Orduña <pablo@ordunya.com>
#
import time
import datetime
import random
import json
from voodoo.log import logged
import voodoo.log as log
from voodoo.typechecker import typecheck
from voodoo.gen import CoordAddress
import voodoo.sessions.session_id as SessionId
from voodoo.override import Override
from weblab.core.coordinator.exc import ExpiredSessionError
from weblab.core.coordinator.scheduler_transactions_synchronizer import SchedulerTransactionsSynchronizer
from weblab.core.coordinator.scheduler import Scheduler
import weblab.core.coordinator.status as WSS
from weblab.core.coordinator.resource import Resource
from weblab.data.experiments import ExperimentInstanceId, ExperimentId
from weblab.core.coordinator.redis.constants import (
WEBLAB_RESOURCE_RESERVATION_PQUEUE,
WEBLAB_RESOURCE_SLOTS,
WEBLAB_RESOURCE_RESERVATIONS,
WEBLAB_RESOURCE_PQUEUE_RESERVATIONS,
WEBLAB_RESOURCE_PQUEUE_POSITIONS,
WEBLAB_RESOURCE_PQUEUE_MAP,
WEBLAB_RESOURCE_PQUEUE_SORTED,
WEBLAB_RESOURCE_PQUEUE_INSTANCE_RESERVATIONS,
LAB_COORD,
CLIENT_INITIAL_DATA,
REQUEST_INFO,
EXPERIMENT_TYPE,
EXPERIMENT_INSTANCE,
START_TIME,
TIME,
INITIALIZATION_IN_ACCOUNTING,
PRIORITY,
TIMESTAMP_BEFORE,
TIMESTAMP_AFTER,
LAB_SESSION_ID,
EXP_INFO,
INITIAL_CONFIGURATION,
RESOURCE_INSTANCE,
ACTIVE_STATUS,
STATUS_RESERVED,
STATUS_WAITING_CONFIRMATION,
)
EXPIRATION_TIME = 3600 # seconds
DEBUG = False
###########################################################
#
# exc_checker: a decorator for the scheduler methods below. It logs any
# unexpected exception raised by the wrapped function (printing the
# traceback when DEBUG is enabled) and then re-raises it, so failures
# are recorded in the logs instead of being silently swallowed.
#
def exc_checker(func):
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except:
if DEBUG:
import traceback
traceback.print_exc()
log.log(
PriorityQueueScheduler, log.level.Error,
"Unexpected exception while running %s" % func.__name__ )
log.log_exc(PriorityQueueScheduler, log.level.Warning)
raise
wrapper.__name__ = func.__name__
wrapper.__doc__ = func.__doc__
return wrapper
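# A minimal sketch of how exc_checker is meant to be applied (the method
# name below is hypothetical; see the decorated PriorityQueueScheduler
# methods further down for real uses):
#
#     @exc_checker
#     @logged()
#     def some_scheduler_method(self, reservation_id):
#         ...  # unexpected exceptions are logged, then re-raised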
TIME_ANTI_RACE_CONDITIONS = 0.1
class PriorityQueueScheduler(Scheduler):
def __init__(self, generic_scheduler_arguments, randomize_instances = True, **kwargs):
super(PriorityQueueScheduler, self).__init__(generic_scheduler_arguments, **kwargs)
self.randomize_instances = randomize_instances
self._synchronizer = SchedulerTransactionsSynchronizer(self)
self._synchronizer.start()
@Override(Scheduler)
def stop(self):
self._synchronizer.stop()
@Override(Scheduler)
def is_remote(self):
return False
@exc_checker
@logged()
@Override(Scheduler)
@typecheck(typecheck.ANY, typecheck.ANY, Resource)
def removing_current_resource_slot(self, client, resource):
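        """
        If the given resource instance currently holds a confirmed
        reservation, downgrade that reservation, release the resource,
        strip the confirmation data from the stored reservation and put
        the reservation back at the front of the priority queue.
        Returns True if a reservation was pushed back to the queue,
        False otherwise.
        """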
weblab_resource_instance_reservations = WEBLAB_RESOURCE_PQUEUE_INSTANCE_RESERVATIONS % (resource.resource_type, resource.resource_instance)
current_reservation_ids = client.smembers(weblab_resource_instance_reservations)
if len(current_reservation_ids) > 0:
current_reservation_id = list(current_reservation_ids)[0]
if client.srem(weblab_resource_instance_reservations, current_reservation_id):
self.reservations_manager.downgrade_confirmation(current_reservation_id)
self.resources_manager.release_resource(resource)
# Remove data that was added when confirmed
weblab_reservation_pqueue = WEBLAB_RESOURCE_RESERVATION_PQUEUE % (self.resource_type_name, current_reservation_id)
reservation_data_str = client.get(weblab_reservation_pqueue)
reservation_data = json.loads(reservation_data_str)
reservation_data.pop(ACTIVE_STATUS, None)
reservation_data.pop(TIMESTAMP_BEFORE, None)
reservation_data.pop(TIMESTAMP_AFTER, None)
reservation_data.pop(LAB_SESSION_ID, None)
reservation_data.pop(EXP_INFO, None)
reservation_data_str = json.dumps(reservation_data)
reservation_data = client.set(weblab_reservation_pqueue, reservation_data_str)
# Add back to the queue
weblab_resource_pqueue_map = WEBLAB_RESOURCE_PQUEUE_MAP % self.resource_type_name
weblab_resource_pqueue_sorted = WEBLAB_RESOURCE_PQUEUE_SORTED % self.resource_type_name
filled_reservation_id = client.hget(weblab_resource_pqueue_map, current_reservation_id)
client.zadd(weblab_resource_pqueue_sorted, filled_reservation_id, -1)
return True
return False
@exc_checker
@logged()
@Override(Scheduler)
def reserve_experiment(self, reservation_id, experiment_id, time, priority, initialization_in_accounting, client_initial_data, request_info):
"""
priority: the less, the more priority
"""
client = self.redis_maker()
# For indexing purposes
weblab_reservation_pqueue = WEBLAB_RESOURCE_RESERVATION_PQUEUE % (self.resource_type_name, reservation_id)
weblab_resource_reservations = WEBLAB_RESOURCE_RESERVATIONS % self.resource_type_name
# Queue management
weblab_resource_pqueue_reservations = WEBLAB_RESOURCE_PQUEUE_RESERVATIONS % self.resource_type_name
weblab_resource_pqueue_positions = WEBLAB_RESOURCE_PQUEUE_POSITIONS % self.resource_type_name
weblab_resource_pqueue_map = WEBLAB_RESOURCE_PQUEUE_MAP % self.resource_type_name
weblab_resource_pqueue_sorted = WEBLAB_RESOURCE_PQUEUE_SORTED % self.resource_type_name
        # Within the same priority, we want to sort the requests in the order they arrived.
        # To support this, we increment a long enough counter and prepend it to the reservation_id.
current_position = client.incr(weblab_resource_pqueue_positions)
filled_reservation_id = "%s_%s" % (str(current_position).zfill(100), reservation_id)
pipeline = client.pipeline()
pipeline.hset(weblab_resource_pqueue_map, reservation_id, filled_reservation_id)
pipeline.zadd(weblab_resource_pqueue_sorted, filled_reservation_id, priority)
pipeline.sadd(weblab_resource_reservations, reservation_id)
pipeline.sadd(weblab_resource_pqueue_reservations, reservation_id)
generic_data = {
TIME : time,
INITIALIZATION_IN_ACCOUNTING : initialization_in_accounting,
PRIORITY : priority,
}
pipeline.set(weblab_reservation_pqueue, json.dumps(generic_data))
pipeline.execute()
return self.get_reservation_status(reservation_id)
#######################################################################
#
# Given a reservation_id, it returns in which state the reservation is
#
@exc_checker
@logged()
@Override(Scheduler)
def get_reservation_status(self, reservation_id):
self._remove_expired_reservations()
expired = self.reservations_manager.update(reservation_id)
if expired:
self._delete_reservation(reservation_id)
raise ExpiredSessionError("Expired reservation")
self._synchronizer.request_and_wait()
reservation_id_with_route = '%s;%s.%s' % (reservation_id, reservation_id, self.core_server_route)
client = self.redis_maker()
weblab_reservation_pqueue = WEBLAB_RESOURCE_RESERVATION_PQUEUE % (self.resource_type_name, reservation_id)
reservation_data_str = client.get(weblab_reservation_pqueue)
if reservation_data_str is None:
log.log(
PriorityQueueScheduler, log.level.Error,
"get_reservation_status called with a reservation_id that is not registered (not found on weblab_reservation_pqueue). Returning a WaitingInstanceStatus")
return WSS.WaitingInstancesQueueStatus(reservation_id_with_route, 50)
reservation_data = json.loads(reservation_data_str)
if ACTIVE_STATUS in reservation_data:
# Reserved or Waiting reservation
status = reservation_data[ACTIVE_STATUS]
# It may be just waiting for the experiment server to respond
if status == STATUS_WAITING_CONFIRMATION:
return WSS.WaitingConfirmationQueueStatus(reservation_id_with_route, self.core_server_url)
# Or the experiment server already responded and therefore we have all this data
str_lab_coord_address = reservation_data[LAB_COORD]
obtained_time = reservation_data[TIME]
initialization_in_accounting = reservation_data[INITIALIZATION_IN_ACCOUNTING]
lab_session_id = reservation_data[LAB_SESSION_ID]
initial_configuration = reservation_data[INITIAL_CONFIGURATION]
timestamp_before_tstamp = reservation_data[TIMESTAMP_BEFORE]
timestamp_after_tstamp = reservation_data[TIMESTAMP_AFTER]
if EXP_INFO in reservation_data and reservation_data[EXP_INFO]:
exp_info = json.loads(reservation_data[EXP_INFO])
else:
exp_info = {}
timestamp_before = datetime.datetime.fromtimestamp(timestamp_before_tstamp)
timestamp_after = datetime.datetime.fromtimestamp(timestamp_after_tstamp)
lab_coord_address = CoordAddress.translate(str_lab_coord_address)
if initialization_in_accounting:
before = timestamp_before_tstamp
else:
before = timestamp_after_tstamp
if before is not None:
remaining = (before + obtained_time) - self.time_provider.get_time()
else:
remaining = obtained_time
return WSS.LocalReservedStatus(reservation_id_with_route, lab_coord_address, SessionId.SessionId(lab_session_id), exp_info, obtained_time, initial_configuration, timestamp_before, timestamp_after, initialization_in_accounting, remaining, self.core_server_url)
# else it's waiting
weblab_resource_pqueue_map = WEBLAB_RESOURCE_PQUEUE_MAP % self.resource_type_name
weblab_resource_pqueue_sorted = WEBLAB_RESOURCE_PQUEUE_SORTED % self.resource_type_name
filled_reservation_id = client.hget(weblab_resource_pqueue_map, reservation_id)
if filled_reservation_id is None:
log.log(
PriorityQueueScheduler, log.level.Error,
"get_reservation_status called with a reservation_id that is not registered (not found on the reservations map). Returning a WaitingInstanceStatus")
return WSS.WaitingInstancesQueueStatus(reservation_id_with_route, 50)
position = client.zrank(weblab_resource_pqueue_sorted, filled_reservation_id)
if position is None: # It's not in the queue now
time.sleep(TIME_ANTI_RACE_CONDITIONS * random.random())
return self.get_reservation_status(reservation_id)
if self.resources_manager.are_resource_instances_working(self.resource_type_name):
return WSS.WaitingQueueStatus(reservation_id_with_route, position)
else:
return WSS.WaitingInstancesQueueStatus(reservation_id_with_route, position)
################################################################
#
# Called when it is confirmed by the Laboratory Server.
#
@exc_checker
@logged()
@Override(Scheduler)
def confirm_experiment(self, reservation_id, lab_session_id, initial_configuration, exp_info):
self._remove_expired_reservations()
weblab_reservation_pqueue = WEBLAB_RESOURCE_RESERVATION_PQUEUE % (self.resource_type_name, reservation_id)
client = self.redis_maker()
pqueue_reservation_data_str = client.get(weblab_reservation_pqueue)
if pqueue_reservation_data_str is None:
return
pqueue_reservation_data = json.loads(pqueue_reservation_data_str)
resource_instance_str = pqueue_reservation_data.get(RESOURCE_INSTANCE)
if resource_instance_str is not None:
resource_instance = Resource.parse(resource_instance_str)
if not self.resources_manager.check_working(resource_instance):
# TODO: if the experiment is broken and the student is ACTIVE_STATUS, something should be done
#
return
pqueue_reservation_data[LAB_SESSION_ID] = lab_session_id.id
pqueue_reservation_data[INITIAL_CONFIGURATION] = initial_configuration
pqueue_reservation_data[TIMESTAMP_AFTER] = self.time_provider.get_time()
pqueue_reservation_data[ACTIVE_STATUS] = STATUS_RESERVED
pqueue_reservation_data[EXP_INFO] = json.dumps(exp_info)
pqueue_reservation_data_str = json.dumps(pqueue_reservation_data)
client.set(weblab_reservation_pqueue, pqueue_reservation_data_str)
################################################################
#
# Called when the user disconnects or finishes the resource.
#
@exc_checker
@logged()
@Override(Scheduler)
def finish_reservation(self, reservation_id):
self._remove_expired_reservations()
weblab_reservation_pqueue = WEBLAB_RESOURCE_RESERVATION_PQUEUE % (self.resource_type_name, reservation_id)
client = self.redis_maker()
pqueue_reservation_data_str = client.get(weblab_reservation_pqueue)
if pqueue_reservation_data_str is None:
return
pqueue_reservation_data = json.loads(pqueue_reservation_data_str)
if ACTIVE_STATUS in pqueue_reservation_data:
enqueue_free_experiment_args = self._clean_current_reservation(reservation_id)
else:
enqueue_free_experiment_args = None
self._delete_reservation(reservation_id)
if enqueue_free_experiment_args is not None:
self.confirmer.enqueue_free_experiment(*enqueue_free_experiment_args)
def _clean_current_reservation(self, reservation_id):
client = self.redis_maker()
enqueue_free_experiment_args = None
if reservation_id is not None:
weblab_reservation_pqueue = WEBLAB_RESOURCE_RESERVATION_PQUEUE % (self.resource_type_name, reservation_id)
reservation_data_str = client.get(weblab_reservation_pqueue)
if reservation_data_str is not None:
downgraded = self.reservations_manager.downgrade_confirmation(reservation_id)
if downgraded:
reservation_data = json.loads(reservation_data_str)
resource_instance_str = reservation_data.get(RESOURCE_INSTANCE)
if resource_instance_str is not None:
resource_instance = Resource.parse(resource_instance_str)
weblab_resource_pqueue_instance_reservations = WEBLAB_RESOURCE_PQUEUE_INSTANCE_RESERVATIONS % (resource_instance.resource_type, resource_instance.resource_instance)
client.srem(weblab_resource_pqueue_instance_reservations, reservation_id)
# print "RELEASING AT _clean_current_reservation. SHOULD NEVER HAPPEN."
# self.resources_manager.release_resource(resource_instance)
lab_session_id = reservation_data.get(LAB_SESSION_ID)
experiment_instance_str = reservation_data.get(EXPERIMENT_INSTANCE)
experiment_instance_id = ExperimentInstanceId.parse(experiment_instance_str)
if experiment_instance_id is not None:
# If the experiment instance doesn't exist, there is no need to call the free_experiment method
lab_coord_address = reservation_data.get(LAB_COORD)
enqueue_free_experiment_args = (lab_coord_address, reservation_id, lab_session_id, experiment_instance_id)
# otherwise the student has been removed
return enqueue_free_experiment_args
def update(self):
self._update_queues()
#############################################################
#
# Take the queue of a given Resource Type and update it
#
@exc_checker
def _update_queues(self):
###########################################################
# There are reasons why a waiting reservation may not be
# able to be promoted while the next one is. For instance,
# if a user is waiting for "pld boards", but only for
# instances of "pld boards" which have a "ud-binary@Binary
# experiments" server running. If only a "ud-pld@PLD
# Experiments" is available, then this user will not be
        # promoted, and another user who is waiting for a
# "ud-pld@PLD Experiments" can be promoted.
#
# Therefore, we have a list of the IDs of the waiting
# reservations we previously thought that they couldn't be
# promoted in this iteration. They will have another
# chance in the next run of _update_queues.
#
previously_waiting_reservation_ids = []
weblab_resource_pqueue_map = WEBLAB_RESOURCE_PQUEUE_MAP % self.resource_type_name
weblab_resource_pqueue_sorted = WEBLAB_RESOURCE_PQUEUE_SORTED % self.resource_type_name
weblab_resource_slots = WEBLAB_RESOURCE_SLOTS % self.resource_type_name
###########################################################
# While there are free instances and waiting reservations,
# take the first waiting reservation and set it to current
# reservation. Make this repeatedly because we want to
# commit each change
#
while True:
client = self.redis_maker()
filled_waiting_reservation_ids = client.zrangebyscore(weblab_resource_pqueue_sorted, -10000, +10000, start=0, num=len(previously_waiting_reservation_ids) + 1)
first_waiting_reservation_id = None
for filled_waiting_reservation_id in filled_waiting_reservation_ids:
waiting_reservation_id = filled_waiting_reservation_id[filled_waiting_reservation_id.find('_')+1:]
if waiting_reservation_id not in previously_waiting_reservation_ids:
first_waiting_reservation_id = waiting_reservation_id
break
if first_waiting_reservation_id is None:
return # There is no waiting reservation for this resource that we haven't already tried
previously_waiting_reservation_ids.append(first_waiting_reservation_id)
#
# For the current resource_type, let's ask for
# all the resource instances available (i.e. those
# who are a member on weblab:resource:%s:slots )
#
free_instances = [ Resource(self.resource_type_name, resource_instance)
for resource_instance in client.smembers(weblab_resource_slots) ]
if len(free_instances) == 0:
# If there is no free instance, just return
return
#
# Select the correct free_instance for the current student among
# all the free_instances
#
if self.randomize_instances:
randomized_free_instances = [ free_instance for free_instance in free_instances ]
random.shuffle(randomized_free_instances)
else:
randomized_free_instances = sorted(free_instances, cmp=lambda r1, r2: cmp(r1.resource_type, r2.resource_type) or cmp(r1.resource_instance, r2.resource_instance))
for free_instance in randomized_free_instances:
#
# IMPORTANT: from here on every "continue" should first revoke the
# reservations_manager and resources_manager confirmations
#
working = self.resources_manager.check_working(free_instance)
if not working:
# The instance is not working
continue
confirmed = self.reservations_manager.confirm(first_waiting_reservation_id)
if not confirmed:
                    # the student has already been confirmed somewhere else, so don't try with
                    # other instances, but rather with another student
break
acquired = self.resources_manager.acquire_resource(free_instance)
# print "ACQUIRED", free_instance, acquired, time.time()
if not acquired:
                    # the instance has been acquired by someone else; unconfirm the student and
                    # try again with another free_instance
self.reservations_manager.downgrade_confirmation(first_waiting_reservation_id)
continue
weblab_resource_pqueue_instance_reservations = WEBLAB_RESOURCE_PQUEUE_INSTANCE_RESERVATIONS % (self.resource_type_name, free_instance.resource_instance)
client.sadd(weblab_resource_pqueue_instance_reservations, first_waiting_reservation_id)
weblab_reservation_pqueue = WEBLAB_RESOURCE_RESERVATION_PQUEUE % (self.resource_type_name, first_waiting_reservation_id)
pqueue_reservation_data_str = client.get(weblab_reservation_pqueue)
reservation_data = self.reservations_manager.get_reservation_data(first_waiting_reservation_id)
if pqueue_reservation_data_str is None or reservation_data is None:
# the student is not here anymore; downgrading confirmation is not required
# but releasing the resource is; and skip the rest of the free instances
self.resources_manager.release_resource(free_instance)
client.srem(weblab_resource_pqueue_instance_reservations, first_waiting_reservation_id)
break
pqueue_reservation_data = json.loads(pqueue_reservation_data_str)
start_time = self.time_provider.get_time()
total_time = pqueue_reservation_data[TIME]
pqueue_reservation_data[START_TIME] = start_time
pqueue_reservation_data[TIMESTAMP_BEFORE] = start_time
pqueue_reservation_data[ACTIVE_STATUS] = STATUS_WAITING_CONFIRMATION
pqueue_reservation_data[RESOURCE_INSTANCE] = free_instance.to_weblab_str()
initialization_in_accounting = pqueue_reservation_data[INITIALIZATION_IN_ACCOUNTING]
client_initial_data = reservation_data[CLIENT_INITIAL_DATA]
request_info = json.loads(reservation_data[REQUEST_INFO])
username = request_info.get('username')
locale = request_info.get('locale')
requested_experiment_type = ExperimentId.parse(reservation_data[EXPERIMENT_TYPE])
selected_experiment_instance = None
experiment_instances = self.resources_manager.list_experiment_instance_ids_by_resource(free_instance)
for experiment_instance in experiment_instances:
if experiment_instance.to_experiment_id() == requested_experiment_type:
selected_experiment_instance = experiment_instance
if selected_experiment_instance is None:
                    # This resource is not valid for this user; another free_instance should be
                    # selected. Try another one, but first release the acquired resources
self.reservations_manager.downgrade_confirmation(first_waiting_reservation_id)
self.resources_manager.release_resource(free_instance)
client.srem(weblab_resource_pqueue_instance_reservations, first_waiting_reservation_id)
continue
pqueue_reservation_data[EXPERIMENT_INSTANCE] = selected_experiment_instance.to_weblab_str()
laboratory_coord_address = self.resources_manager.get_laboratory_coordaddress_by_experiment_instance_id(selected_experiment_instance)
pqueue_reservation_data[LAB_COORD] = laboratory_coord_address
client.set(weblab_reservation_pqueue, json.dumps(pqueue_reservation_data))
filled_reservation_id = client.hget(weblab_resource_pqueue_map, first_waiting_reservation_id)
client.zrem(weblab_resource_pqueue_sorted, filled_reservation_id)
#
# Enqueue the confirmation, since it might take a long time
# (for instance, if the laboratory server does not reply because
# of any network problem, or it just takes too much in replying),
# so this method might take too long. That's why we enqueue these
# petitions and run them in other threads.
#
deserialized_server_initial_data = {
'priority.queue.slot.length' : '%s' % total_time,
'priority.queue.slot.start' : '%s' % datetime.datetime.fromtimestamp(start_time),
'priority.queue.slot.initialization_in_accounting' : initialization_in_accounting,
'request.experiment_id.experiment_name' : selected_experiment_instance.exp_name,
'request.experiment_id.category_name' : selected_experiment_instance.cat_name,
'request.username' : username,
'request.full_name' : username,
'request.locale' : locale,
# TODO: add the username and user full name here
}
server_initial_data = json.dumps(deserialized_server_initial_data)
# server_initial_data will contain information such as "what was the last experiment used?".
# If a single resource was used by a binary experiment, then the next time may not require reprogramming the device
self.confirmer.enqueue_confirmation(laboratory_coord_address, first_waiting_reservation_id, selected_experiment_instance, client_initial_data, server_initial_data, self.resource_type_name)
#
# After it, keep in the while True in order to add the next
# reservation
#
break
################################################
#
# Remove all reservations whose session has expired
#
@exc_checker
def _remove_expired_reservations(self):
now = self.time_provider.get_time()
enqueue_free_experiment_args_retrieved = []
client = self.redis_maker()
weblab_resource_pqueue_reservations = WEBLAB_RESOURCE_PQUEUE_RESERVATIONS % self.resource_type_name
reservations = [ reservation_id for reservation_id in client.smembers(weblab_resource_pqueue_reservations) ]
# Since there might be a lot of reservations, create a pipeline and retrieve
# every reservation data in a row
pipeline = client.pipeline()
for reservation_id in reservations:
weblab_reservation_pqueue = WEBLAB_RESOURCE_RESERVATION_PQUEUE % (self.resource_type_name, reservation_id)
pipeline.get(weblab_reservation_pqueue)
results = pipeline.execute()
for reservation_id, reservation_data in zip(reservations, results):
if reservation_data is not None:
data = json.loads(reservation_data)
if ACTIVE_STATUS in data:
total_time = data[TIME]
timestamp_before = data[TIMESTAMP_BEFORE]
timestamp_after = data.get(TIMESTAMP_AFTER)
initialization_in_accounting = data[INITIALIZATION_IN_ACCOUNTING]
                    # If timestamp_after is None and initialization should not be counted,
                    # we cannot tell whether the time has expired, so we skip it (it will
                    # eventually be considered expired for lack of LATEST_ACCESS).
if timestamp_after is not None or initialization_in_accounting:
timestamp = timestamp_before if initialization_in_accounting else timestamp_after
if now >= timestamp + total_time: # Expired
enqueue_free_experiment_args = self._clean_current_reservation(reservation_id)
enqueue_free_experiment_args_retrieved.append(enqueue_free_experiment_args)
self._delete_reservation(reservation_id)
self.reservations_manager.delete(reservation_id)
# Anybody with latest_access later than this point is expired
current_expiration_time = datetime.datetime.utcfromtimestamp(now - EXPIRATION_TIME)
for expired_reservation_id in self.reservations_manager.list_expired_reservations(current_expiration_time):
weblab_reservation_pqueue = WEBLAB_RESOURCE_RESERVATION_PQUEUE % (self.resource_type_name, expired_reservation_id)
pqueue_reservation_data_str = client.get(weblab_reservation_pqueue)
if pqueue_reservation_data_str is None:
continue
pqueue_reservation_data = json.loads(pqueue_reservation_data_str)
if ACTIVE_STATUS in pqueue_reservation_data:
enqueue_free_experiment_args = self._clean_current_reservation(expired_reservation_id)
enqueue_free_experiment_args_retrieved.append(enqueue_free_experiment_args)
self._delete_reservation(expired_reservation_id)
self.reservations_manager.delete(expired_reservation_id)
for enqueue_free_experiment_args in enqueue_free_experiment_args_retrieved:
if enqueue_free_experiment_args is not None:
self.confirmer.enqueue_free_experiment(*enqueue_free_experiment_args)
def _delete_reservation(self, reservation_id):
weblab_resource_pqueue_reservations = WEBLAB_RESOURCE_PQUEUE_RESERVATIONS % self.resource_type_name
weblab_resource_pqueue_map = WEBLAB_RESOURCE_PQUEUE_MAP % self.resource_type_name
weblab_resource_pqueue_sorted = WEBLAB_RESOURCE_PQUEUE_SORTED % self.resource_type_name
weblab_reservation_pqueue = WEBLAB_RESOURCE_RESERVATION_PQUEUE % (self.resource_type_name, reservation_id)
resource_instances = self.resources_manager.list_resource_instances_by_type(self.resource_type_name)
client = self.redis_maker()
pipeline = client.pipeline()
for resource_instance in resource_instances:
weblab_resource_pqueue_instance_reservations = WEBLAB_RESOURCE_PQUEUE_INSTANCE_RESERVATIONS % (self.resource_type_name, resource_instance.resource_instance)
pipeline.srem(weblab_resource_pqueue_instance_reservations, reservation_id)
pipeline.srem(weblab_resource_pqueue_reservations, reservation_id)
pipeline.delete(weblab_reservation_pqueue)
pipeline.execute()
filled_reservation_id = client.hget(weblab_resource_pqueue_map, reservation_id)
client.hdel(weblab_resource_pqueue_map, reservation_id)
client.zrem(weblab_resource_pqueue_sorted, filled_reservation_id)
##############################################################
#
# ONLY FOR TESTING: It completely removes the whole database
#
@Override(Scheduler)
def _clean(self):
client = self.redis_maker()
for reservation_id in self.reservations_manager.list_all_reservations():
client.delete(WEBLAB_RESOURCE_RESERVATION_PQUEUE % (self.resource_type_name, reservation_id))
client.delete(WEBLAB_RESOURCE_PQUEUE_INSTANCE_RESERVATIONS % (self.resource_type_name, '*'))
for resource_instance in self.resources_manager.list_resource_instances_by_type(self.resource_type_name):
client.delete(WEBLAB_RESOURCE_PQUEUE_INSTANCE_RESERVATIONS % (self.resource_type_name, resource_instance.resource_instance))
client.delete(WEBLAB_RESOURCE_PQUEUE_RESERVATIONS % self.resource_type_name)
client.delete(WEBLAB_RESOURCE_PQUEUE_POSITIONS % self.resource_type_name)
client.delete(WEBLAB_RESOURCE_PQUEUE_MAP % self.resource_type_name)
client.delete(WEBLAB_RESOURCE_PQUEUE_SORTED % self.resource_type_name)
| 50.825826 | 271 | 0.668951 |
f97b41e00312ca8204eba6d5739637c181b6e245 | 6,181 | py | Python | opps/archives/migrations/0002_auto__add_field_file_archive_link__chg_field_file_archive.py | jeanmask/opps | 031c6136c38d43aa6d1ccb25a94f7bcd65ccbf87 | [
"MIT"
] | 159 | 2015-01-03T16:36:35.000Z | 2022-03-29T20:50:13.000Z | opps/archives/migrations/0002_auto__add_field_file_archive_link__chg_field_file_archive.py | jeanmask/opps | 031c6136c38d43aa6d1ccb25a94f7bcd65ccbf87 | [
"MIT"
] | 81 | 2015-01-02T21:26:16.000Z | 2021-05-29T12:24:52.000Z | opps/archives/migrations/0002_auto__add_field_file_archive_link__chg_field_file_archive.py | jeanmask/opps | 031c6136c38d43aa6d1ccb25a94f7bcd65ccbf87 | [
"MIT"
] | 75 | 2015-01-23T13:41:03.000Z | 2021-09-24T03:45:23.000Z | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from django.contrib.auth import get_user_model
User = get_user_model()
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'File.archive_link'
db.add_column(u'archives_file', 'archive_link',
self.gf('django.db.models.fields.URLField')(max_length=255, null=True, blank=True),
keep_default=False)
try:
# Adding M2M table for field mirror_site on 'File'
m2m_table_name = db.shorten_name(u'archives_file_mirror_site')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('file', models.ForeignKey(orm[u'archives.file'], null=False)),
('site', models.ForeignKey(orm[u'sites.site'], null=False))
))
db.create_unique(m2m_table_name, ['file_id', 'site_id'])
except:
pass
# Changing field 'File.archive'
db.alter_column(u'archives_file', 'archive', self.gf('django.db.models.fields.files.FileField')(max_length=255, null=True))
def backwards(self, orm):
# Deleting field 'File.archive_link'
db.delete_column(u'archives_file', 'archive_link')
try:
# Removing M2M table for field mirror_site on 'File'
db.delete_table(db.shorten_name(u'archives_file_mirror_site'))
except:
pass
# Changing field 'File.archive'
db.alter_column(u'archives_file', 'archive', self.gf('django.db.models.fields.files.FileField')(default='', max_length=255))
models = {
u'%s.%s' % (User._meta.app_label, User._meta.module_name): {
'Meta': {'object_name': User.__name__},
},
u'archives.file': {
'Meta': {'object_name': 'File'},
'archive': ('django.db.models.fields.files.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'archive_link': ('django.db.models.fields.URLField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'date_available': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'date_insert': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_update': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mirror_site': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "u'archives_file_mirror_site'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['sites.Site']"}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'default': '1', 'to': u"orm['sites.Site']"}),
'site_domain': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'site_iid': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'max_length': '4', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '150'}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'tags': ('django.db.models.fields.CharField', [], {'max_length': '4000', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['%s.%s']" % (User._meta.app_label, User._meta.object_name)})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['archives'] | 61.81 | 218 | 0.577576 |
55b8c46dc892c5d632dcf48394f5d16cc7625a8c | 2,343 | py | Python | src/robotide/widgets/dialog.py | crylearner/RIDE3X | 767f45b0c908f18ecc7473208def8dc7489f43b0 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2017-08-20T14:46:02.000Z | 2017-08-20T14:46:02.000Z | src/robotide/widgets/dialog.py | crylearner/RIDE3X | 767f45b0c908f18ecc7473208def8dc7489f43b0 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | src/robotide/widgets/dialog.py | crylearner/RIDE3X | 767f45b0c908f18ecc7473208def8dc7489f43b0 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import wx
from robotide.widgets import htmlwindow, sizers
class Dialog(wx.Dialog):
def __init__(self, title='', parent=None, size=None, style=None):
parent = parent or wx.GetTopLevelWindows()[0]
size = size or (-1, -1)
# wx.THICK_FRAME allows resizing
if wx.VERSION >= (3, 0, 3, ''): # DEBUG wxPhoenix
style = style or (wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER)
else:
style = style or (wx.DEFAULT_DIALOG_STYLE | wx.THICK_FRAME)
wx.Dialog.__init__(self, parent, title=title, size=size, style=style)
self.CenterOnParent()
def _create_buttons(self, sizer):
buttons = self.CreateStdDialogButtonSizer(wx.OK | wx.CANCEL)
sizer.Add(buttons, flag=wx.ALIGN_CENTER | wx.ALL, border=5)
def _create_horizontal_line(self, sizer):
line = wx.StaticLine(self, size=(20, -1), style=wx.LI_HORIZONTAL)
sizer.Add(
line, border=5,
flag=wx.GROW | wx.ALIGN_CENTER_VERTICAL | wx.RIGHT | wx.TOP)
def execute(self):
retval = None
if self.ShowModal() == wx.ID_OK:
retval = self._execute()
self.Destroy()
return retval
def _execute(self):
raise NotImplementedError(self.__class__.__name__)
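# Intended usage pattern, as a minimal sketch (the subclass and field
# names here are hypothetical): subclasses implement _execute(), and
# callers run the dialog through execute(), which only returns a value
# when the dialog was closed with wx.ID_OK.
#
#     class NameDialog(Dialog):
#         def _execute(self):
#             return self._name_field.GetValue()
#
#     name = NameDialog(title='Enter name').execute()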
class HtmlDialog(Dialog):
def __init__(self, title, content, padding=0, font_size=-1):
Dialog.__init__(self, title)
szr = sizers.VerticalSizer()
html = htmlwindow.HtmlWindow(self, text=content)
html.SetStandardFonts(size=font_size)
szr.add_expanding(html, padding=padding)
self.SetSizer(szr)
def OnKey(self, event):
pass
| 35.5 | 78 | 0.64618 |
327ed44d1b8451902b0d8e41593c6ef41e571ba6 | 7,417 | py | Python | azure-mgmt-storage/azure/mgmt/storage/v2018_02_01/operations/usage_operations.py | jmalobicky/azure-sdk-for-python | 61234a3d83f8fb481d1dd2386e54e888864878fd | [
"MIT"
] | 1 | 2018-07-23T08:59:24.000Z | 2018-07-23T08:59:24.000Z | azure-mgmt-storage/azure/mgmt/storage/v2018_02_01/operations/usage_operations.py | jmalobicky/azure-sdk-for-python | 61234a3d83f8fb481d1dd2386e54e888864878fd | [
"MIT"
] | null | null | null | azure-mgmt-storage/azure/mgmt/storage/v2018_02_01/operations/usage_operations.py | jmalobicky/azure-sdk-for-python | 61234a3d83f8fb481d1dd2386e54e888864878fd | [
"MIT"
] | 1 | 2018-08-28T14:36:47.000Z | 2018-08-28T14:36:47.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from .. import models
class UsageOperations(object):
"""UsageOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: The API version to use for this operation. Constant value: "2018-02-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2018-02-01"
self.config = config
def list(
self, custom_headers=None, raw=False, **operation_config):
"""Gets the current usage count and the limit for the resources under the
subscription.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of Usage
:rtype:
~azure.mgmt.storage.v2018_02_01.models.UsagePaged[~azure.mgmt.storage.v2018_02_01.models.Usage]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str', min_length=1)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str', min_length=1)
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.UsagePaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.UsagePaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Storage/usages'}
def list_by_location(
self, location, custom_headers=None, raw=False, **operation_config):
"""Gets the current usage count and the limit for the resources of the
location under the subscription.
:param location: The location of the Azure Storage resource.
:type location: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of Usage
:rtype:
~azure.mgmt.storage.v2018_02_01.models.UsagePaged[~azure.mgmt.storage.v2018_02_01.models.Usage]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list_by_location.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str', min_length=1),
'location': self._serialize.url("location", location, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str', min_length=1)
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.UsagePaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.UsagePaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list_by_location.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Storage/locations/{location}/usages'}
| 42.626437 | 144 | 0.629904 |
337faf5f0e6c67eef48cc5ed3f0bde2537760753 | 10,997 | py | Python | cradmin_legacy/viewhelpers/multiselect2/target_renderer.py | appressoas/cradmin_legacy | b9d024299333dd04c87c1031bd5be5778aa7f1f1 | [
"BSD-3-Clause"
] | null | null | null | cradmin_legacy/viewhelpers/multiselect2/target_renderer.py | appressoas/cradmin_legacy | b9d024299333dd04c87c1031bd5be5778aa7f1f1 | [
"BSD-3-Clause"
] | 17 | 2018-03-07T15:52:42.000Z | 2022-03-12T01:07:06.000Z | cradmin_legacy/viewhelpers/multiselect2/target_renderer.py | appressoas/cradmin_legacy | b9d024299333dd04c87c1031bd5be5778aa7f1f1 | [
"BSD-3-Clause"
] | 1 | 2018-07-23T22:13:45.000Z | 2018-07-23T22:13:45.000Z | import json
from xml.sax.saxutils import quoteattr
from crispy_forms import layout
from django.utils.translation import pgettext_lazy
from cradmin_legacy import renderable
from cradmin_legacy.crispylayouts import PrimarySubmitBlock, CradminFormHelper
from cradmin_legacy.viewhelpers.listfilter.base import abstractfilterlistchild
class Target(renderable.AbstractRenderableWithCss,
abstractfilterlistchild.FilterListChildMixin):
"""
Renders a multiselect target form.
If you have multiple lists with multiselect2 on the same page,
you have to ensure the DOM IDs are unique by overriding :meth:`.get_dom_id`.
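    A minimal sketch of overriding it (the subclass name is hypothetical)::
        class SecondListTarget(Target):
            def get_dom_id(self):
                return 'second_list_select_target'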
"""
#: The template used to render this renderable.
template_name = 'cradmin_legacy/viewhelpers/multiselect2/target_renderer/target.django.html'
#: The default for :meth:`.~Target.get_dom_id`.
default_target_dom_id = 'cradmin_legacy_multiselect2_select_target'
#: Used to add custom attributes like angularjs directives to the form.
#: See :meth:`.get_form_attributes`.
form_attributes = {}
def __init__(self, form,
dom_id=None,
without_items_text=None,
with_items_title=None,
no_items_selected_text=None,
submit_button_text=None,
form_action=None,
empty_selection_allowed=False):
"""
Args:
form: A django Form object.
dom_id: See :meth:`.get_dom_id`.
without_items_text: See :meth:`.get_without_items_text`.
no_items_selected_text: See :meth:`.get_no_items_selected_text`.
with_items_title: See :meth:`.get_with_items_title`.
submit_button_text: See :meth:`.get_submit_button_text`.
form_action: See :meth:`.get_form_action`.
"""
self.form = form
self.dom_id = dom_id
self.without_items_text = without_items_text
self.with_items_title = with_items_title
self.no_items_selected_text = no_items_selected_text
self.submit_button_text = submit_button_text
self.form_action = form_action
self.empty_selection_allowed = empty_selection_allowed
def get_dom_id(self):
"""
Returns:
str: The DOM id of the form wrapping everything rendered by this renderable.
Defaults to the ``dom_id`` parameter for ``__init__`` falling back on
:obj:`.default_target_dom_id`.
"""
if self.dom_id:
return self.dom_id
else:
return self.default_target_dom_id
def get_with_items_title(self):
"""
Returns:
str: The title of the box when there are items selected.
Defaults to the ``with_items_title`` parameter for ``__init__``,
falling back on ``"Selected items"`` (translatable).
"""
if self.with_items_title:
return self.with_items_title
else:
return pgettext_lazy('multiselect2 target renderer', 'Selected items')
def get_submit_button_text(self):
"""
This is used in :meth:`.get_buttons`, so if you override that or :meth:`.get_button_layout`,
this is not used.
Returns:
str: The submit button text.
Defaults to the ``submit_button_text`` parameter for ``__init__``,
falling back on ``"Submit selection"`` (translatable).
"""
if self.submit_button_text:
return self.submit_button_text
else:
return pgettext_lazy('multiselect2 target renderer', 'Submit selection')
def get_without_items_text(self):
"""
Returns:
str: The text to show when there are no items selected and :meth:`.get_empty_selection_allowed`
returns ``False``.
Defaults to the ``without_items_text`` parameter for ``__init__``,
falling back on empty string.
"""
if self.without_items_text:
return self.without_items_text
else:
return ''
def get_empty_selection_allowed(self):
"""
Returns:
bool: If no selected items is allowed, this should return ``True``.
Returns the value of the ``empty_selection_allowed`` parameter
for ``__init__`` by default.
"""
return self.empty_selection_allowed
def get_no_items_selected_text(self):
"""
Returns:
str: The text to show when there are no items selected and :meth:`.get_empty_selection_allowed`
returns ``True``.
Defaults to the ``no_items_selected_text`` parameter for ``__init__``,
falling back on ``"(None)"``.
"""
if self.no_items_selected_text:
return self.no_items_selected_text
else:
return pgettext_lazy('multiselect2 target renderer', '(None)')
def get_form_action(self, request):
"""
If you override this, you should also override
:meth:`.post_url_as_it_is_when_form_is_submitted` and return ``False``.
Args:
request: An HttpRequest object.
Returns:
str: The ``<form>`` action attribute value.
Defaults to the ``form_action`` parameter for ``__init__``,
falling back on ``request.get_full_path()``.
"""
if self.form_action:
return self.form_action
else:
return request.get_full_path()
def post_url_as_it_is_when_form_is_submitted(self):
"""
If this returns ``True`` (the default), the ``action``-attribute of the
form will be updated to match the URL in the browser when the form
is submitted.
This defaults to ``True`` unless you set :obj:`.form_action`.
The primary reason for this feature is here is for javascript libraries
that update the URL. It is usually a bad user experience to reset their
choices (filters, search, etc.) when they post their selection.
"""
if self.form_action:
return False
else:
return True
def get_angularjs_directive_dict(self):
"""
Get options for the ``cradmin-legacy-multiselect2-target`` angularjs
directive.
Returns:
dict: With options for the directive.
"""
return {
'updateFormActionToWindowLocation': self.post_url_as_it_is_when_form_is_submitted(),
}
def get_angularjs_directive_json(self):
"""
JSON encode :meth:`.get_angularjs_directive_dict`.
Returns:
str: The return value of :meth:`.get_select_directive_dict`
as a json encoded and xml attribute encoded string.
"""
return quoteattr(json.dumps(self.get_angularjs_directive_dict()))
def get_context_data(self, request=None):
context = super(Target, self).get_context_data(request=request)
context['form_action'] = self.get_form_action(request=request)
context['angularjs_directive_json'] = self.get_angularjs_directive_json()
return context
def get_field_layout(self):
"""
Get a list/tuple of fields. These are added to a ``crispy_forms.layout.Layout``.
Must be overridden.
Simple example::
from cradmin_legacy.viewhelpers import multiselect2view
from crispy_forms import layout
class MyMultiselect2View(multiselect2view.ListbuilderView):
# ... other required stuff
def get_field_layout(self):
return [
'name',
layout.Field('age', css_class="the-name")
]
"""
return []
def get_hidden_fields(self):
"""
Get hidden fields for the form.
Returns:
An iterable of :class:`crispy_forms.layout.Hidden` objects.
Defaults to an empty list.
"""
return []
def get_buttons(self):
"""
Get buttons for the form, normally one or more submit button.
Each button must be a crispy form layout object, typically some
subclass of :class:`crispy_forms.layout.Submit`.
The default is::
from cradmin_legacy.crispylayouts import PrimarySubmitBlock
return [
PrimarySubmitBlock('save', self.get_submit_button_text()),
]
.. seealso:: This method is used by :meth:`.get_button_layout`.
The default label is returned by :meth:`.get_submit_button_text`.
"""
return [
PrimarySubmitBlock('save', self.get_submit_button_text()),
]
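    # A minimal sketch of overriding get_buttons() with an extra submit button
    # (hypothetical subclass; the button name and label are illustrative):
    #
    #     class TwoButtonTarget(Target):
    #         def get_buttons(self):
    #             return [
    #                 PrimarySubmitBlock('save', self.get_submit_button_text()),
    #                 PrimarySubmitBlock('save-and-continue', 'Save and continue'),
    #             ]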
def get_button_layout(self):
"""
Get the button layout. This is added to the crispy form layout.
You will normally want to override :meth:`.get_buttons` instead of this
method.
Defaults to a :class:`crispy_forms.layout.Div` with css class
``cradmin-legacy-multiselect2-target-submitbuttons`` containing all the buttons
returned by :meth:`.get_buttons`.
"""
return [
layout.Div(*self.get_buttons(),
css_class="cradmin-legacy-multiselect2-target-submitbuttons")
]
def get_formhelper_class(self):
return CradminFormHelper
def get_formhelper(self):
"""
Get a :class:`crispy_forms.helper.FormHelper`.
You normally do not need to override this directly. Instead
you should override:
- :meth:`.get_field_layout`.
- :meth:`.get_hidden_fields`
- :meth:`.get_buttons` (or perhaps :meth:`.get_button_layout`)
"""
helper = self.get_formhelper_class()()
layoutargs = list(self.get_field_layout()) + list(self.get_button_layout()) + list(self.get_hidden_fields())
helper.layout = layout.Layout(*layoutargs)
helper.form_tag = False
return helper
class ManyToManySelectTarget(Target):
"""
Renders a multiselect target form for
:class:`cradmin_legacy.viewhelpers.multiselect2.manytomanywidget.Widget`.
"""
def __init__(self, target_formfield_id, *args, **kwargs):
"""
Args:
target_formfield_id: The DOM ID of the target form field.
"""
self.target_formfield_id = target_formfield_id
super(ManyToManySelectTarget, self).__init__(*args, **kwargs)
def get_usethis_directive_dict(self):
return {
'fieldid': self.target_formfield_id
}
def get_usethis_directive_json(self):
return json.dumps(self.get_usethis_directive_dict())
def get_buttons(self):
return [
PrimarySubmitBlock('usethis', self.get_submit_button_text(),
cradmin_legacy_multiselect2_use_this=self.get_usethis_directive_json()),
]
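# A minimal usage sketch for ManyToManySelectTarget, assuming a form field
# rendered with DOM id "id_my_m2m_field" (the id and variable names are
# illustrative):
#
#     target = ManyToManySelectTarget(target_formfield_id='id_my_m2m_field')
#     directive_json = target.get_usethis_directive_json()  # '{"fieldid": "id_my_m2m_field"}'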
| 34.581761 | 116 | 0.624261 |
42c99778f25b524851505b3149b144227bcc8794 | 1,109 | py | Python | migrations/versions/0177_add_virus_scan_statuses.py | tlwr/notifications-api | 88a6b7729edb9be41ce3e7c027f1452b7b6d00d2 | [
"MIT"
] | 51 | 2016-04-03T23:36:17.000Z | 2022-03-21T20:04:52.000Z | migrations/versions/0177_add_virus_scan_statuses.py | tlwr/notifications-api | 88a6b7729edb9be41ce3e7c027f1452b7b6d00d2 | [
"MIT"
] | 1,335 | 2015-12-15T14:28:50.000Z | 2022-03-30T16:24:27.000Z | migrations/versions/0177_add_virus_scan_statuses.py | tlwr/notifications-api | 88a6b7729edb9be41ce3e7c027f1452b7b6d00d2 | [
"MIT"
] | 30 | 2016-01-08T19:05:32.000Z | 2021-12-20T16:37:23.000Z | """
Revision ID: 0177_add_virus_scan_statuses
Revises: 0176_alter_billing_columns
Create Date: 2018-02-21 14:05:04.448977
"""
from alembic import op
revision = '0177_add_virus_scan_statuses'
down_revision = '0176_alter_billing_columns'
def upgrade():
op.execute("INSERT INTO notification_status_types (name) VALUES ('pending-virus-check')")
op.execute("INSERT INTO notification_status_types (name) VALUES ('virus-scan-failed')")
def downgrade():
op.execute("UPDATE notifications SET notification_status = 'created' WHERE notification_status = 'pending-virus-check'")
op.execute("UPDATE notification_history SET notification_status = 'created' WHERE notification_status = 'pending-virus-check'")
op.execute("UPDATE notifications SET notification_status = 'permanent-failure' WHERE notification_status = 'virus-scan-failed'")
op.execute("UPDATE notification_history SET notification_status = 'permanent-failure' WHERE notification_status = 'virus-scan-failed'")
op.execute("DELETE FROM notification_status_types WHERE name in ('pending-virus-check', 'virus-scan-failed')")
| 39.607143 | 139 | 0.77908 |
3ac8dffae4974d1a976b2d75f25144048f5ffdde | 2,787 | py | Python | pysteps/tests/test_plt_precipfields.py | leabeusch/pysteps | 5f162d4b1155e4cfd894c9635eed3f0e823adedd | [
"BSD-3-Clause"
] | 285 | 2018-07-11T10:42:43.000Z | 2022-03-23T13:44:54.000Z | pysteps/tests/test_plt_precipfields.py | leabeusch/pysteps | 5f162d4b1155e4cfd894c9635eed3f0e823adedd | [
"BSD-3-Clause"
] | 246 | 2018-07-16T06:17:12.000Z | 2022-03-22T15:45:08.000Z | pysteps/tests/test_plt_precipfields.py | leabeusch/pysteps | 5f162d4b1155e4cfd894c9635eed3f0e823adedd | [
"BSD-3-Clause"
] | 97 | 2018-07-12T12:05:45.000Z | 2022-03-31T14:56:31.000Z | # -*- coding: utf-8 -*-
import pytest
from pysteps.visualization import plot_precip_field
from pysteps.utils import conversion
from pysteps.postprocessing import ensemblestats
from pysteps.tests.helpers import get_precipitation_fields
import matplotlib.pyplot as plt
import numpy as np
plt_arg_names = (
"source",
"plot_type",
"bbox",
"colorscale",
"probthr",
"title",
"colorbar",
"axis",
)
plt_arg_values = [
("mch", "intensity", None, "pysteps", None, None, False, "off"),
("mch", "depth", None, "pysteps", None, "Title", True, "on"),
("mch", "prob", None, "pysteps", 0.1, None, True, "on"),
("mch", "intensity", None, "STEPS-BE", None, None, True, "on"),
("mch", "intensity", None, "BOM-RF3", None, None, True, "on"),
("bom", "intensity", None, "pysteps", None, None, True, "on"),
("fmi", "intensity", None, "pysteps", None, None, True, "on"),
("knmi", "intensity", None, "pysteps", None, None, True, "on"),
("knmi", "intensity", [300, 300, 500, 500], "pysteps", None, None, True, "on"),
("opera", "intensity", None, "pysteps", None, None, True, "on"),
("saf", "intensity", None, "pysteps", None, None, True, "on"),
]
@pytest.mark.parametrize(plt_arg_names, plt_arg_values)
def test_visualization_plot_precip_field(
source, plot_type, bbox, colorscale, probthr, title, colorbar, axis
):
if plot_type == "intensity":
field, metadata = get_precipitation_fields(0, 0, True, True, None, source)
field = field.squeeze()
field, metadata = conversion.to_rainrate(field, metadata)
elif plot_type == "depth":
field, metadata = get_precipitation_fields(0, 0, True, True, None, source)
field = field.squeeze()
field, metadata = conversion.to_raindepth(field, metadata)
elif plot_type == "prob":
field, metadata = get_precipitation_fields(0, 10, True, True, None, source)
field, metadata = conversion.to_rainrate(field, metadata)
field = ensemblestats.excprob(field, probthr)
field_orig = field.copy()
ax = plot_precip_field(
field.copy(),
ptype=plot_type,
bbox=bbox,
geodata=None,
colorscale=colorscale,
probthr=probthr,
units=metadata["unit"],
title=title,
colorbar=colorbar,
axis=axis,
)
# Check that plot_precip_field does not modify the input data
field_orig = np.ma.masked_invalid(field_orig)
field_orig.data[field_orig.mask] = -100
field = np.ma.masked_invalid(field)
field.data[field.mask] = -100
assert np.array_equal(field_orig.data, field.data)
if __name__ == "__main__":
for i, args in enumerate(plt_arg_values):
test_visualization_plot_precip_field(*args)
plt.show()
| 32.034483 | 83 | 0.640115 |
6985993848520d165285fca051c147433fd69a22 | 4,013 | py | Python | google/ads/google_ads/v0/proto/enums/frequency_cap_time_unit_pb2.py | jwygoda/google-ads-python | 863892b533240cb45269d9c2cceec47e2c5a8b68 | [
"Apache-2.0"
] | null | null | null | google/ads/google_ads/v0/proto/enums/frequency_cap_time_unit_pb2.py | jwygoda/google-ads-python | 863892b533240cb45269d9c2cceec47e2c5a8b68 | [
"Apache-2.0"
] | null | null | null | google/ads/google_ads/v0/proto/enums/frequency_cap_time_unit_pb2.py | jwygoda/google-ads-python | 863892b533240cb45269d9c2cceec47e2c5a8b68 | [
"Apache-2.0"
] | null | null | null | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads_v0/proto/enums/frequency_cap_time_unit.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads_v0/proto/enums/frequency_cap_time_unit.proto',
package='google.ads.googleads.v0.enums',
syntax='proto3',
serialized_options=_b('\n!com.google.ads.googleads.v0.enumsB\031FrequencyCapTimeUnitProtoP\001ZBgoogle.golang.org/genproto/googleapis/ads/googleads/v0/enums;enums\242\002\003GAA\252\002\035Google.Ads.GoogleAds.V0.Enums\312\002\035Google\\Ads\\GoogleAds\\V0\\Enums\352\002!Google::Ads::GoogleAds::V0::Enums'),
serialized_pb=_b('\nAgoogle/ads/googleads_v0/proto/enums/frequency_cap_time_unit.proto\x12\x1dgoogle.ads.googleads.v0.enums\"n\n\x18\x46requencyCapTimeUnitEnum\"R\n\x14\x46requencyCapTimeUnit\x12\x0f\n\x0bUNSPECIFIED\x10\x00\x12\x0b\n\x07UNKNOWN\x10\x01\x12\x07\n\x03\x44\x41Y\x10\x02\x12\x08\n\x04WEEK\x10\x03\x12\t\n\x05MONTH\x10\x04\x42\xee\x01\n!com.google.ads.googleads.v0.enumsB\x19\x46requencyCapTimeUnitProtoP\x01ZBgoogle.golang.org/genproto/googleapis/ads/googleads/v0/enums;enums\xa2\x02\x03GAA\xaa\x02\x1dGoogle.Ads.GoogleAds.V0.Enums\xca\x02\x1dGoogle\\Ads\\GoogleAds\\V0\\Enums\xea\x02!Google::Ads::GoogleAds::V0::Enumsb\x06proto3')
)
_FREQUENCYCAPTIMEUNITENUM_FREQUENCYCAPTIMEUNIT = _descriptor.EnumDescriptor(
name='FrequencyCapTimeUnit',
full_name='google.ads.googleads.v0.enums.FrequencyCapTimeUnitEnum.FrequencyCapTimeUnit',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNSPECIFIED', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UNKNOWN', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DAY', index=2, number=2,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='WEEK', index=3, number=3,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MONTH', index=4, number=4,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=128,
serialized_end=210,
)
_sym_db.RegisterEnumDescriptor(_FREQUENCYCAPTIMEUNITENUM_FREQUENCYCAPTIMEUNIT)
_FREQUENCYCAPTIMEUNITENUM = _descriptor.Descriptor(
name='FrequencyCapTimeUnitEnum',
full_name='google.ads.googleads.v0.enums.FrequencyCapTimeUnitEnum',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
_FREQUENCYCAPTIMEUNITENUM_FREQUENCYCAPTIMEUNIT,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=100,
serialized_end=210,
)
_FREQUENCYCAPTIMEUNITENUM_FREQUENCYCAPTIMEUNIT.containing_type = _FREQUENCYCAPTIMEUNITENUM
DESCRIPTOR.message_types_by_name['FrequencyCapTimeUnitEnum'] = _FREQUENCYCAPTIMEUNITENUM
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
FrequencyCapTimeUnitEnum = _reflection.GeneratedProtocolMessageType('FrequencyCapTimeUnitEnum', (_message.Message,), dict(
DESCRIPTOR = _FREQUENCYCAPTIMEUNITENUM,
__module__ = 'google.ads.googleads_v0.proto.enums.frequency_cap_time_unit_pb2'
,
__doc__ = """Container for enum describing the unit of time the cap is defined at.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v0.enums.FrequencyCapTimeUnitEnum)
))
_sym_db.RegisterMessage(FrequencyCapTimeUnitEnum)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 38.961165 | 647 | 0.784949 |
c10580816309817aa7951dd982ec752ef4ec8a75 | 24,957 | py | Python | lib/tool_shed/webapp/controllers/upload.py | rikeshi/galaxy | c536a877e4a9b3d12aa0d00fd4d5e705109a0d0a | [
"CC-BY-3.0"
] | 4 | 2015-05-12T20:36:41.000Z | 2017-06-26T15:34:02.000Z | lib/tool_shed/webapp/controllers/upload.py | rikeshi/galaxy | c536a877e4a9b3d12aa0d00fd4d5e705109a0d0a | [
"CC-BY-3.0"
] | 52 | 2015-03-16T14:02:14.000Z | 2021-12-24T09:50:23.000Z | lib/tool_shed/webapp/controllers/upload.py | rikeshi/galaxy | c536a877e4a9b3d12aa0d00fd4d5e705109a0d0a | [
"CC-BY-3.0"
] | 1 | 2016-03-21T12:54:06.000Z | 2016-03-21T12:54:06.000Z | import logging
import os
import shutil
import tarfile
import tempfile
import requests
from galaxy import (
util,
web
)
from galaxy.util import checkers
from galaxy.webapps.base.controller import BaseUIController
from tool_shed.dependencies import attribute_handlers
from tool_shed.galaxy_install import dependency_display
from tool_shed.metadata import repository_metadata_manager
from tool_shed.repository_types import util as rt_util
from tool_shed.tools.data_table_manager import ShedToolDataTableManager
from tool_shed.util import (
basic_util,
commit_util,
hg_util,
repository_content_util,
repository_util,
shed_util_common as suc,
xml_util
)
from tool_shed.util.web_util import escape
log = logging.getLogger(__name__)
class UploadController(BaseUIController):
@web.expose
@web.require_login('upload', use_panels=True)
def upload(self, trans, **kwd):
message = escape(kwd.get('message', ''))
status = kwd.get('status', 'done')
commit_message = escape(kwd.get('commit_message', 'Uploaded'))
repository_id = kwd.get('repository_id', '')
repository = repository_util.get_repository_in_tool_shed(trans.app, repository_id)
repo_dir = repository.repo_path(trans.app)
uncompress_file = util.string_as_bool(kwd.get('uncompress_file', 'true'))
remove_repo_files_not_in_tar = util.string_as_bool(kwd.get('remove_repo_files_not_in_tar', 'true'))
uploaded_file = None
upload_point = commit_util.get_upload_point(repository, **kwd)
tip = repository.tip()
file_data = kwd.get('file_data', '')
url = kwd.get('url', '')
        # Part of the upload process is sending email notifications to those that have registered to
# receive them. One scenario occurs when the first change set is produced for the repository.
# See the suc.handle_email_alerts() method for the definition of the scenarios.
new_repo_alert = repository.is_new()
uploaded_directory = None
if kwd.get('upload_button', False):
if file_data == '' and url == '':
message = 'No files were entered on the upload form.'
status = 'error'
uploaded_file = None
elif url and url.startswith('hg'):
# Use mercurial clone to fetch repository, contents will then be copied over.
uploaded_directory = tempfile.mkdtemp()
repo_url = 'http%s' % url[len('hg'):]
cloned_ok, error_message = hg_util.clone_repository(repo_url, uploaded_directory)
if not cloned_ok:
message = 'Error uploading via mercurial clone: %s' % error_message
status = 'error'
basic_util.remove_dir(uploaded_directory)
uploaded_directory = None
elif url:
valid_url = True
try:
stream = requests.get(url, stream=True)
except Exception as e:
valid_url = False
message = 'Error uploading file via http: %s' % util.unicodify(e)
status = 'error'
uploaded_file = None
if valid_url:
fd, uploaded_file_name = tempfile.mkstemp()
uploaded_file = open(uploaded_file_name, 'wb')
for chunk in stream.iter_content(chunk_size=util.CHUNK_SIZE):
if chunk:
uploaded_file.write(chunk)
uploaded_file.flush()
uploaded_file_filename = url.split('/')[-1]
isempty = os.path.getsize(os.path.abspath(uploaded_file_name)) == 0
elif file_data not in ('', None):
uploaded_file = file_data.file
uploaded_file_name = uploaded_file.name
uploaded_file_filename = os.path.split(file_data.filename)[-1]
isempty = os.path.getsize(os.path.abspath(uploaded_file_name)) == 0
if uploaded_file or uploaded_directory:
rdah = attribute_handlers.RepositoryDependencyAttributeHandler(trans.app, unpopulate=False)
tdah = attribute_handlers.ToolDependencyAttributeHandler(trans.app, unpopulate=False)
stdtm = ShedToolDataTableManager(trans.app)
ok = True
isgzip = False
isbz2 = False
if uploaded_file:
if uncompress_file:
isgzip = checkers.is_gzip(uploaded_file_name)
if not isgzip:
isbz2 = checkers.is_bz2(uploaded_file_name)
if isempty:
tar = None
istar = False
else:
# Determine what we have - a single file or an archive
try:
if (isgzip or isbz2) and uncompress_file:
# Open for reading with transparent compression.
tar = tarfile.open(uploaded_file_name, 'r:*')
else:
tar = tarfile.open(uploaded_file_name)
istar = True
except tarfile.ReadError:
tar = None
istar = False
else:
# Uploaded directory
istar = False
if istar:
ok, message, files_to_remove, content_alert_str, undesirable_dirs_removed, undesirable_files_removed = \
repository_content_util.upload_tar(
trans,
rdah,
tdah,
repository,
tar,
uploaded_file,
upload_point,
remove_repo_files_not_in_tar,
commit_message,
new_repo_alert
)
elif uploaded_directory:
ok, message, files_to_remove, content_alert_str, undesirable_dirs_removed, undesirable_files_removed = \
self.upload_directory(trans,
rdah,
tdah,
repository,
uploaded_directory,
upload_point,
remove_repo_files_not_in_tar,
commit_message,
new_repo_alert)
else:
if (isgzip or isbz2) and uncompress_file:
uploaded_file_filename = commit_util.uncompress(repository,
uploaded_file_name,
uploaded_file_filename,
isgzip=isgzip,
isbz2=isbz2)
if repository.type == rt_util.REPOSITORY_SUITE_DEFINITION and \
uploaded_file_filename != rt_util.REPOSITORY_DEPENDENCY_DEFINITION_FILENAME:
ok = False
message = 'Repositories of type <b>Repository suite definition</b> can only contain a single file named '
message += '<b>repository_dependencies.xml</b>.'
elif repository.type == rt_util.TOOL_DEPENDENCY_DEFINITION and \
uploaded_file_filename != rt_util.TOOL_DEPENDENCY_DEFINITION_FILENAME:
ok = False
message = 'Repositories of type <b>Tool dependency definition</b> can only contain a single file named '
message += '<b>tool_dependencies.xml</b>.'
if ok:
if upload_point is not None:
full_path = os.path.abspath(os.path.join(repo_dir, upload_point, uploaded_file_filename))
else:
full_path = os.path.abspath(os.path.join(repo_dir, uploaded_file_filename))
# Move some version of the uploaded file to the load_point within the repository hierarchy.
if uploaded_file_filename in [rt_util.REPOSITORY_DEPENDENCY_DEFINITION_FILENAME]:
# Inspect the contents of the file to see if toolshed or changeset_revision attributes
# are missing and if so, set them appropriately.
altered, root_elem, error_message = rdah.handle_tag_attributes(uploaded_file_name)
if error_message:
ok = False
message = error_message
status = 'error'
elif altered:
tmp_filename = xml_util.create_and_write_tmp_file(root_elem)
shutil.move(tmp_filename, full_path)
else:
shutil.move(uploaded_file_name, full_path)
elif uploaded_file_filename in [rt_util.TOOL_DEPENDENCY_DEFINITION_FILENAME]:
# Inspect the contents of the file to see if changeset_revision values are
# missing and if so, set them appropriately.
altered, root_elem, error_message = tdah.handle_tag_attributes(uploaded_file_name)
if error_message:
ok = False
message = error_message
status = 'error'
if ok:
if altered:
tmp_filename = xml_util.create_and_write_tmp_file(root_elem)
shutil.move(tmp_filename, full_path)
else:
shutil.move(uploaded_file_name, full_path)
else:
shutil.move(uploaded_file_name, full_path)
if ok:
# See if any admin users have chosen to receive email alerts when a repository is updated.
# If so, check every uploaded file to ensure content is appropriate.
check_contents = commit_util.check_file_contents_for_email_alerts(trans.app)
if check_contents and os.path.isfile(full_path):
content_alert_str = commit_util.check_file_content_for_html_and_images(full_path)
else:
content_alert_str = ''
hg_util.add_changeset(repo_dir, full_path)
hg_util.commit_changeset(repo_dir,
full_path_to_changeset=full_path,
username=trans.user.username,
message=commit_message)
if full_path.endswith('tool_data_table_conf.xml.sample'):
# Handle the special case where a tool_data_table_conf.xml.sample file is being uploaded
# by parsing the file and adding new entries to the in-memory trans.app.tool_data_tables
# dictionary.
error, error_message = stdtm.handle_sample_tool_data_table_conf_file(full_path, persist=False)
if error:
message = f'{message}<br/>{error_message}'
# See if the content of the change set was valid.
admin_only = len(repository.downloadable_revisions) != 1
suc.handle_email_alerts(trans.app,
trans.request.host,
repository,
content_alert_str=content_alert_str,
new_repo_alert=new_repo_alert,
admin_only=admin_only)
if ok:
# Update the repository files for browsing.
hg_util.update_repository(repo_dir)
# Get the new repository tip.
if tip == repository.tip():
message = 'No changes to repository. '
status = 'warning'
else:
if (isgzip or isbz2) and uncompress_file:
uncompress_str = ' uncompressed and '
else:
uncompress_str = ' '
if uploaded_directory:
source_type = "repository"
source = url
else:
source_type = "file"
source = uploaded_file_filename
message = "The %s <b>%s</b> has been successfully%suploaded to the repository. " % \
(source_type, escape(source), uncompress_str)
if istar and (undesirable_dirs_removed or undesirable_files_removed):
items_removed = undesirable_dirs_removed + undesirable_files_removed
message += " %d undesirable items (.hg .svn .git directories, .DS_Store, hgrc files, etc) " % items_removed
message += "were removed from the archive. "
if istar and remove_repo_files_not_in_tar and files_to_remove:
if upload_point is not None:
message += " %d files were removed from the repository relative to the selected upload point '%s'. " % \
(len(files_to_remove), upload_point)
else:
message += " %d files were removed from the repository root. " % len(files_to_remove)
rmm = repository_metadata_manager.RepositoryMetadataManager(app=trans.app,
user=trans.user,
repository=repository)
status, error_message = \
rmm.set_repository_metadata_due_to_new_tip(trans.request.host,
content_alert_str=content_alert_str,
**kwd)
if error_message:
message = error_message
kwd['message'] = message
if repository.metadata_revisions:
# A repository's metadata revisions are order descending by update_time, so the zeroth revision
# will be the tip just after an upload.
metadata_dict = repository.metadata_revisions[0].metadata
else:
metadata_dict = {}
dd = dependency_display.DependencyDisplayer(trans.app)
if str(repository.type) not in [rt_util.REPOSITORY_SUITE_DEFINITION,
rt_util.TOOL_DEPENDENCY_DEFINITION]:
change_repository_type_message = rt_util.generate_message_for_repository_type_change(trans.app,
repository)
if change_repository_type_message:
message += change_repository_type_message
status = 'warning'
else:
# Provide a warning message if a tool_dependencies.xml file is provided, but tool dependencies
# weren't loaded due to a requirement tag mismatch or some other problem. Tool dependency
# definitions can define orphan tool dependencies (no relationship to any tools contained in the
# repository), so warning messages are important because orphans are always valid. The repository
# owner must be warned in case they did not intend to define an orphan dependency, but simply
                    # provided incorrect information (tool shed, name, owner, changeset_revision) for the definition.
orphan_message = dd.generate_message_for_orphan_tool_dependencies(repository, metadata_dict)
if orphan_message:
message += orphan_message
status = 'warning'
# Handle messaging for invalid tool dependencies.
invalid_tool_dependencies_message = dd.generate_message_for_invalid_tool_dependencies(metadata_dict)
if invalid_tool_dependencies_message:
message += invalid_tool_dependencies_message
status = 'error'
# Handle messaging for invalid repository dependencies.
invalid_repository_dependencies_message = \
dd.generate_message_for_invalid_repository_dependencies(metadata_dict,
error_from_tuple=True)
if invalid_repository_dependencies_message:
message += invalid_repository_dependencies_message
status = 'error'
# Reset the tool_data_tables by loading the empty tool_data_table_conf.xml file.
stdtm.reset_tool_data_tables()
if uploaded_directory:
basic_util.remove_dir(uploaded_directory)
trans.response.send_redirect(web.url_for(controller='repository',
action='browse_repository',
id=repository_id,
commit_message='Deleted selected files',
message=message,
status=status))
else:
if uploaded_directory:
basic_util.remove_dir(uploaded_directory)
status = 'error'
# Reset the tool_data_tables by loading the empty tool_data_table_conf.xml file.
stdtm.reset_tool_data_tables()
return trans.fill_template('/webapps/tool_shed/repository/upload.mako',
repository=repository,
changeset_revision=tip,
url=url,
commit_message=commit_message,
uncompress_file=uncompress_file,
remove_repo_files_not_in_tar=remove_repo_files_not_in_tar,
message=message,
status=status)
def upload_directory(self, trans, rdah, tdah, repository, uploaded_directory, upload_point, remove_repo_files_not_in_tar,
commit_message, new_repo_alert):
repo_dir = repository.repo_path(trans.app)
undesirable_dirs_removed = 0
undesirable_files_removed = 0
if upload_point is not None:
full_path = os.path.abspath(os.path.join(repo_dir, upload_point))
else:
full_path = os.path.abspath(repo_dir)
filenames_in_archive = []
for root, dirs, files in os.walk(uploaded_directory):
for uploaded_file in files:
relative_path = os.path.normpath(os.path.join(os.path.relpath(root, uploaded_directory), uploaded_file))
if repository.type == rt_util.REPOSITORY_SUITE_DEFINITION:
ok = os.path.basename(uploaded_file) == rt_util.REPOSITORY_DEPENDENCY_DEFINITION_FILENAME
elif repository.type == rt_util.TOOL_DEPENDENCY_DEFINITION:
ok = os.path.basename(uploaded_file) == rt_util.TOOL_DEPENDENCY_DEFINITION_FILENAME
else:
ok = os.path.basename(uploaded_file) not in commit_util.UNDESIRABLE_FILES
if ok:
for file_path_item in relative_path.split('/'):
if file_path_item in commit_util.UNDESIRABLE_DIRS:
undesirable_dirs_removed += 1
ok = False
break
else:
undesirable_files_removed += 1
if ok:
uploaded_file_name = os.path.abspath(os.path.join(root, uploaded_file))
if os.path.split(uploaded_file_name)[-1] == rt_util.REPOSITORY_DEPENDENCY_DEFINITION_FILENAME:
# Inspect the contents of the file to see if toolshed or changeset_revision
# attributes are missing and if so, set them appropriately.
altered, root_elem, error_message = rdah.handle_tag_attributes(uploaded_file_name)
if error_message:
return False, error_message, [], '', [], []
elif altered:
tmp_filename = xml_util.create_and_write_tmp_file(root_elem)
shutil.move(tmp_filename, uploaded_file_name)
elif os.path.split(uploaded_file_name)[-1] == rt_util.TOOL_DEPENDENCY_DEFINITION_FILENAME:
# Inspect the contents of the file to see if toolshed or changeset_revision
# attributes are missing and if so, set them appropriately.
altered, root_elem, error_message = tdah.handle_tag_attributes(uploaded_file_name)
if error_message:
return False, error_message, [], '', [], []
if altered:
tmp_filename = xml_util.create_and_write_tmp_file(root_elem)
shutil.move(tmp_filename, uploaded_file_name)
repo_path = os.path.join(full_path, relative_path)
repo_basedir = os.path.normpath(os.path.join(repo_path, os.path.pardir))
if not os.path.exists(repo_basedir):
os.makedirs(repo_basedir)
if os.path.exists(repo_path):
if os.path.isdir(repo_path):
shutil.rmtree(repo_path)
else:
os.remove(repo_path)
shutil.move(os.path.join(uploaded_directory, relative_path), repo_path)
filenames_in_archive.append(relative_path)
return commit_util.handle_directory_changes(trans.app,
trans.request.host,
trans.user.username,
repository,
full_path,
filenames_in_archive,
remove_repo_files_not_in_tar,
new_repo_alert,
commit_message,
undesirable_dirs_removed,
undesirable_files_removed)
| 62.08209 | 138 | 0.498177 |
36c30d7a59699462252726790a8041aa4a8c495d | 3,842 | py | Python | basic_combat_simulator.py | amanparmar17/combat-simulator | 3ef258a58f667be0afe566e45e101d3e3073aa2f | [
"MIT"
] | null | null | null | basic_combat_simulator.py | amanparmar17/combat-simulator | 3ef258a58f667be0afe566e45e101d3e3073aa2f | [
"MIT"
] | null | null | null | basic_combat_simulator.py | amanparmar17/combat-simulator | 3ef258a58f667be0afe566e45e101d3e3073aa2f | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 23 00:16:43 2019
@author: aman
"""
#%%
import random
#%% formation of army of user 2 i.e. computer
#consists of at least 1 member
def inputmem2():
user2 = []
    t = random.randint(1, 10)  # at least one member, as the comment above promises
for i in range(0,t):
u = random.randint(1,3)
if(u==1):
user2.append("A")
if(u==2):
user2.append("S")
if(u==3):
user2.append("K")
archer2 = user2.count("A")
soldier2 = user2.count("S")
knight2 = user2.count("K")
print("Player2 army consists of:")
print("Knights: ",knight2)
print("Archers: ",archer2)
print("Soldiers: ",soldier2)
return user2
#%% formation of army of user1----------METHOD1
def inputmem1():
user1 = input().split(" ")
n = user1.count('')
for i in range(0,n):
user1.remove('')
archer1 = user1.count("A")
soldier1 = user1.count("S")
knight1 = user1.count("K")
if(len(user1) > 10 or archer1 > 10 or soldier1 > 10 or knight1 > 10):
print("Sorry wrong input, please try again")
        return inputmem1()  # return the retried input; without "return" the invalid army would be kept
print("Your army consists of:")
print("knights: ",knight1)
print("Archers: ",archer1)
print("Soldiers: ",soldier1)
return user1
#%% toss to select the first turn
def toss():
t = random.randint(1,2)
if(t==1):
print("User1 plays first")
else:
print("User2 plays first")
#%%
def fight(score1,score2,i,j):
    # Guard before indexing so an empty army cannot raise IndexError.
    if score1 == 0 or score2 == 0:
        return 3 if score1 == score2 else (2 if score1 == 0 else 1)
    Player1 = user1[i]
    Player2 = user2[j]
    return battle(Player1, Player2, i, j, score1, score2)
#%% function to start the battle
#def fight(score1,score2,i,j,user1,user2):
# Player1 = user1[i]
# Player2 = user2[j]
# return battle(Player1, Player2, i, j, score1, score2,user1,user2)
#%% battle function redefined
def compare(Player1,Player2):
if(Player1=="A" and Player2=="S"):
return Player1
if(Player1=="S" and Player2=="A"):
return Player2
if(Player1=="A" and Player2=="A"):
return "tie"
if(Player1=="A" and Player2=="K"):
return Player2
if(Player1=="S" and Player2=="S"):
return "tie"
if(Player1=="S" and Player2=="K"):
return Player1
if(Player1=="K" and Player2=="K"):
return "tie"
if(Player1=="K" and Player2=="S"):
return Player2
if(Player1=="K" and Player2=="A"):
return Player1
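# The pairings above form a rock-paper-scissors cycle; a few concrete calls,
# derivable directly from the branches of compare():
#   compare("A", "S") -> "A"    # archer beats soldier
#   compare("S", "K") -> "S"    # soldier beats knight
#   compare("K", "A") -> "K"    # knight beats archer
#   compare("A", "A") -> "tie"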
def battle(Player1, Player2, i, j, score1, score2):
if(score1==0 and score2==0):
return 3
if(score1==0):
return 2
if(score2==0):
return 1
winner = compare(Player1, Player2)
if(winner=="tie"):
score1 -= 1
score2 -= 1
i += 1
j += 1
if(winner==Player1):
score2 -= 1
j += 1
if(winner==Player2):
score1 -= 1
i += 1
if(i<len(user1) and j<len(user2)):
Player1 = user1[i]
Player2 = user2[j]
elif(i>=len(user1)):
return 2
else:
return 1
print("Player1: {0}, Player2:{1}".format(Player1, Player2))
print("Winner: {1} : Player1 Score: {2}, Player2 Score: {0} ".format(score2,winner,score1))
print("\n")
return battle(Player1, Player2, i, j, score1, score2)
#%% main block to run the whole code
if __name__=='__main__':
print("Enter the army members in the order you want them to battle (maximum 10)")
print("###########\nFor Soldier: S \nFor Knight : K \nFor Archer : A\n###########")
user1 = inputmem1()
user2 = inputmem2()
score1 = len(user1)
score2 = len(user2)
print(end = '\n')
i = j = 0
oo = fight(score1,score2,i,j)
if(oo==3):
print("The match is a tie")
if(oo==2):
print("The winner of the match is Player2")
if(oo==1):
print("The winner of the match is Player1") | 27.056338 | 95 | 0.557002 |
41c81327843d3cf76e230cf0969e5d23621f8451 | 827 | py | Python | couchdiscover/__init__.py | enyachoke/couchdiscover | 95b5d2dde9ee7e2255edfa9825ac5c1bf5cac8b5 | [
"Apache-2.0"
] | null | null | null | couchdiscover/__init__.py | enyachoke/couchdiscover | 95b5d2dde9ee7e2255edfa9825ac5c1bf5cac8b5 | [
"Apache-2.0"
] | null | null | null | couchdiscover/__init__.py | enyachoke/couchdiscover | 95b5d2dde9ee7e2255edfa9825ac5c1bf5cac8b5 | [
"Apache-2.0"
] | null | null | null | """
CouchDiscover
~~~~~~~~~~~~~
CouchDB 2.0 Autodiscovery for Kubernetes Environments.
Example:
>>> import couchdiscover
>>> couchdiscover.entrypoints.main()
"""
from . import config, util, exceptions, kube, couch, manage, entrypoints
from .kube import KubeHostname, KubeAPIClient, KubeInterface
from .couch import CouchServer, CouchInitClient, CouchManager
from .manage import ClusterManager, ContainerEnvironment
from .exceptions import (
CouchDiscGeneralError,
CouchDiscHTTPError,
CouchAddNodeError,
InvalidKubeHostnameError
)
__title__ = 'couchdiscover'
__version__ = '0.2.5'
__author__ = "Joe Black <me@joeblack.nyc>"
__license__ = 'Apache 2.0'
__copyright__ = 'Copyright 2020 Emmanuel Nyachoke'
util.setup_logging(
level=config.LOG_LEVEL, fmt=config.LOG_FORMAT, date=config.DATE_FORMAT)
| 25.84375 | 75 | 0.76179 |
34df0fec3386053088033e7d4b24b1354ca9aa94 | 8,447 | py | Python | msgraph-cli-extensions/v1_0/usersactions_v1_0/azext_usersactions_v1_0/vendored_sdks/usersactions/operations/_users_mail_folders_operations.py | thewahome/msgraph-cli | 33127d9efa23a0e5f5303c93242fbdbb73348671 | [
"MIT"
] | null | null | null | msgraph-cli-extensions/v1_0/usersactions_v1_0/azext_usersactions_v1_0/vendored_sdks/usersactions/operations/_users_mail_folders_operations.py | thewahome/msgraph-cli | 33127d9efa23a0e5f5303c93242fbdbb73348671 | [
"MIT"
] | null | null | null | msgraph-cli-extensions/v1_0/usersactions_v1_0/azext_usersactions_v1_0/vendored_sdks/usersactions/operations/_users_mail_folders_operations.py | thewahome/msgraph-cli | 33127d9efa23a0e5f5303c93242fbdbb73348671 | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class UsersMailFoldersOperations(object):
"""UsersMailFoldersOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~users_actions.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def copy(
self,
user_id, # type: str
mail_folder_id, # type: str
body, # type: "models.PathsSdgf1MUsersUserIdMailfoldersMailfolderIdMicrosoftGraphCopyPostRequestbodyContentApplicationJsonSchema"
**kwargs # type: Any
):
# type: (...) -> "models.MicrosoftGraphMailFolder"
"""Invoke action copy.
Invoke action copy.
:param user_id: key: id of user.
:type user_id: str
:param mail_folder_id: key: id of mailFolder.
:type mail_folder_id: str
:param body: Action parameters.
:type body: ~users_actions.models.PathsSdgf1MUsersUserIdMailfoldersMailfolderIdMicrosoftGraphCopyPostRequestbodyContentApplicationJsonSchema
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MicrosoftGraphMailFolder, or the result of cls(response)
:rtype: ~users_actions.models.MicrosoftGraphMailFolder
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.MicrosoftGraphMailFolder"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.copy.metadata['url'] # type: ignore
path_format_arguments = {
'user-id': self._serialize.url("user_id", user_id, 'str'),
'mailFolder-id': self._serialize.url("mail_folder_id", mail_folder_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(body, 'PathsSdgf1MUsersUserIdMailfoldersMailfolderIdMicrosoftGraphCopyPostRequestbodyContentApplicationJsonSchema')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('MicrosoftGraphMailFolder', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
copy.metadata = {'url': '/users/{user-id}/mailFolders/{mailFolder-id}/microsoft.graph.copy'} # type: ignore
def move(
self,
user_id, # type: str
mail_folder_id, # type: str
body, # type: "models.Paths1Adv2SrUsersUserIdMailfoldersMailfolderIdMicrosoftGraphMovePostRequestbodyContentApplicationJsonSchema"
**kwargs # type: Any
):
# type: (...) -> "models.MicrosoftGraphMailFolder"
"""Invoke action move.
Invoke action move.
:param user_id: key: id of user.
:type user_id: str
:param mail_folder_id: key: id of mailFolder.
:type mail_folder_id: str
:param body: Action parameters.
:type body: ~users_actions.models.Paths1Adv2SrUsersUserIdMailfoldersMailfolderIdMicrosoftGraphMovePostRequestbodyContentApplicationJsonSchema
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MicrosoftGraphMailFolder, or the result of cls(response)
:rtype: ~users_actions.models.MicrosoftGraphMailFolder
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.MicrosoftGraphMailFolder"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.move.metadata['url'] # type: ignore
path_format_arguments = {
'user-id': self._serialize.url("user_id", user_id, 'str'),
'mailFolder-id': self._serialize.url("mail_folder_id", mail_folder_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(body, 'Paths1Adv2SrUsersUserIdMailfoldersMailfolderIdMicrosoftGraphMovePostRequestbodyContentApplicationJsonSchema')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('MicrosoftGraphMailFolder', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
move.metadata = {'url': '/users/{user-id}/mailFolders/{mailFolder-id}/microsoft.graph.move'} # type: ignore
| 46.927778 | 160 | 0.68545 |
26a9fbe8999a603cdf906290db9d03c8190fd2dd | 1,132 | py | Python | scripts/plotTotalBytesByProb.py | Sunnout/ECO-SYNC-Tree | 0112a861c456e29353bc81a82975586a9e4d23ea | [
"MIT"
] | 1 | 2021-12-06T16:23:20.000Z | 2021-12-06T16:23:20.000Z | scripts/plotTotalBytesByProb.py | Sunnout/ECO-SYNC-Tree | 0112a861c456e29353bc81a82975586a9e4d23ea | [
"MIT"
] | null | null | null | scripts/plotTotalBytesByProb.py | Sunnout/ECO-SYNC-Tree | 0112a861c456e29353bc81a82975586a9e4d23ea | [
"MIT"
] | null | null | null | import sys
import numpy as np
import matplotlib.pyplot as plt
from commonInformation import *
exp_name = sys.argv[1]
nodes = sys.argv[2]
protos = sys.argv[3]
payloads = sys.argv[4]
probs = sys.argv[5]
runs = sys.argv[6]
probs = probs.split(",")
protos = protos.split(",")
bytes = {}
for proto in protos:
bytes[proto] = []
for prob in probs:
bytes[proto].append(
float(get_value_by_key(file_name.format(exp_name, nodes, proto, payloads, prob, runs), "TOTAL_BYTES")) * 1e-9)
x = np.arange(len(probs))
width = 0.12
plt.rcParams.update({'font.size': 14})
fig = plt.figure()
ax = fig.add_subplot()
ax.set_xticks(x)
ax.set_xticklabels(map(lambda a: "p = " + a, probs))
plt.ylabel('Total Bandwidth Usage (GBytes)')
space = width * len(protos)
idx = 0
for proto in protos:
ax.bar(x - (space / 2) + idx * width + width / 2, bytes[proto], width, label=alg_mapper[proto],
color=color_mapper[proto], edgecolor="black")
idx += 1
plt.tight_layout()
ax.legend()
plt.savefig(f'../plots/bytes/total_gbytes_{exp_name}_{nodes}_{protos}_{payloads}_{probs}_{runs}.pdf', format='pdf')
plt.close(fig)
| 25.727273 | 122 | 0.672261 |
25a9e803eca1d93a5c202af67d8ced4956fb87e5 | 988 | py | Python | tests/unit/pipert/core/utils/dummy_routines/dummy_destination_routine.py | OperationalBina/PipeRT2 | 2033f7c55d4eeca4749f6a0cc151a65863d3dfde | [
"MIT"
] | 1 | 2021-11-23T17:20:11.000Z | 2021-11-23T17:20:11.000Z | tests/unit/pipert/core/utils/dummy_routines/dummy_destination_routine.py | OperationalBina/PipeRT2 | 2033f7c55d4eeca4749f6a0cc151a65863d3dfde | [
"MIT"
] | 124 | 2021-05-10T06:35:48.000Z | 2022-03-10T09:25:27.000Z | tests/unit/pipert/core/utils/dummy_routines/dummy_destination_routine.py | OperationalBina/PipeRT2 | 2033f7c55d4eeca4749f6a0cc151a65863d3dfde | [
"MIT"
] | 4 | 2021-09-12T08:10:10.000Z | 2021-11-29T12:10:20.000Z | from pipert2.core.base.routines import DestinationRoutine
from tests.unit.pipert.core.utils.dummy_routines.dummy_middle_routine import DUMMY_ROUTINE_EVENT
class DummyDestinationRoutine(DestinationRoutine):
def __init__(self, name="dummy_end_routine"):
super(DummyDestinationRoutine, self).__init__(name=name)
self.counter = 0
def main_logic(self, data) -> None:
self.counter += 1
def setup(self) -> None:
pass
def cleanup(self) -> None:
pass
class DummyDestinationRoutineException(DestinationRoutine):
def __init__(self, counter=0, **kwargs):
super().__init__(**kwargs)
self.counter = counter
self.inc = True
def main_logic(self, data):
raise Exception()
def setup(self) -> None:
self.counter = 0
def cleanup(self) -> None:
pass
@DestinationRoutine.events(DUMMY_ROUTINE_EVENT.event_name)
def change_logic(self):
self.inc = not self.inc | 25.333333 | 96 | 0.673077 |
07a40a3f426c6bcfe63bf6dac79384d9025726b0 | 5,072 | py | Python | tests/engine.py | vasjapavlovic/eda5 | bc4b387b24239ea1dfb927657f05ddabbf707479 | [
"BSD-3-Clause"
] | null | null | null | tests/engine.py | vasjapavlovic/eda5 | bc4b387b24239ea1dfb927657f05ddabbf707479 | [
"BSD-3-Clause"
] | null | null | null | tests/engine.py | vasjapavlovic/eda5 | bc4b387b24239ea1dfb927657f05ddabbf707479 | [
"BSD-3-Clause"
] | null | null | null | from subprocess import call
from os import path
import hitchpostgres
import hitchselenium
import hitchpython
import hitchserve
import hitchredis
import hitchtest
import hitchsmtp
# Get directory above this file
PROJECT_DIRECTORY = path.abspath(path.join(path.dirname(__file__), '..'))
class ExecutionEngine(hitchtest.ExecutionEngine):
"""Engine for orchestating and interacting with the app."""
def set_up(self):
"""Ensure virtualenv present, then run all services."""
python_package = hitchpython.PythonPackage(
python_version=self.preconditions['python_version']
)
python_package.build()
python_package.verify()
call([
python_package.pip, "install", "-r",
path.join(PROJECT_DIRECTORY, "requirements/local.txt")
])
postgres_package = hitchpostgres.PostgresPackage(
version=self.settings["postgres_version"],
)
postgres_package.build()
postgres_package.verify()
redis_package = hitchredis.RedisPackage(version="2.8.4")
redis_package.build()
redis_package.verify()
self.services = hitchserve.ServiceBundle(
project_directory=PROJECT_DIRECTORY,
startup_timeout=float(self.settings["startup_timeout"]),
shutdown_timeout=5.0,
)
postgres_user = hitchpostgres.PostgresUser("eda5", "password")
self.services['Postgres'] = hitchpostgres.PostgresService(
postgres_package=postgres_package,
users=[postgres_user, ],
databases=[hitchpostgres.PostgresDatabase("eda5", postgres_user), ]
)
self.services['HitchSMTP'] = hitchsmtp.HitchSMTPService(port=1025)
self.services['Django'] = hitchpython.DjangoService(
python=python_package.python,
port=8000,
version=str(self.settings.get("django_version")),
settings="config.settings.local",
needs=[self.services['Postgres'], ],
env_vars=self.settings['environment_variables'],
)
self.services['Redis'] = hitchredis.RedisService(
redis_package=redis_package,
port=16379,
)
self.services['Firefox'] = hitchselenium.SeleniumService(
xvfb=self.settings.get("quiet", False),
no_libfaketime=True,
)
# import hitchcron
# self.services['Cron'] = hitchcron.CronService(
# run=self.services['Django'].manage("trigger").command,
# every=1,
# needs=[ self.services['Django'], ],
# )
self.services.startup(interactive=False)
# Configure selenium driver
self.driver = self.services['Firefox'].driver
self.driver.set_window_size(self.settings['window_size']['height'], self.settings['window_size']['width'])
self.driver.set_window_position(0, 0)
self.driver.implicitly_wait(2.0)
self.driver.accept_next_alert = True
def pause(self, message=None):
"""Stop. IPython time."""
if hasattr(self, 'services'):
self.services.start_interactive_mode()
self.ipython(message)
if hasattr(self, 'services'):
self.services.stop_interactive_mode()
def load_website(self):
"""Navigate to website in Firefox."""
self.driver.get(self.services['Django'].url())
def click(self, on):
"""Click on HTML id."""
self.driver.find_element_by_id(on).click()
def fill_form(self, **kwargs):
"""Fill in a form with id=value."""
for element, text in kwargs.items():
self.driver.find_element_by_id(element).send_keys(text)
def click_submit(self):
"""Click on a submit button if it exists."""
self.driver.find_element_by_css_selector("button[type=\"submit\"]").click()
def confirm_emails_sent(self, number):
"""Count number of emails sent by app."""
assert len(self.services['HitchSMTP'].logs.json()) == int(number)
def wait_for_email(self, containing=None):
"""Wait for, and return email."""
self.services['HitchSMTP'].logs.out.tail.until_json(
lambda email: containing in email['payload'] or containing in email['subject'],
timeout=25,
lines_back=1,
)
def time_travel(self, days=""):
"""Make all services think that time has skipped forward."""
self.services.time_travel(days=int(days))
def on_failure(self):
"""Stop and IPython."""
if not self.settings['quiet']:
if self.settings.get("pause_on_failure", False):
self.pause(message=self.stacktrace.to_template())
def on_success(self):
"""Pause on success if enabled."""
if self.settings.get("pause_on_success", False):
self.pause(message="SUCCESS")
def tear_down(self):
"""Shut down services required to run your test."""
if hasattr(self, 'services'):
self.services.shutdown()
| 34.040268 | 114 | 0.626774 |
85ce2a048c97713b649a703c4f3ecd2afd2894d3 | 1,635 | py | Python | examples/security/roles.py | checkaayush/eve | 8031797458d6edd17f85df92b340be368313f12d | [
"BSD-3-Clause"
] | 3,122 | 2017-03-08T12:47:07.000Z | 2022-03-31T10:55:19.000Z | examples/security/roles.py | checkaayush/eve | 8031797458d6edd17f85df92b340be368313f12d | [
"BSD-3-Clause"
] | 559 | 2017-03-08T08:51:40.000Z | 2022-03-29T09:33:40.000Z | examples/security/roles.py | checkaayush/eve | 8031797458d6edd17f85df92b340be368313f12d | [
"BSD-3-Clause"
] | 428 | 2017-03-07T16:03:16.000Z | 2022-03-18T02:05:23.000Z | # -*- coding: utf-8 -*-
"""
Auth-SHA1/HMAC-Roles
~~~~~~~~~~~~~~~~~~~~
Securing an Eve-powered API with Basic Authentication (RFC2617) and user
roles.
This script assumes that user accounts are stored in an 'accounts' MongoDB
collection, that passwords are stored as SHA1/HMAC hashes and that user
roles are stored in a 'roles' array. All API resources/methods will be
secured unless they are made explicitly public (by fiddling with some
    settings you can open one or more resources and/or methods to public
    access; see the docs).
Since we are using werkzeug we don't need any extra import (werkzeug being
one of Flask/Eve prerequisites).
Checkout Eve at https://github.com/pyeve/eve
This snippet by Nicola Iarocci can be used freely for anything you like.
Consider it public domain.
"""
from eve import Eve
from eve.auth import BasicAuth
from werkzeug.security import check_password_hash
from settings_security import SETTINGS
class RolesAuth(BasicAuth):
def check_auth(self, username, password, allowed_roles, resource, method):
# use Eve's own db driver; no additional connections/resources are used
accounts = app.data.driver.db["accounts"]
lookup = {"username": username}
if allowed_roles:
# only retrieve a user if his roles match ``allowed_roles``
lookup["roles"] = {"$in": allowed_roles}
account = accounts.find_one(lookup)
return account and check_password_hash(account["password"], password)
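    # A minimal sketch of creating a compatible account document, hashing with
    # werkzeug's generate_password_hash (the counterpart of the check above).
    # The username/roles values are illustrative:
    #
    #     from werkzeug.security import generate_password_hash
    #     accounts.insert_one({
    #         'username': 'admin',
    #         'password': generate_password_hash('s3cret'),
    #         'roles': ['admin'],
    #     })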
if __name__ == "__main__":
app = Eve(auth=RolesAuth, settings=SETTINGS)
app.run()
| 34.0625 | 79 | 0.699083 |
74c532babd56231afdea7221af82f9047266dc0a | 216 | py | Python | flaskwebapp/web/app.py | kurianbenoy/Docker-101 | 862d188d402e77be74994be0f07417b55d1b8dde | [
"MIT"
] | null | null | null | flaskwebapp/web/app.py | kurianbenoy/Docker-101 | 862d188d402e77be74994be0f07417b55d1b8dde | [
"MIT"
] | null | null | null | flaskwebapp/web/app.py | kurianbenoy/Docker-101 | 862d188d402e77be74994be0f07417b55d1b8dde | [
"MIT"
] | null | null | null | from flask import Flask, render_template
app = Flask(__name__)
@app.route('/')
def hello_whale():
return render_template("whale_hello.html")
if __name__ == '__main__':
app.run(debug=True, host='0.0.0.0')
| 18 | 46 | 0.694444 |
15a665b19c6687f8818b342c5274f9a2594ff993 | 5,249 | py | Python | clusterapp/features/audio.py | ealmuina/thesis | d436ae0c6f775c56b2072889ceafae1507291c74 | [
"MIT"
] | 1 | 2018-02-11T07:36:31.000Z | 2018-02-11T07:36:31.000Z | clusterapp/features/audio.py | ealmuina/thesis | d436ae0c6f775c56b2072889ceafae1507291c74 | [
"MIT"
] | null | null | null | clusterapp/features/audio.py | ealmuina/thesis | d436ae0c6f775c56b2072889ceafae1507291c74 | [
"MIT"
] | null | null | null | from librosa.feature import mfcc
from .FreqParameters.BandwidthFreqParameter import BandwidthFreqParameter
from .FreqParameters.EnergyFreqParameter import EnergyFreqParameter
from .FreqParameters.EntropyFreqParameter import EntropyFreqParameter
from .FreqParameters.FluxFreqParameter import FluxFreqParameter
from .FreqParameters.MaxFreqParameter import MaxFreqParameter
from .FreqParameters.MinFreqParameter import MinFreqParameter
from .FreqParameters.PeakAmpFreqParameter import PeakAmpFreqParameter
from .FreqParameters.PeakFreqParameter import PeakFreqParameter
from .FreqParameters.PeaksAboveFreqParameter import PeaksAboveFreqParameter
from .FreqParameters.RmsFreqParameter import RmsFreqParameter
from .FreqParameters.RollOffFreqParameter import RollOffFreqParameter
from .FreqParameters.ShannonEntropyFreqParameter import ShannonEntropyFreqParameter
from .FreqParameters.SpectralCentroidParameter import SpectralCentroidParameter
from .TimeParameters.AutocorrelationTimeParameter import AutocorrelationTimeParameter
from .TimeParameters.DistanceToMaxTimeParameter import DistanceToMaxTimeParameter
from .TimeParameters.DurationTimeParameter import DurationTimeParameter
from .TimeParameters.EndTimeParameter import EndTimeParameter
from .TimeParameters.EnergyTimeParameter import EnergyTimeParameter
from .TimeParameters.MeanAmplitudeTimeParameter import MeanAmplitudeTimeParameter
from .TimeParameters.PeakToPeakTimeParameter import PeakToPeakTimeParameter
from .TimeParameters.RmsTimeParameter import RmsTimeParameter
from .TimeParameters.StartTimeParameter import StartTimeParameter
from .TimeParameters.StdAmplitudeTimeParameter import StdAmplitudeTimeParameter
from .TimeParameters.TimeCentroidParameter import TimeCentroidParameter
from .TimeParameters.VarianceAmplitudeTimeParameter import VarianceAmplitudeTimeParameter
from .TimeParameters.ZcrTimeParameter import ZcrTimeParameter
from .segment import Segment
from .signal import Signal
from .utils import *
"""Time Parameters Instances"""
std_t = StdAmplitudeTimeParameter()
var_t = VarianceAmplitudeTimeParameter()
mean_t = MeanAmplitudeTimeParameter()
cent_t = TimeCentroidParameter()
energy_t = EnergyTimeParameter()
corr_t = AutocorrelationTimeParameter()
disttomax_t = DistanceToMaxTimeParameter()
start_t = StartTimeParameter()
end_t = EndTimeParameter()
dur_t = DurationTimeParameter()
rms_t = RmsTimeParameter()
ptp_t = PeakToPeakTimeParameter()
zcr_t = ZcrTimeParameter()
"""Spectral Parameters Instances"""
max_f = MaxFreqParameter(total=True)
min_f = MinFreqParameter(total=True)
bandwidth_f = BandwidthFreqParameter(total=True)
peaks_above_f = PeaksAboveFreqParameter()
energy_f = EnergyFreqParameter()
rms_f = RmsFreqParameter()
s_centroid = SpectralCentroidParameter()
peak_f = PeakFreqParameter()
peak_amp_f = PeakAmpFreqParameter()
s_entropy = ShannonEntropyFreqParameter()
entropy_f = EntropyFreqParameter()
roll_off_f = RollOffFreqParameter()
flux_f = FluxFreqParameter()
class Audio:
def __init__(self, path, string_path=False):
file, self.fs = load_file(str(path) if string_path else path)
signal = Signal(file, self.fs)
signal.set_window('hann')
self.segment = Segment(signal, 0, len(signal.data) - 1)
self.name = path.name
self._build_temporal_features()
self._build_spectral_features()
self._build_mfcc()
def __getattr__(self, item):
if item in self.segment.measures_dict:
return self.segment.measures_dict[item]
raise AttributeError(item)
def _build_mfcc(self):
coeffs = mfcc(self.segment.data, self.fs, n_mfcc=13)[1:]
self.segment.measures_dict['MFCC'] = coeffs
def _build_spectral_features(self):
locations = ['start', 'end', 'centre', 'max', 'max_amp']
for l in locations:
max_f.measure(self.segment, threshold=-20, location=l)
min_f.measure(self.segment, threshold=-20, location=l)
bandwidth_f.measure(self.segment, threshold=-20, location=l)
peaks_above_f.measure(self.segment, threshold=-20, location=l)
energy_f.measure(self.segment, location=l)
rms_f.measure(self.segment, location=l)
s_centroid.measure(self.segment, location=l)
s_entropy.measure(self.segment, location=l)
entropy_f.measure(self.segment, location=l)
peak_f.measure(self.segment, location=l)
peak_amp_f.measure(self.segment, location=l)
roll_off_f.measure(self.segment, location=l, cutoff=.95)
flux_f.measure(self.segment, location=l)
def _build_temporal_features(self):
corr_t.measure(self.segment, offset=0)
std_t.measure(self.segment, envelope='hilbert')
var_t.measure(self.segment, envelope='hilbert')
mean_t.measure(self.segment, envelope='hilbert')
cent_t.measure(self.segment, envelope='hilbert')
energy_t.measure(self.segment, envelope='hilbert')
zcr_t.measure(self.segment)
dur_t.measure(self.segment)
rms_t.measure(self.segment)
ptp_t.measure(self.segment)
start_t.measure(self.segment)
end_t.measure(self.segment)
disttomax_t.measure(self.segment)
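# A minimal usage sketch, assuming a wav file on disk; pathlib is needed
# because Audio reads ``path.name``, and the MFCC shape follows from the
# n_mfcc=13 call above with the first coefficient dropped:
#
#     from pathlib import Path
#     audio = Audio(Path('recording.wav'))
#     print(audio.MFCC.shape)  # (12, n_frames)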
| 42.330645 | 89 | 0.777862 |
604fd07f88ac325be6b00962aad9545c1d10edca | 8,206 | py | Python | art/attacks/projected_gradient_descent_unittest.py | jizongFox/adversarial-robustness-toolbox | 0649fe44d42bc7ba39a4b1a2ff95a31320fd1ae5 | [
"MIT"
] | null | null | null | art/attacks/projected_gradient_descent_unittest.py | jizongFox/adversarial-robustness-toolbox | 0649fe44d42bc7ba39a4b1a2ff95a31320fd1ae5 | [
"MIT"
] | null | null | null | art/attacks/projected_gradient_descent_unittest.py | jizongFox/adversarial-robustness-toolbox | 0649fe44d42bc7ba39a4b1a2ff95a31320fd1ae5 | [
"MIT"
] | null | null | null | # MIT License
#
# Copyright (C) IBM Corporation 2018
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import unittest
import keras
import keras.backend as k
import numpy as np
import tensorflow as tf
import torch.nn as nn
import torch.nn.functional as f
import torch.optim as optim
from keras.layers import Dense, Flatten, Conv2D, MaxPooling2D
from keras.models import Sequential
from art.attacks.projected_gradient_descent import ProjectedGradientDescent
from art.classifiers import KerasClassifier, PyTorchClassifier, TFClassifier
from art.utils import load_mnist, get_labels_np_array, master_seed
logger = logging.getLogger('testLogger')
BATCH_SIZE = 10
NB_TRAIN = 100
NB_TEST = 11
class Model(nn.Module):
def __init__(self):
super(Model, self).__init__()
self.conv = nn.Conv2d(1, 16, 5)
self.pool = nn.MaxPool2d(2, 2)
self.fc = nn.Linear(2304, 10)
def forward(self, x):
x = self.pool(f.relu(self.conv(x)))
x = x.view(-1, 2304)
logit_output = self.fc(x)
return logit_output
class TestPGD(unittest.TestCase):
@classmethod
def setUpClass(cls):
k.set_learning_phase(1)
# Get MNIST
(x_train, y_train), (x_test, y_test), _, _ = load_mnist()
x_train, y_train, x_test, y_test = x_train[:NB_TRAIN], y_train[:NB_TRAIN], x_test[:NB_TEST], y_test[:NB_TEST]
cls.mnist = (x_train, y_train), (x_test, y_test)
# Keras classifier
cls.classifier_k = cls._cnn_mnist_k([28, 28, 1])
cls.classifier_k.fit(x_train, y_train, batch_size=BATCH_SIZE, nb_epochs=2)
scores = cls.classifier_k._model.evaluate(x_train, y_train)
print("\n[Keras, MNIST] Accuracy on training set: %.2f%%" % (scores[1] * 100))
scores = cls.classifier_k._model.evaluate(x_test, y_test)
print("\n[Keras, MNIST] Accuracy on test set: %.2f%%" % (scores[1] * 100))
# Create basic CNN on MNIST using TensorFlow
cls.classifier_tf = cls._cnn_mnist_tf([28, 28, 1])
cls.classifier_tf.fit(x_train, y_train, nb_epochs=2, batch_size=BATCH_SIZE)
scores = get_labels_np_array(cls.classifier_tf.predict(x_train))
acc = np.sum(np.argmax(scores, axis=1) == np.argmax(y_train, axis=1)) / y_train.shape[0]
print('\n[TF, MNIST] Accuracy on training set: %.2f%%' % (acc * 100))
scores = get_labels_np_array(cls.classifier_tf.predict(x_test))
acc = np.sum(np.argmax(scores, axis=1) == np.argmax(y_test, axis=1)) / y_test.shape[0]
print('\n[TF, MNIST] Accuracy on test set: %.2f%%' % (acc * 100))
# Create basic PyTorch model
cls.classifier_py = cls._cnn_mnist_py()
x_train, x_test = np.swapaxes(x_train, 1, 3), np.swapaxes(x_test, 1, 3)
cls.classifier_py.fit(x_train, y_train, nb_epochs=2, batch_size=BATCH_SIZE)
scores = get_labels_np_array(cls.classifier_py.predict(x_train))
acc = np.sum(np.argmax(scores, axis=1) == np.argmax(y_train, axis=1)) / y_train.shape[0]
print('\n[PyTorch, MNIST] Accuracy on training set: %.2f%%' % (acc * 100))
scores = get_labels_np_array(cls.classifier_py.predict(x_test))
acc = np.sum(np.argmax(scores, axis=1) == np.argmax(y_test, axis=1)) / y_test.shape[0]
print('\n[PyTorch, MNIST] Accuracy on test set: %.2f%%' % (acc * 100))
def setUp(self):
# Set master seed
master_seed(1234)
def test_mnist(self):
# Define all backends to test
backends = {'keras': self.classifier_k,
'tf': self.classifier_tf,
'pytorch': self.classifier_py}
        for backend_name, classifier in backends.items():
            if backend_name == 'pytorch':
                self._swap_axes()
            self._test_backend_mnist(classifier)
            if backend_name == 'pytorch':
                self._swap_axes()
def _swap_axes(self):
(x_train, y_train), (x_test, y_test) = self.mnist
x_train = np.swapaxes(x_train, 1, 3)
x_test = np.swapaxes(x_test, 1, 3)
self.mnist = (x_train, y_train), (x_test, y_test)
def _test_backend_mnist(self, classifier):
# Get MNIST
(x_train, y_train), (x_test, y_test) = self.mnist
# Test PGD with np.inf norm
attack = ProjectedGradientDescent(classifier, eps=1, eps_step=0.1)
x_train_adv = attack.generate(x_train)
x_test_adv = attack.generate(x_test)
self.assertFalse((x_train == x_train_adv).all())
self.assertFalse((x_test == x_test_adv).all())
train_y_pred = get_labels_np_array(classifier.predict(x_train_adv))
test_y_pred = get_labels_np_array(classifier.predict(x_test_adv))
self.assertFalse((y_train == train_y_pred).all())
self.assertFalse((y_test == test_y_pred).all())
acc = np.sum(np.argmax(train_y_pred, axis=1) == np.argmax(y_train, axis=1)) / y_train.shape[0]
print('\nAccuracy on adversarial train examples: %.2f%%' % (acc * 100))
acc = np.sum(np.argmax(test_y_pred, axis=1) == np.argmax(y_test, axis=1)) / y_test.shape[0]
print('\nAccuracy on adversarial test examples: %.2f%%' % (acc * 100))
@staticmethod
def _cnn_mnist_tf(input_shape):
labels_tf = tf.placeholder(tf.float32, [None, 10])
inputs_tf = tf.placeholder(tf.float32, [None] + list(input_shape))
# Define the tensorflow graph
conv = tf.layers.conv2d(inputs_tf, 4, 5, activation=tf.nn.relu)
conv = tf.layers.max_pooling2d(conv, 2, 2)
fc = tf.contrib.layers.flatten(conv)
# Logits layer
logits = tf.layers.dense(fc, 10)
# Train operator
loss = tf.reduce_mean(tf.losses.softmax_cross_entropy(logits=logits, onehot_labels=labels_tf))
optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
train_tf = optimizer.minimize(loss)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
classifier = TFClassifier((0, 1), inputs_tf, logits, loss=loss, train=train_tf, output_ph=labels_tf,
sess=sess)
return classifier
@staticmethod
def _cnn_mnist_k(input_shape):
# Create simple CNN
model = Sequential()
model.add(Conv2D(4, kernel_size=(5, 5), activation='relu', input_shape=input_shape))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(10, activation='softmax'))
model.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.Adam(lr=0.01),
metrics=['accuracy'])
classifier = KerasClassifier((0, 1), model, use_logits=False)
return classifier
@staticmethod
def _cnn_mnist_py():
model = Model()
# Define a loss function and optimizer
loss_fn = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.01)
# Get classifier
classifier = PyTorchClassifier((0, 1), model, loss_fn, optimizer, (1, 28, 28), 10)
return classifier
if __name__ == '__main__':
unittest.main()
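# --- Added sketch (illustrative; not part of the original ART test) ---
# The attack exercised above iterates the standard L-inf PGD update:
# x' = clip(x + eps_step * sign(grad_x loss)), re-projected into the eps-ball
# around the clean input and into the valid pixel range at every step.
# A minimal NumPy rendering of one step, defined here but never called:
def _example_pgd_step(x_adv, x0, grad, eps=1.0, eps_step=0.1,
                      clip_min=0.0, clip_max=1.0):
    """Hedged sketch of one L-inf PGD step; `grad` is d(loss)/d(input)."""
    x_adv = x_adv + eps_step * np.sign(grad)    # ascend the loss
    x_adv = np.clip(x_adv, x0 - eps, x0 + eps)  # project into the eps-ball
    return np.clip(x_adv, clip_min, clip_max)   # keep inputs in valid range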
| 40.029268 | 120 | 0.662077 |
6c697716756762b9284a9ffe5f1e61e71bd61fd2 | 950 | py | Python | pridesport_work/sportgoods/models.py | Trifon87/pridesport_work | 9ab47d3c58915c2e791bf8a1fcb3ceee1d8de62c | [
"MIT"
] | null | null | null | pridesport_work/sportgoods/models.py | Trifon87/pridesport_work | 9ab47d3c58915c2e791bf8a1fcb3ceee1d8de62c | [
"MIT"
] | null | null | null | pridesport_work/sportgoods/models.py | Trifon87/pridesport_work | 9ab47d3c58915c2e791bf8a1fcb3ceee1d8de62c | [
"MIT"
] | null | null | null | # from django.db import models
#
#
# class Gear(models.Model):
# FIGHT = 'fight'
# FITNESS = 'fitness'
# CLOTHING = 'clothing'
# UN = 'unknown'
# SPORT_TYPES = (
# (FIGHT, "fight"),
# (FITNESS, "fitness"),
# (CLOTHING, 'clothing'),
# (UN, 'Unknown')
# )
# type = models.CharField(max_length=35, choices=SPORT_TYPES, default=UN)
# name = models.CharField(max_length= 35, blank=False)
# price = models.FloatField(blank=False)
# description = models.TextField(blank=True)
# image_url = models.ImageField(
# upload_to='gear',
# )
#
# def __str__(self):
# return self.name
#
#
# # class Like(models.Model):
# # gear = models.ForeignKey(Gear, on_delete= models.CASCADE)
# # # test = models.CharField(str=())
#
#
# class Comment(models.Model):
# gear = models.ForeignKey(Gear, on_delete=models.CASCADE)
# text = models.TextField(blank=False) | 27.941176 | 77 | 0.6 |
b9c163758172bb02824805c579747969d8bda114 | 506 | py | Python | src/sortedArrayToBST/bst.py | rajitbanerjee/leetcode | 720fcdd88d371e2d6592ceec8370a6760a77bb89 | [
"CC0-1.0"
] | null | null | null | src/sortedArrayToBST/bst.py | rajitbanerjee/leetcode | 720fcdd88d371e2d6592ceec8370a6760a77bb89 | [
"CC0-1.0"
] | null | null | null | src/sortedArrayToBST/bst.py | rajitbanerjee/leetcode | 720fcdd88d371e2d6592ceec8370a6760a77bb89 | [
"CC0-1.0"
] | 1 | 2021-04-28T18:17:55.000Z | 2021-04-28T18:17:55.000Z | # Definition for a binary tree node.
from typing import List
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution:
def sortedArrayToBST(self, nums: List[int]) -> TreeNode:
if not nums:
return None
mid = len(nums) // 2
node = TreeNode(nums[mid])
node.left = self.sortedArrayToBST(nums[:mid])
node.right = self.sortedArrayToBST(nums[mid + 1:])
return node
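# --- Added usage sketch (illustrative) ---
# Picking the middle element as the root keeps the tree height-balanced.
# For nums = [-10, -3, 0, 5, 9]: root 0, left subtree rooted at -3 (child -10),
# right subtree rooted at 9 (child 5):
#
#     root = Solution().sortedArrayToBST([-10, -3, 0, 5, 9])
#     assert root.val == 0 and root.left.val == -3 and root.right.val == 9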
| 24.095238 | 60 | 0.590909 |
bd0a728861e2c56ce95ea7bc28d983174f63baaf | 4,515 | py | Python | ped_dialog/file_browse.py | jpfxgood/ped | f753ca27e4462c321ed28f00e1ef47fbde62990e | [
"MIT"
] | null | null | null | ped_dialog/file_browse.py | jpfxgood/ped | f753ca27e4462c321ed28f00e1ef47fbde62990e | [
"MIT"
] | 21 | 2020-07-03T13:14:15.000Z | 2020-07-14T14:27:43.000Z | ped_dialog/file_browse.py | jpfxgood/ped | f753ca27e4462c321ed28f00e1ef47fbde62990e | [
"MIT"
] | null | null | null | # Copyright 2009 James P Goodwin ped tiny python editor
""" module to implement a file browse dialog component for the ped editor """
import curses
import curses.ascii
from ped_dialog import dialog
from ped_core import editor_common
from ped_core import keytab
class FileBrowseComponent(dialog.Component):
""" component subclass for embedding a read-only editor in a dialog to do preview of files """
def __init__(self, name, order, x, y, width, height, label, filename, showname = True ):
""" name, order== tab order, x,y offset in dialog, width,height size in chars, label title for border, filename file to show in editor """
dialog.Component.__init__(self, name, order )
self.x = x
self.y = y
self.width = width
self.height = height
self.ewin = None
self.editor = None
self.filename = filename
self.start_line = 0
self.label = label
self.isfocus = None
self.showname = showname
def __del__(self):
""" clean up window, editor and workfile if we get deleted """
self.reset()
def reset(self):
""" reset stuff """
if self.ewin:
del self.ewin
self.ewin = None
if self.editor:
self.editor.getWorkfile().close()
self.editor = None
def setpos(self, x, y ):
""" set the position """
self.reset()
dialog.Component.setpos(self, x, y)
def setsize(self, height, width ):
""" set the size """
self.reset()
dialog.Component.setsize(self, height, width )
def mouse_event(self, ox, oy, mtype):
""" handle mouse events return key value or -1 for not handled """
if self.editor and (ox >= self.x and ox < self.x+self.width and oy >= self.y and oy <= self.y+self.height):
oy = (oy - self.y) - 2
ox = (ox - self.x) - 1
if oy >= 0 and ox >= 0 and (mtype & (curses.BUTTON1_CLICKED | curses.BUTTON1_PRESSED | curses.BUTTON1_RELEASED)):
self.editor.goto(self.editor.line+oy,self.editor.left+ox)
return keytab.KEYTAB_CR
return -1
def render(self):
""" draw the frame for the component and the editor as needed """
win = self.getparent()
if win:
if self.isfocus:
attr = curses.A_BOLD
else:
attr = curses.A_NORMAL
dialog.rect(win,self.x,self.y,self.width,self.height,self.label,attr,False)
if not self.ewin:
self.ewin = win.subwin(self.height-2,self.width-2,self.y+1,self.x+1)
if not self.editor and self.filename:
self.editor = editor_common.ReadonlyEditor(win,self.ewin,self.filename, self.showname)
self.editor.goto(self.start_line,0)
self.editor.mark_lines()
self.editor.invalidate_all()
self.editor.main(False)
if self.editor:
self.editor.setfocus(self.isfocus)
self.editor.redraw()
win.refresh()
self.isfocus = False
def focus(self):
""" indicates that we have the focus """
self.isfocus = True
def setvalue(self,value):
""" this component doesn't really do setvalue """
pass
def getvalue(self):
""" returns the current line in the editor """
if self.editor:
return self.editor.getCurrentLine()
else:
return ""
def setfilename(self,filename,number):
""" set a new file to view and a line number to scroll to, clean up the old one """
self.filename = filename
self.start_line = number
if self.editor:
self.editor.getWorkfile().close()
del self.editor
self.editor = None
def handle(self,ch):
""" translate the editor keys for component use """
if self.editor:
o_line = self.editor.getLine()
ret_ch = self.editor.main(False,ch)
if self.editor.getLine() != o_line or not self.editor.isMark():
if self.editor.isMark():
self.editor.mark_lines()
self.editor.mark_lines()
else:
ret_ch = ch
if ret_ch in [keytab.KEYTAB_SPACE,keytab.KEYTAB_CR,keytab.KEYTAB_TAB,keytab.KEYTAB_ESC,keytab.KEYTAB_BTAB]:
return ret_ch
else:
return dialog.Component.CMP_KEY_NOP
| 35.551181 | 146 | 0.580952 |
59a7edc3dcd08f47352970f2c94d94131a45b106 | 1,713 | py | Python | server/api/models.py | yzhfury/ReserveMe | 1969626da931c335ec2ee3bf8708e2995131bb48 | [
"MIT"
] | 3 | 2020-11-15T05:09:13.000Z | 2020-11-16T13:27:10.000Z | server/api/models.py | yzhfury/ReserveMe | 1969626da931c335ec2ee3bf8708e2995131bb48 | [
"MIT"
] | null | null | null | server/api/models.py | yzhfury/ReserveMe | 1969626da931c335ec2ee3bf8708e2995131bb48 | [
"MIT"
] | 1 | 2020-11-15T05:21:46.000Z | 2020-11-15T05:21:46.000Z | import settings
import uuid
import jwt
from datetime import datetime, timedelta
from flask import g
from api.conf.auth import auth
from api.database.database import db
from sqlalchemy.dialects.postgresql import UUID, ARRAY
import api.errors as error
import os
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(128))
password = db.Column(db.String(128))
email = db.Column(db.String(128))
user_role = db.Column(db.String, default='user')
user_rev_id = db.Column(UUID(as_uuid=True))
def generate_token(self, permission_level):
payload = {
'exp': datetime.utcnow()+timedelta(minutes=60),
'iat': datetime.utcnow(),
'user': self.email,
'sub': permission_level
}
        access_token = jwt.encode(payload, os.environ['SECRET_KEY'], algorithm='HS256')
return access_token
@staticmethod
@auth.verify_token
def verify_token(token):
g.user = None
        try:
            decoded = jwt.decode(token, os.environ['SECRET_KEY'], algorithms=['HS256'])
        except jwt.InvalidTokenError:
            return False
        # the payload written by generate_token carries 'user' and 'sub'
        if 'user' in decoded and 'sub' in decoded:
            g.user = decoded['user']
            g.admin = decoded['sub']
            return True
return False
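# --- Added sketch (illustrative; assumes SECRET_KEY is set in the environment
# and that verify_token runs inside a Flask request/app context for `g`) ---
#
#     user = User(username='alice', email='a@example.com', password='...')
#     token = user.generate_token(permission_level='user')
#     assert User.verify_token(token)   # on success fills g.user / g.admin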
class Blacklist(db.Model):
id = db.Column(db.Integer, primary_key=True)
refresh_token = db.Column(db.String(length=256))
class Reservation(db.Model):
id = db.Column(db.Integer, primary_key=True)
rsv_uuid = db.Column(UUID(as_uuid=True))
rsv_name = db.Column(db.String(256))
due_date = db.Column(db.DateTime)
num_limit = db.Column(db.Integer)
num_now = db.Column(db.Integer) | 29.534483 | 86 | 0.649154 |
12d4ce5771ddf8db8c1025fac2efd43a416e4c89 | 2,048 | py | Python | python/akg/ops/math/abs_ad.py | KnowingNothing/akg-test | 114d8626b824b9a31af50a482afc07ab7121862b | [
"Apache-2.0"
] | 1 | 2020-08-31T02:43:43.000Z | 2020-08-31T02:43:43.000Z | python/akg/ops/math/abs_ad.py | KnowingNothing/akg-test | 114d8626b824b9a31af50a482afc07ab7121862b | [
"Apache-2.0"
] | null | null | null | python/akg/ops/math/abs_ad.py | KnowingNothing/akg-test | 114d8626b824b9a31af50a482afc07ab7121862b | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# coding: utf-8
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""operator dsl function: abs_ad"""
import akg
from akg.ops.math import abs
from akg.utils import validation_check as vc_util
@vc_util.check_input_type(akg.tvm.tensor.Tensor, akg.tvm.tensor.Tensor)
def abs_ad(head, in_data):
"""
Compute gradient of abs operator with automatic differentiate.
Args:
head (tvm.tensor.Tensor): Tensor of type float16, float32, int8, uint8, int32.
in_data (tvm.tensor.Tensor): Tensor of type float16, float32, int8, uint8, int32.
Returns:
tvm.tensor.Tensor has the same shape as input.
"""
dtype = in_data.dtype
# check head's validation.
vc_util.check_shape(head.shape)
vc_util.ops_dtype_check(head.dtype, vc_util.DtypeForDavinci.ALL_TYPES)
need_cast_dtype = ["int8", "int32", "uint8"]
abs_data = abs.abs_value(in_data)
if head.dtype in need_cast_dtype:
head = akg.tvm.compute(head.shape, lambda *indice: head(*indice).astype("float16"), name='head_cast')
if dtype in need_cast_dtype:
abs_data = akg.tvm.compute(abs_data.shape,
lambda *indice: abs_data(*indice).astype("float16"),
name='abs_cast')
jacs = list(akg.differentiate(abs_data, [in_data], head))
if dtype in need_cast_dtype:
jacs[0] = akg.tvm.compute(jacs[0].shape, lambda *indice: jacs[0](*indice).astype(dtype), name='res')
return jacs[0]
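# --- Added note (illustrative; not part of akg) ---
# The backward pass built above reduces mathematically to d|x|/dx = sign(x),
# so the returned tensor equals head * sign(in_data) elementwise (with the
# usual subgradient convention at x == 0). A plain NumPy check of the identity:
#
#     import numpy as np
#     x = np.array([-2.0, 0.5, 3.0])
#     head = np.ones_like(x)
#     head * np.sign(x)   # -> array([-1., 1., 1.])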
| 37.925926 | 109 | 0.693359 |
4382a5647c555d3158ad868507f532daf3500cef | 7,853 | py | Python | augustus/system/models/base_series.py | jialuechen/augustus | d4fbda427e3d9c60896b0e22c06cd593b484ef9d | [
"MIT"
] | 2 | 2019-09-13T18:49:17.000Z | 2022-01-25T05:14:05.000Z | augustus/system/models/base_series.py | jialuechen/augustus | d4fbda427e3d9c60896b0e22c06cd593b484ef9d | [
"MIT"
] | null | null | null | augustus/system/models/base_series.py | jialuechen/augustus | d4fbda427e3d9c60896b0e22c06cd593b484ef9d | [
"MIT"
] | 2 | 2019-02-28T21:23:04.000Z | 2020-07-02T01:23:24.000Z | import abc
from collections import UserDict, UserList, deque
from typing import Deque, Dict, List, Union
import pandas as pd
from augustus.constants import ActionType
from augustus.system.metabase_env import augustusEnvBase
class SeriesBase(augustusEnvBase, UserDict, abc.ABC):
name: str = None
def __init__(self, maxlen: int = None) -> None:
super().__init__()
self.data = {} # type:Dict[str,Deque]
for ticker in self.env.feeds:
if maxlen:
self.data[f'{ticker}_long'] = deque([
dict(date=self.env.fromdate, value=0)], maxlen=maxlen)
self.data[f'{ticker}_short'] = deque([
dict(date=self.env.fromdate, value=0)], maxlen=maxlen)
else:
self.data[f'{ticker}_long'] = [
dict(date=self.env.fromdate, value=0)]
self.data[f'{ticker}_short'] = [
dict(date=self.env.fromdate, value=0)]
def change_initial_value(self, ticker: str,
value: float, long_or_short: str):
self.data[f'{ticker}_{long_or_short}'][0]['value'] = value
def latest(self, ticker: str,
long_or_short: str, index: int = -1) -> float:
return self.data[f'{ticker}_{long_or_short}'][index]['value']
def total_value(self) -> float:
total = 0
for data_list in self.data.values():
per_dict = data_list[-1]
total += per_dict['value']
return total
def _append_value(self, ticker: str, value: float, long_or_short: str):
key = f'{ticker}_{long_or_short}'
if self.data[key][-1]['date'] == self.env.sys_date:
self.data[key][-1]['value'] = value
else:
self.data[key].append(
{'date': self.env.sys_date, 'value': value})
def dataframe(self) -> list:
dataframe_list = []
for ticker in self.env.tickers:
long_df = pd.DataFrame(self.data[f'{ticker}_long'])
short_df = pd.DataFrame(self.data[f'{ticker}_short'])
long_df.rename(columns=dict(
value=f'{self.name}_{ticker}_long'), inplace=True)
short_df.rename(columns=dict(
value=f'{self.name}_{ticker}_short'), inplace=True)
long_df = long_df[~long_df.date.duplicated(keep='last')]
short_df = short_df[~short_df.date.duplicated(keep='last')]
long_df.set_index('date', inplace=True)
long_df.index = pd.to_datetime(long_df.index)
short_df.set_index('date', inplace=True)
short_df.index = pd.to_datetime(short_df.index)
dataframe_list.append(long_df)
dataframe_list.append(short_df)
return dataframe_list
    def single_dataframe(self) -> pd.DataFrame:
dataframe_list = []
for value in self.data.values():
if len(value) == 1:
continue
df = pd.DataFrame(value)
df = df[~df.date.duplicated(keep='last')]
df.set_index('date', inplace=True)
df.index = pd.to_datetime(df.index)
dataframe_list.append(df)
result = sum(dataframe_list)
return result
def plot(self, ticker: str):
long_df = pd.DataFrame(self.data[f'{ticker}_long'])
short_df = pd.DataFrame(self.data[f'{ticker}_short'])
long_df.rename(columns=dict(value=f'{self.name}_long'), inplace=True)
short_df.rename(columns=dict(value=f'{self.name}_short'), inplace=True)
total_df = long_df.merge(short_df, how='outer')
total_df.fillna(method='ffill', inplace=True)
total_df.set_index('date', inplace=True)
total_df.plot()
def get_barly_cur_price(self, ticker: str, order_executed: bool) -> float:
if order_executed:
return self.env.feeds[ticker].execute_price
else:
return self.env.feeds[ticker].open
@abc.abstractmethod
def update_order(self):
raise NotImplementedError
class MoneySeries(augustusEnvBase, UserList):
def __init__(self, name: str, initial_value: int,
maxlen: int = None) -> None:
super().__init__()
self.name = name
if maxlen:
self.data = deque(
[dict(date=self.env.fromdate, value=initial_value)],
maxlen=maxlen) # type: Deque[Dict[str,Union[str,float]]]
else:
# type: List[Dict[str,Union[str,float]]]
self.data = [dict(date=self.env.fromdate, value=initial_value)]
def change_initial_value(self, value: int):
self.data[0]['value'] = value
def latest(self, index: int = -1) -> float:
return self.data[index]['value']
def dataframe(self) -> pd.DataFrame:
dataframe = pd.DataFrame(self.data)
dataframe.rename(columns=dict(value=self.name), inplace=True)
dataframe.set_index('date', inplace=True)
dataframe.index = pd.to_datetime(dataframe.index)
result = dataframe[~dataframe.index.duplicated(keep='last')]
        first = dataframe.iloc[:1]  # .ix is removed in modern pandas
result = pd.concat([first, result])
result.sort_index(inplace=True)
return result
def plot(self):
self.dataframe().plot()
class PositionSeries(SeriesBase):
name = 'position'
@staticmethod
def direction(action_type):
if action_type in [ActionType.Buy, ActionType.Short]:
return 1
elif action_type in [ActionType.Sell, ActionType.Cover]:
return -1
def update_order(self, ticker, size, action_type,
last_position, long_or_short='long'):
new_value = last_position + size * self.direction(action_type)
self._append_value(ticker, new_value, long_or_short)
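# Added worked example (illustrative): on the long side, a Buy of 10 followed
# by a Sell of 4 walks the series 0 -> 10 -> 6, since Buy/Short map to
# direction +1 and Sell/Cover to -1 in `direction` above.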
class AvgPriceSeries(SeriesBase):
name = 'avg_price'
def update_order(self, ticker, size, execute_price, last_position,
last_avg_price, new_position, long_or_short='long'):
if new_position == 0:
new_value = 0
elif new_position > last_position:
new_value = (last_position * last_avg_price +
size*execute_price)/new_position
else:
new_value = last_avg_price
self._append_value(ticker, new_value, long_or_short)
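# Added worked example (illustrative): holding 100 units at avg 10.0 and
# buying 50 more at 13.0 takes the `new_position > last_position` branch:
# new_avg = (100 * 10.0 + 50 * 13.0) / 150 = 11.0. Reducing the position
# leaves the average unchanged, and closing it resets the average to 0.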
class RealizedPnlSeriesBase(SeriesBase):
name = 'realized_pnl'
@abc.abstractmethod
def update_order(self, ticker, size, execute_price, action_type,
last_avg_price, long_or_short='long'):
raise NotImplementedError
class CommissionSeriesBase(SeriesBase):
name = 'commission'
@abc.abstractmethod
def update_order(self, ticker, size, execute_price, action_type,
last_commission, long_or_short='long'):
raise NotImplementedError
class HoldingPnlSeriesBase(SeriesBase):
name = 'holding_pnl'
@abc.abstractmethod
def update_order(self, ticker, cur_price, new_avg_price,
new_position, long_or_short='long'):
raise NotImplementedError
@abc.abstractmethod
def update_barly(self, order_executed: bool):
raise NotImplementedError
class MarketValueSeriesBase(SeriesBase):
name = 'market_value'
@abc.abstractmethod
def update_order(self, ticker, cur_price, new_position,
long_or_short='long'):
raise NotImplementedError
@abc.abstractmethod
def update_barly(self, order_executed: bool):
raise NotImplementedError
class MarginSeriesBase(SeriesBase):
name = 'margin'
@abc.abstractmethod
def update_order(self, ticker, long_or_short='long'):
raise NotImplementedError
@abc.abstractmethod
def update_barly(self):
raise NotImplementedError
| 31.538153 | 79 | 0.61607 |
4955f38a2f011ab324fd0dfa4c5c5d2e96edbf45 | 7,127 | py | Python | app/classes/http.py | runcitadel/crafty-web | 895dc1a8605825cf17a2cf87cfec416b8d258d09 | [
"Apache-2.0"
] | null | null | null | app/classes/http.py | runcitadel/crafty-web | 895dc1a8605825cf17a2cf87cfec416b8d258d09 | [
"Apache-2.0"
] | null | null | null | app/classes/http.py | runcitadel/crafty-web | 895dc1a8605825cf17a2cf87cfec416b8d258d09 | [
"Apache-2.0"
] | null | null | null | import os
import sys
import json
import time
import secrets
import asyncio
import logging
import schedule
import threading
import tornado.web
import tornado.ioloop
import tornado.log
import tornado.template
import tornado.escape
import tornado.locale
import tornado.httpserver
from pathlib import Path
from app.classes.console import console
from app.classes.models import Crafty_settings, Webserver
from app.classes.ftp import ftp_svr_object
from app.classes.minecraft_server import mc_server
from app.classes.helpers import helper
from app.classes.web_sessions import web_session
from app.classes.multiserv import multi
from app.classes.handlers.base_handler import BaseHandler
from app.classes.handlers.default404 import My404Handler
from app.classes.handlers.public_handler import PublicHandler
from app.classes.handlers.admin_handler import AdminHandler
from app.classes.handlers.ajax_handler import AjaxHandler
from app.classes.handlers.setup_handler import SetupHandler
from app.classes.handlers.download_handler import DownloadHandler
import app.classes.api as api_routes
logger = logging.getLogger(__name__)
class webserver():
def __init__(self, mc_server):
self.mc_server = mc_server
self.ioloop = None
self.HTTPServer = None
def _asyncio_patch(self):
"""
As of Python 3.8 (on Windows), the asyncio default event handler has changed to "proactor",
where tornado expects the "selector" handler.
This function checks if the platform is windows and changes the event handler to suit.
(Taken from https://github.com/mkdocs/mkdocs/commit/cf2b136d4257787c0de51eba2d9e30ded5245b31)
"""
logger.debug("Checking if asyncio patch is required")
if sys.platform.startswith("win") and sys.version_info >= (3, 8):
import asyncio
try:
from asyncio import WindowsSelectorEventLoopPolicy
except ImportError:
logger.debug("asyncio patch isn't required")
pass # Can't assign a policy which doesn't exist.
else:
if not isinstance(asyncio.get_event_loop_policy(), WindowsSelectorEventLoopPolicy):
asyncio.set_event_loop_policy(WindowsSelectorEventLoopPolicy())
logger.debug("Applied asyncio patch")
def log_function(self, handler):
info = {
'Status_Code': handler.get_status(),
'Method': handler.request.method,
'URL': handler.request.uri,
'Remote_IP': handler.request.remote_ip,
'Elapsed_Time': '%.2fms' % (handler.request.request_time() * 1000)
}
tornado.log.access_log.info(json.dumps(info, indent=4))
def run_tornado(self, silent=False):
# First, patch asyncio if needed
self._asyncio_patch()
# let's verify we have an SSL cert
helper.create_self_signed_cert()
websettings = Webserver.get()
crafty_settings = Crafty_settings.get()
lang = crafty_settings.language
port_number = websettings.port_number
web_root = helper.get_web_root_path()
logger.info("Starting Tornado HTTPS Server on port {}".format(port_number))
if not silent:
console.info("Starting Tornado HTTP Server on port {}".format(port_number))
console.info("http://{}:{} is up and ready for connection:".format(helper.get_local_ip(), port_number))
asyncio.set_event_loop(asyncio.new_event_loop())
tornado.template.Loader('.')
tornado.locale.set_default_locale(lang)
ip = helper.get_public_ip()
if not silent:
if ip:
console.info("Your public IP is: {}".format(ip))
else:
console.warning("Unable to find your public IP\nThe service might be down, or your internet is down.")
handlers = [
(r'/', PublicHandler),
(r'/([a-zA-Z]+)', PublicHandler),
(r'/admin/downloadbackup', DownloadHandler),
(r'/admin/(.*)', AdminHandler, dict(mcserver=self.mc_server)),
(r'/ajax/(.*)', AjaxHandler, dict(mcserver=self.mc_server)),
(r'/setup/(.*)', SetupHandler, dict(mcserver=self.mc_server)),
(r'/static(.*)', tornado.web.StaticFileHandler, {"path": '/'}),
(r'/images(.*)', tornado.web.StaticFileHandler, {"path": "/images"}),
# API routes
(r'/api/v1/host_stats', api_routes.GetHostStats, dict(mcserver=self.mc_server)),
(r'/api/v1/server_stats', api_routes.GetServerStats, dict(mcserver=self.mc_server)),
# Server related
(r'/api/v1/server/send_command', api_routes.SendCommand, dict(mcserver=self.mc_server)),
(r'/api/v1/server/get_logs', api_routes.GetMCLogs, dict(mcserver=self.mc_server)),
(r'/api/v1/server/search_logs', api_routes.SearchMCLogs, dict(mcserver=self.mc_server)),
(r'/api/v1/server/force_backup', api_routes.ForceServerBackup, dict(mcserver=self.mc_server)),
(r'/api/v1/server/start', api_routes.StartServer, dict(mcserver=self.mc_server)),
(r'/api/v1/server/stop', api_routes.StopServer, dict(mcserver=self.mc_server)),
(r'/api/v1/server/restart', api_routes.RestartServer, dict(mcserver=self.mc_server)),
(r'/api/v1/list_servers', api_routes.ListServers, dict(mcserver=self.mc_server)),
# Crafty related
(r'/api/v1/crafty/add_user', api_routes.CreateUser),
(r'/api/v1/crafty/del_user', api_routes.DeleteUser),
(r'/api/v1/crafty/get_logs', api_routes.GetCraftyLogs),
(r'/api/v1/crafty/search_logs', api_routes.SearchCraftyLogs)
]
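        # NOTE: cert_objects below is built from the self-signed cert but is
        # never passed to HTTPServer, so the server currently speaks plain
        # HTTP despite the certificate setup above.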
cert_objects = {
'certfile': os.path.join(web_root, 'certs', 'crafty.crt'),
'keyfile': os.path.join(web_root, 'certs', 'crafty.key')
}
app = tornado.web.Application(
handlers,
template_path=os.path.join(web_root, 'templates'),
static_path=os.path.join(web_root, 'static'),
debug=True,
cookie_secret=helper.random_string_generator(20),
xsrf_cookies=True,
autoreload=False,
log_function=self.log_function,
login_url="/",
default_handler_class=My404Handler
)
self.http_server = tornado.httpserver.HTTPServer(app)
self.http_server.listen(port_number)
tornado.locale.load_translations(os.path.join(web_root, 'translations'))
self.ioloop = tornado.ioloop.IOLoop.instance()
self.ioloop.start()
def start_web_server(self, silent=False):
thread = threading.Thread(target=self.run_tornado, args=(silent, ), daemon=True, name='tornado_thread')
thread.start()
def stop_web_server(self):
logger.info("Shutting Down Tornado Web Server")
ioloop = self.ioloop
ioloop.stop()
self.http_server.stop()
logger.info("Tornado Server Stopped")
tornado_srv = webserver(mc_server)
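# --- Added usage sketch (illustrative; not part of the original module) ---
# The module-level instance above is typically driven from the application's
# lifecycle hooks, roughly:
#
#     tornado_srv.start_web_server(silent=True)  # spawns the tornado thread
#     ...                                        # run until shutdown requested
#     tornado_srv.stop_web_server()              # stops ioloop + HTTP server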
| 38.733696 | 118 | 0.6575 |
1cf5b677de3a005a489585073464bee4498356fd | 1,709 | py | Python | app/core/migrations/0001_initial.py | arunjohn96/recipe_app_api | efc0a10cdd45c792f479089209d38f976c46b353 | [
"MIT"
] | null | null | null | app/core/migrations/0001_initial.py | arunjohn96/recipe_app_api | efc0a10cdd45c792f479089209d38f976c46b353 | [
"MIT"
] | null | null | null | app/core/migrations/0001_initial.py | arunjohn96/recipe_app_api | efc0a10cdd45c792f479089209d38f976c46b353 | [
"MIT"
] | null | null | null | # Generated by Django 2.1.12 on 2019-09-18 11:45
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0009_alter_user_last_name_max_length'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('email', models.EmailField(max_length=255, unique=True)),
('name', models.CharField(max_length=255)),
('is_active', models.BooleanField(default=True)),
('is_staff', models.BooleanField(default=False)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'abstract': False,
},
),
]
| 50.264706 | 266 | 0.63897 |
7fd1d9aeca7dd46319b52af0cbfb4734840e9cfd | 13,696 | py | Python | scripts/extract_aa.py | davek44/Glimmer-MG | ced287371a118ff403e0a5c09735c154cffa34ec | [
"Artistic-1.0"
] | 5 | 2017-08-19T23:38:14.000Z | 2021-02-08T23:23:57.000Z | scripts/extract_aa.py | davek44/Glimmer-MG | ced287371a118ff403e0a5c09735c154cffa34ec | [
"Artistic-1.0"
] | 2 | 2015-10-31T19:54:37.000Z | 2018-06-05T21:47:12.000Z | scripts/extract_aa.py | davek44/Glimmer-MG | ced287371a118ff403e0a5c09735c154cffa34ec | [
"Artistic-1.0"
] | 5 | 2020-02-11T14:55:43.000Z | 2021-10-08T13:41:25.000Z | #!/usr/bin/env python
from optparse import OptionParser
import sys, string, pdb, sys, os
################################################################################
# extract_aa.py
#
# Make a fasta file of amino acid sequences from the Glimmer3 gene predictions.
################################################################################
################################################################################
# main
################################################################################
def main():
usage = 'usage: %prog [options] arg'
parser = OptionParser(usage)
parser.add_option('-s', dest='seqs_file', help='Sequence file')
parser.add_option('-p', dest='predict_file', help='Gene predictions file')
parser.add_option('-o', dest='output_file', help='Output amino acid fasta file')
(options,args) = parser.parse_args()
if not options.seqs_file:
parser.error('Must provide sequence file with -s')
elif not options.predict_file:
parser.error('Must provide prediction file with -p')
if options.output_file:
out_aa = open('%s.faa' % options.output_file, 'w')
out_dna = open('%s.ffn' % options.output_file, 'w')
else:
(base,ext) = os.path.splitext(options.seqs_file)
out_aa = open('%s.faa' % base, 'w')
out_dna = open('%s.ffn' % base, 'w')
frag_preds = get_preds(options.seqs_file, options.predict_file)
header = ''
for line in open(options.seqs_file):
if line[0] == '>':
if header:
print_frag_genes(out_aa, out_dna, header, seq, frag_preds[header])
header = line[1:].rstrip()
seq = ''
else:
seq += line.rstrip()
if header:
print_frag_genes(out_aa, out_dna, header, seq, frag_preds[header])
out_aa.close()
out_dna.close()
################################################################################
# gene_compare
#
# For sorting genes by start
################################################################################
def gene_compare(x, y):
return x.start - y.start
################################################################################
# get_preds
################################################################################
def get_preds(seqs_file, genepred_file):
# get fragment lengths
frag_lengths = {}
for line in open(seqs_file):
if line[0] == '>':
header = line[1:].rstrip()
frag_lengths[header] = 0
else:
frag_lengths[header] += len(line.rstrip())
# process genes
frag_preds = {}
for line in open(genepred_file):
if line[0] =='>':
header = line[1:].rstrip()
frag_preds[header] = []
indel_plusminus = 0
else:
a = line.split()
# indels
insertions = []
if len(a[5]) > 2:
insertions = [int(x)-1 for x in a[5][2:].split(',')]
deletions = []
if len(a[6]) > 2:
deletions = [int(x)-1 for x in a[6][2:].split(',')]
substitutions = []
if len(a[7]) > 2:
substitutions = [int(x)-1 for x in a[7][2:].split(',')]
if int(a[3]) > 0:
# forward
strand = 1
start = int(a[1])-1+indel_plusminus
indel_plusminus += len(deletions) - len(insertions)
end = int(a[2])+indel_plusminus
# partial on left
start_codon = True
if start < 0:
start_codon = False
# partial on right
stop_codon = True
if end > frag_lengths[header]+indel_plusminus:
stop_codon = False
else:
# reverse
strand = -1
start = int(a[2])-1+indel_plusminus
indel_plusminus += len(deletions) - len(insertions)
end = int(a[1])+indel_plusminus
# partial on left
stop_codon = True
if start < 0:
stop_codon = False
# partial on right
start_codon = True
if end > frag_lengths[header]+indel_plusminus:
start_codon = False
frag_preds[header].append(Pred(start, end, strand, start_codon, stop_codon, insertions, deletions, substitutions))
for header in frag_preds:
frag_preds[header].sort(gene_compare)
return frag_preds
################################################################################
# predict_msa
#
# If there were predicted insertions or deletions, add them to the MSA
################################################################################
def predict_msa(preds, seq):
frag_msa = [' ',' ',' '] + list(seq) + [' ',' ',' ']
# combine indels from genes
insertions = []
deletions = []
substitutions = []
for p in preds:
insertions += p.insertions
deletions += p.deletions
substitutions += p.substitutions
insertions.sort()
deletions.sort()
substitutions.sort()
del_len = len(deletions)
ins_len = len(insertions)
sub_len = len(substitutions)
if del_len == ins_len == sub_len == 0:
pred_msa = frag_msa
else:
i = 0 # index into insertions
d = 0 # index into deletions
s = 0 # index into substitutions
p = 3 # index into pred msa
# m = 0 # index into frag msa (unadjusted)
f = 0 # index into frag seq
pred_msa = [' ']*(len(frag_msa)+del_len)
old_msa_len = len(frag_msa)
for m in range(3,old_msa_len-3):
if i < ins_len and insertions[i] == f:
# insertion
pred_msa[p] = '-'
if frag_msa[p] != '-':
f += 1
p += 1
i += 1
elif d < del_len and deletions[d] == f:
# deletion
frag_msa.insert(p,'-')
pred_msa[p] = pred_msa[p-1] # assuming deletions were homopolymer
p += 1
d += 1
pred_msa[p] = frag_msa[p]
if frag_msa[p] != '-':
f += 1
p += 1
elif s < sub_len and substitutions[s] == f:
# substitution (change stop codon)
if frag_msa[p] == '-':
print >> sys.stderr, 'Hit a gap where a substitution should be:'
print >> sys.stderr, seq
exit(1)
elif frag_msa[p] == 'C':
pred_msa[p] = 'G'
else:
pred_msa[p] = 'C'
f += 1
p += 1
s += 1
else:
# normal
pred_msa[p] = frag_msa[p]
if frag_msa[p] != '-':
f += 1
p += 1
return pred_msa
################################################################################
# print_frag_genes
#
# Print all genes from this fragment
################################################################################
def print_frag_genes(out_aa, out_dna, header, seq, preds):
pred_msa = predict_msa(preds, seq)
for g in preds:
gene_seq = ''
s = -3
gene_frame = 0
for m in range(len(pred_msa)):
if pred_msa[m] != '-':
# forward
if g.strand == 1:
# start
if g.start <= s < g.start+3:
gene_frame = 1
# continue or add only if it's the first base of the start and within the sequence
if len(gene_seq) > 0 or (s == g.start and s >= 0):
gene_seq += pred_msa[m]
# end
elif g.end-3 <= s < g.end:
gene_frame = 0
# middle
elif gene_frame > 0:
gene_frame = 1 + (gene_frame % 3)
# continue or add only if it's the first base of a codon and within the sequence
if len(gene_seq) > 0 or (gene_frame == 2 and s >= 0):
gene_seq += pred_msa[m]
# reverse
else:
# end
if g.start <= s < g.start+3:
gene_frame = 9
# continue or add only if it's the first base of the start and within the sequence
#if len(gene_seq) > 0 or (s == g.start and s >= 0):
# gene_seq += pred_msa[m]
# start
elif g.end-3 <= s < g.end:
gene_frame = 0
#if s < len(seq):
if pred_msa[m] != ' ':
gene_seq += pred_msa[m]
# middle
elif gene_frame > 0:
gene_frame -= 1
if gene_frame == 6:
gene_frame = 9
# continue or add only if it's the first base of a codon and within the sequence
if len(gene_seq) > 0 or (gene_frame == 8 and s >= 0):
gene_seq += pred_msa[m]
s += 1
# trim end
gene_seq = gene_seq[:3*(len(gene_seq)/3)]
# orient
if g.strand == 1:
dna_seq = gene_seq
strand = '+'
else:
dna_seq = rc(gene_seq)
strand = '-'
print >> out_aa, '>%s_%d,%d_%s\n%s' % (header, g.start, g.end, strand, translate(dna_seq))
print >> out_dna, '>%s_%d,%d_%s\n%s' % (header, g.start, g.end, strand, dna_seq)
############################################################
# rc
#
# Reverse complement sequence
############################################################
def rc(seq):
return seq.translate(string.maketrans("ATCGatcg","TAGCtagc"))[::-1]
############################################################
# translate
#
# Translate a DNA sequence into an amino acid sequence. Attempts
# to maintain lowercase or uppercase; a codon that mixes cases is
# not in the table and translates to 'X'.
############################################################
code = { 'TTT': 'F', 'TCT': 'S', 'TAT': 'Y', 'TGT': 'C', \
'TTC': 'F', 'TCC': 'S', 'TAC': 'Y', 'TGC': 'C', \
'TTA': 'L', 'TCA': 'S', 'TAA': '*', 'TGA': '*', \
'TTG': 'L', 'TCG': 'S', 'TAG': '*', 'TGG': 'W', \
'CTT': 'L', 'CCT': 'P', 'CAT': 'H', 'CGT': 'R', \
'CTC': 'L', 'CCC': 'P', 'CAC': 'H', 'CGC': 'R', \
'CTA': 'L', 'CCA': 'P', 'CAA': 'Q', 'CGA': 'R', \
'CTG': 'L', 'CCG': 'P', 'CAG': 'Q', 'CGG': 'R', \
'ATT': 'I', 'ACT': 'T', 'AAT': 'N', 'AGT': 'S', \
'ATC': 'I', 'ACC': 'T', 'AAC': 'N', 'AGC': 'S', \
'ATA': 'I', 'ACA': 'T', 'AAA': 'K', 'AGA': 'R', \
'ATG': 'M', 'ACG': 'T', 'AAG': 'K', 'AGG': 'R', \
'GTT': 'V', 'GCT': 'A', 'GAT': 'D', 'GGT': 'G', \
'GTC': 'V', 'GCC': 'A', 'GAC': 'D', 'GGC': 'G', \
'GTA': 'V', 'GCA': 'A', 'GAA': 'E', 'GGA': 'G', \
'GTG': 'V', 'GCG': 'A', 'GAG': 'E', 'GGG': 'G', \
'ttt': 'f', 'tct': 's', 'tat': 'y', 'tgt': 'c', \
'ttc': 'f', 'tcc': 's', 'tac': 'y', 'tgc': 'c', \
'tta': 'l', 'tca': 's', 'taa': '*', 'tga': '*', \
'ttg': 'l', 'tcg': 's', 'tag': '*', 'tgg': 'w', \
'ctt': 'l', 'cct': 'p', 'cat': 'h', 'cgt': 'r', \
'ctc': 'l', 'ccc': 'p', 'cac': 'h', 'cgc': 'r', \
'cta': 'l', 'cca': 'p', 'caa': 'q', 'cga': 'r', \
'ctg': 'l', 'ccg': 'p', 'cag': 'q', 'cgg': 'r', \
'att': 'i', 'act': 't', 'aat': 'n', 'agt': 's', \
'atc': 'i', 'acc': 't', 'aac': 'n', 'agc': 's', \
'ata': 'i', 'aca': 't', 'aaa': 'k', 'aga': 'r', \
'atg': 'm', 'acg': 't', 'aag': 'k', 'agg': 'r', \
'gtt': 'v', 'gct': 'a', 'gat': 'd', 'ggt': 'g', \
'gtc': 'v', 'gcc': 'a', 'gac': 'd', 'ggc': 'g', \
'gta': 'v', 'gca': 'a', 'gaa': 'e', 'gga': 'g', \
'gtg': 'v', 'gcg': 'a', 'gag': 'e', 'ggg': 'g' \
}
def translate(dna):
if len(dna) % 3 != 0:
        print 'DNA sequence length is not divisible by 3.'
return ''
else:
i = 0
peptide = ''
while i < len(dna):
            if dna[i:i+3] in code:
peptide += code[dna[i:i+3]]
else:
peptide += 'X'
i += 3
return peptide
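# Added sanity examples (illustrative; values follow from the tables above):
#   rc('ATCG')             -> 'CGAT'
#   translate('ATGTTTTAA') -> 'MF*'   (start codon, Phe, stop)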
class Pred:
def __init__(self, start, end, strand, start_codon, stop_codon, insertions, deletions, substitutions):
self.start = start
self.end = end
self.strand = strand
self.start_codon = start_codon
self.stop_codon = stop_codon
self.insertions = insertions
self.deletions = deletions
self.substitutions = substitutions
def __str__(self):
return '\t'.join([str(x) for x in [self.start,self.end,self.strand,int(self.start_codon),int(self.stop_codon)]])
################################################################################
# __main__
################################################################################
if __name__ == '__main__':
main()
#pdb.runcall(main)
| 34.938776 | 126 | 0.405885 |
6fc32efb15b48459405abcbbcb7e1ec3de3dfef5 | 1,125 | py | Python | norns/room/tests/test_routes.py | dsnowb/norns | cd78dc1566da4a1b8ffc3ac99e3dd510aea21119 | [
"MIT"
] | null | null | null | norns/room/tests/test_routes.py | dsnowb/norns | cd78dc1566da4a1b8ffc3ac99e3dd510aea21119 | [
"MIT"
] | null | null | null | norns/room/tests/test_routes.py | dsnowb/norns | cd78dc1566da4a1b8ffc3ac99e3dd510aea21119 | [
"MIT"
] | null | null | null | from django.test import TestCase
from django.urls import reverse_lazy
from model_mommy import mommy
from gear.models import Weapon
from ..models import Room, Tile
class TestRoutes(TestCase):
"""Integration tests."""
def setUp(self):
self.room = mommy.make(Room)
self.tile = mommy.make(Tile, room=self.room, desc='a tile.')
self.weapon = mommy.make(Weapon, name='sword')
self.weapon.tiles.add(self.tile)
self.weapon.save()
def tearDown(self):
Room.objects.all().delete()
Tile.objects.all().delete()
Weapon.objects.all().delete()
# def test_look_returns_weapon(self):
# """
# Validate that looking on a tile returns the weapon on that tile
# and the description on that tile.
# """
# data = {'verb': 'look', 'tile_id': self.tile.id}
# import pdb; pdb.set_trace()
# response = self.client.post(
# reverse_lazy('room', args=[self.room.id]), data=data)
# # self.assertContains(response.data, 'a tile.')
# self.assertContains(response.data, 'sword')
| 32.142857 | 74 | 0.614222 |
5cbec1616d22e96ac239afa5049a58fdc933898a | 411 | py | Python | PhysicsTools/HeppyCore/python/analyzers/Printer.py | nistefan/cmssw | ea13af97f7f2117a4f590a5e654e06ecd9825a5b | [
"Apache-2.0"
] | null | null | null | PhysicsTools/HeppyCore/python/analyzers/Printer.py | nistefan/cmssw | ea13af97f7f2117a4f590a5e654e06ecd9825a5b | [
"Apache-2.0"
] | null | null | null | PhysicsTools/HeppyCore/python/analyzers/Printer.py | nistefan/cmssw | ea13af97f7f2117a4f590a5e654e06ecd9825a5b | [
"Apache-2.0"
] | null | null | null | from PhysicsTools.HeppyCore.framework.analyzer import Analyzer
class Printer(Analyzer):
def beginLoop(self, setup):
super(Printer, self).beginLoop(setup)
self.firstEvent = True
def process(self, event):
if self.firstEvent:
event.input.Print()
self.firstEvent = False
print "printing event", event.iEv, 'var1', event.input.var1
| 27.4 | 67 | 0.627737 |
ba75fbfe0bd99305feeac7a9b30158d603dca430 | 1,601 | py | Python | packages/facilities/rtdb/python/rtdb2_mon.py | Falcons-Robocup/code | 2281a8569e7f11cbd3238b7cc7341c09e2e16249 | [
"Apache-2.0"
] | 2 | 2021-01-15T13:27:19.000Z | 2021-08-04T08:40:52.000Z | packages/facilities/rtdb/python/rtdb2_mon.py | Falcons-Robocup/code | 2281a8569e7f11cbd3238b7cc7341c09e2e16249 | [
"Apache-2.0"
] | null | null | null | packages/facilities/rtdb/python/rtdb2_mon.py | Falcons-Robocup/code | 2281a8569e7f11cbd3238b7cc7341c09e2e16249 | [
"Apache-2.0"
] | 5 | 2018-05-01T10:39:31.000Z | 2022-03-25T03:02:35.000Z | # Copyright 2020 Jan Feitsma (Falcons)
# SPDX-License-Identifier: Apache-2.0
#!/usr/bin/python
import argparse
import rtdb2tools
# Main structure of the program
if __name__ == "__main__":
# Argument parsing.
descriptionTxt = 'This tool monitors a RtDB item, continuously displaying its value until CTRL-C is pressed.\n'
exampleTxt = 'Example: rtdb2_mon.py -a 4 ACTION\n'
parser = argparse.ArgumentParser(description=descriptionTxt, epilog=exampleTxt, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-a', '--agent', help='agent ID to use', type=int, default=rtdb2tools.guessAgentId())
parser.add_argument('-f', '--frequency', help='refresh frequency in Hz', type=float, default=10)
parser.add_argument('-t', '--timestamp', help='prepend timestamp', action='store_true')
#parser.add_argument('-c', '--onchange', help='show items directly when changed, minimizing latency', action='store_true')
# TODO: zero-latency '--onchange' option requires RTDB wait_for_put, currently not implemented
parser.add_argument('-s', '--showonce', help='filter duplicates, show stale items once', action='store_true')
parser.add_argument('-p', '--path', help='database path to use', type=str, default=rtdb2tools.RTDB2_DEFAULT_PATH)
parser.add_argument('key', help='RtDB key to read')
args = parser.parse_args()
# Instantiate the monitor
r = rtdb2tools.RTDBMonitor(args.agent, args.frequency, args.path)
r.subscribe(args.key)
r.prependTimestamp = args.timestamp
r.showOnce = args.showonce
# Run
r.run()
| 47.088235 | 144 | 0.722673 |
a69130c0257a336ab857201c45e0748ff559c271 | 449 | py | Python | jobs/timeseries/fundamentals_daily.py | bordumb/wsbtrading | 32cadab1d9e2f4d37e7d028cc30f4cd0e924be92 | [
"MIT"
] | 14 | 2021-01-25T00:01:39.000Z | 2021-08-12T09:20:39.000Z | jobs/timeseries/fundamentals_daily.py | bordumb/wsbtrading | 32cadab1d9e2f4d37e7d028cc30f4cd0e924be92 | [
"MIT"
] | 15 | 2021-01-24T20:18:13.000Z | 2021-02-04T21:54:27.000Z | jobs/timeseries/fundamentals_daily.py | bordumb/wsbtrading | 32cadab1d9e2f4d37e7d028cc30f4cd0e924be92 | [
"MIT"
] | 3 | 2021-01-27T14:03:02.000Z | 2021-08-29T04:13:26.000Z | """
This job initially populates the fundamentals_daily table in Postgress from a CSV
TODO: get this automated from Quandl's API
"""
from wsbtrading import data_io
csv_path = f'../../data/prod/fundamentals/daily/fundamentals_daily_20210210.csv'
def main():
# ---------------
# Insert Data |
# ---------------
data_io.insert_csv_to_sql(table_name='fundamentals_daily', csv_path=csv_path)
if __name__ == '__main__':
main()
| 22.45 | 81 | 0.672606 |
c1d8ebc19c3b80b7f9c3065f6ea9093f3886ca5f | 216 | py | Python | django_proficiency_test/users/tests/test_models.py | RamParameswaran/Django_Proficiency_Test | b6838679b5ac2a570588d3769e966fb8188f88d0 | [
"MIT"
] | null | null | null | django_proficiency_test/users/tests/test_models.py | RamParameswaran/Django_Proficiency_Test | b6838679b5ac2a570588d3769e966fb8188f88d0 | [
"MIT"
] | null | null | null | django_proficiency_test/users/tests/test_models.py | RamParameswaran/Django_Proficiency_Test | b6838679b5ac2a570588d3769e966fb8188f88d0 | [
"MIT"
] | null | null | null | import pytest
from django_proficiency_test.users.models import User
pytestmark = pytest.mark.django_db
def test_user_get_absolute_url(user: User):
assert user.get_absolute_url() == f"/users/{user.username}/"
| 21.6 | 64 | 0.787037 |
b36f72448c154156c4c7aace3c36d8a86f4de438 | 792 | py | Python | gestion/migrations/0002_departamento.py | aitorres/sip-online | 4a1e7a7363f394a5030a2fbe2085aff46fe52b81 | [
"MIT"
] | 3 | 2018-12-23T15:37:25.000Z | 2019-05-20T00:06:29.000Z | gestion/migrations/0002_departamento.py | aitorres/sip-online | 4a1e7a7363f394a5030a2fbe2085aff46fe52b81 | [
"MIT"
] | 3 | 2019-06-24T18:22:32.000Z | 2021-06-02T00:56:55.000Z | gestion/migrations/0002_departamento.py | aitorres/sip-online | 4a1e7a7363f394a5030a2fbe2085aff46fe52b81 | [
"MIT"
] | 3 | 2018-11-17T23:31:26.000Z | 2019-05-20T00:14:17.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-11-06 01:15
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('gestion', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Departamento',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nombre', models.CharField(max_length=50)),
('codigo', models.CharField(max_length=2, unique=True)),
('jefe', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='gestion.Profesor')),
],
),
]
| 30.461538 | 114 | 0.612374 |
3f560b5bf12c88cc4cd133d2663fa8003fe12e16 | 584 | py | Python | src/upload_file/forms.py | klaskaris11/gsoc2019-anonymization | 09a4ddd2bd74dc0eecded3e5e3f03c4fc4089ce1 | [
"MIT"
] | null | null | null | src/upload_file/forms.py | klaskaris11/gsoc2019-anonymization | 09a4ddd2bd74dc0eecded3e5e3f03c4fc4089ce1 | [
"MIT"
] | 1 | 2020-09-14T16:09:05.000Z | 2020-09-14T16:09:05.000Z | src/upload_file/forms.py | klaskaris11/gsoc2019-anonymization | 09a4ddd2bd74dc0eecded3e5e3f03c4fc4089ce1 | [
"MIT"
] | 1 | 2020-07-05T14:22:13.000Z | 2020-07-05T14:22:13.000Z | from django import forms
from .models import Document, UserDocuments
from django.forms import ClearableFileInput
class UploadDocumentForm(forms.ModelForm):
# path = forms.CharField(required=False)
class Meta:
model = Document
fields = ['file']
widgets = {
'file': ClearableFileInput(attrs={'multiple': True}),
}
class UploadMultipleDocumentsForm(forms.ModelForm):
class Meta:
model = UserDocuments
fields = []
widgets = {
'file': ClearableFileInput(attrs={'multiple': True}),
}
| 23.36 | 65 | 0.630137 |
e0543959c5818df506a62a4b134c19629b25d20d | 2,145 | py | Python | tasks.py | JakeBrand/CMPUT410-E4 | 67e1454d83c35055f2b63f090c42af534b2c0018 | [
"Apache-2.0"
] | null | null | null | tasks.py | JakeBrand/CMPUT410-E4 | 67e1454d83c35055f2b63f090c42af534b2c0018 | [
"Apache-2.0"
] | null | null | null | tasks.py | JakeBrand/CMPUT410-E4 | 67e1454d83c35055f2b63f090c42af534b2c0018 | [
"Apache-2.0"
] | null | null | null | from flask import Flask, request, redirect, url_for
import sqlite3
app = Flask(__name__)
dbFile = 'db1.db'
conn = None
def get_conn():
global conn
if conn is None:
conn = sqlite3.connect(dbFile)
conn.row_factory = sqlite3.Row
return conn
@app.teardown_appcontext
def close_conn(exception):
global conn
if conn is not None:
conn.close()
conn = None
def query_db(query, args=(), one=False):
cur = get_conn().cursor()
cur.execute(query, args)
# reurnlist of dictionaries (or tuples)
result = cur.fetchall()
cur.close()
return (result[0] if result else None) if one else result
def add_task(category, priority, description):
taskTuple = (category, priority, description)
query_db('insert into tasks (category, priority, description) \
        values(?,?,?)', taskTuple)
get_conn().commit()
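# --- Added setup sketch (assumption: db1.db is created out of band) ---
# The queries above presume a `tasks` table with exactly these columns;
# a minimal one-off initialisation would be:
#
#     import sqlite3
#     con = sqlite3.connect('db1.db')
#     con.execute('create table if not exists tasks '
#                 '(category text, priority text, description text)')
#     con.commit()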
@app.route('/')
def welcome():
return '<h1>Welcome to flask lab</h1>'
@app.route('/task', methods = ['GET','POST'])
def task():
#post
if request.method == 'POST':
category = request.form['category']
priority = request.form['priority']
description = request.form['description']
add_task(category, priority, description)
return redirect(url_for('task'))
resp = ''
resp = resp + '''
<form action = "" method=post>
<p>Category<input type=text name=category></p>
<p>Priority<input type=text name=priority></p>
<p>Description<input type=text name=description></p>
<p><input type=submit value=Add></p>
</form>
'''
resp = resp + '''
<table border = "1" cellpadding = "3">
<tbody>
<tr>
<th>Category</th>
<th>Priority</th>
<th>Description</th>
</tr>
'''
for task in query_db('select * from tasks'):
resp = resp + "<tr><td>%s</td><td>%s</td><td>%s</td></tr>"%(task['category'],task['priority'],task['description'])
resp = resp + "</tbody></table>"
return resp
if __name__ == '__main__':
app.debug = True
app.run()
| 27.5 | 122 | 0.584615 |
e60e3ff86285cabaa16e8619dcb025e9affc3cb5 | 4,557 | py | Python | espnet/nets/pytorch_backend/frontends/frontend.py | roshansh-cmu/espnet | 5fa6dcc4e649dc66397c629d0030d09ecef36b80 | [
"Apache-2.0"
] | null | null | null | espnet/nets/pytorch_backend/frontends/frontend.py | roshansh-cmu/espnet | 5fa6dcc4e649dc66397c629d0030d09ecef36b80 | [
"Apache-2.0"
] | null | null | null | espnet/nets/pytorch_backend/frontends/frontend.py | roshansh-cmu/espnet | 5fa6dcc4e649dc66397c629d0030d09ecef36b80 | [
"Apache-2.0"
] | null | null | null | from typing import List, Optional, Tuple, Union
import numpy
import torch
import torch.nn as nn
from torch_complex.tensor import ComplexTensor
from espnet.nets.pytorch_backend.frontends.dnn_beamformer import DNN_Beamformer
from espnet.nets.pytorch_backend.frontends.dnn_wpe import DNN_WPE
class Frontend(nn.Module):
    def __init__(
        self,
        idim: int,
        # WPE options
        use_wpe: bool = False,
        wtype: str = "blstmp",
        wlayers: int = 3,
        wunits: int = 300,
        wprojs: int = 320,
        wdropout_rate: float = 0.0,
        taps: int = 5,
        delay: int = 3,
        use_dnn_mask_for_wpe: bool = True,
        # Beamformer options
        use_beamformer: bool = False,
        btype: str = "blstmp",
        blayers: int = 3,
        bunits: int = 300,
        bprojs: int = 320,
        bnmask: int = 2,
        badim: int = 320,
        ref_channel: int = -1,
        bdropout_rate=0.0,
    ):
        super().__init__()

        self.use_beamformer = use_beamformer
        self.use_wpe = use_wpe
        self.use_dnn_mask_for_wpe = use_dnn_mask_for_wpe
        # use frontend for all the data,
        # e.g. in the case of multi-speaker speech separation
        self.use_frontend_for_all = bnmask > 2

        if self.use_wpe:
            if self.use_dnn_mask_for_wpe:
                # Use DNN for power estimation
                # (Not observed significant gains)
                iterations = 1
            else:
                # Performing as conventional WPE, without DNN Estimator
                iterations = 2

            self.wpe = DNN_WPE(
                wtype=wtype,
                widim=idim,
                wunits=wunits,
                wprojs=wprojs,
                wlayers=wlayers,
                taps=taps,
                delay=delay,
                dropout_rate=wdropout_rate,
                iterations=iterations,
                use_dnn_mask=use_dnn_mask_for_wpe,
            )
        else:
            self.wpe = None

        if self.use_beamformer:
            self.beamformer = DNN_Beamformer(
                btype=btype,
                bidim=idim,
                bunits=bunits,
                bprojs=bprojs,
                blayers=blayers,
                bnmask=bnmask,
                dropout_rate=bdropout_rate,
                badim=badim,
                ref_channel=ref_channel,
            )
        else:
            self.beamformer = None

    def forward(
        self, x: ComplexTensor, ilens: Union[torch.LongTensor, numpy.ndarray, List[int]]
    ) -> Tuple[ComplexTensor, torch.LongTensor, Optional[ComplexTensor]]:
        assert len(x) == len(ilens), (len(x), len(ilens))
        # (B, T, F) or (B, T, C, F)
        if x.dim() not in (3, 4):
            raise ValueError(f"Input dim must be 3 or 4: {x.dim()}")
        if not torch.is_tensor(ilens):
            ilens = torch.from_numpy(numpy.asarray(ilens)).to(x.device)

        mask = None
        h = x
        if h.dim() == 4:
            if self.training:
                choices = [(False, False)] if not self.use_frontend_for_all else []
                if self.use_wpe:
                    choices.append((True, False))
                if self.use_beamformer:
                    choices.append((False, True))
                use_wpe, use_beamformer = choices[numpy.random.randint(len(choices))]
            else:
                use_wpe = self.use_wpe
                use_beamformer = self.use_beamformer

            # 1. WPE
            if use_wpe:
                # h: (B, T, C, F) -> h: (B, T, C, F)
                h, ilens, mask = self.wpe(h, ilens)

            # 2. Beamformer
            if use_beamformer:
                # h: (B, T, C, F) -> h: (B, T, F)
                h, ilens, mask = self.beamformer(h, ilens)

        return h, ilens, mask


def frontend_for(args, idim):
    return Frontend(
        idim=idim,
        # WPE options
        use_wpe=args.use_wpe,
        wtype=args.wtype,
        wlayers=args.wlayers,
        wunits=args.wunits,
        wprojs=args.wprojs,
        wdropout_rate=args.wdropout_rate,
        taps=args.wpe_taps,
        delay=args.wpe_delay,
        use_dnn_mask_for_wpe=args.use_dnn_mask_for_wpe,
        # Beamformer options
        use_beamformer=args.use_beamformer,
        btype=args.btype,
        blayers=args.blayers,
        bunits=args.bunits,
        bprojs=args.bprojs,
        bnmask=args.bnmask,
        badim=args.badim,
        ref_channel=args.ref_channel,
        bdropout_rate=args.bdropout_rate,
    )
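
# A minimal usage sketch (an illustrative addition, not part of the original
# module): run a WPE-only forward pass on random multi-channel STFT features.
# Shapes follow the (B, T, C, F) convention checked in forward(); the random
# tensors merely stand in for real STFT coefficients.
if __name__ == "__main__":
    frontend = Frontend(idim=257, use_wpe=True, use_beamformer=False)
    frontend.eval()  # avoid the random train-time choice of enhancement path
    x = ComplexTensor(torch.randn(2, 100, 4, 257), torch.randn(2, 100, 4, 257))
    h, ilens, mask = frontend(x, ilens=[100, 90])
    print(h.shape, ilens)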
| 30.583893 | 88 | 0.53544 |
fe261dbe126d3b4bc56fa6e80d15c4fb7fd096fe | 1,513 | py | Python | tests/test_sitelocation.py | marcelosalles/pyidf | c2f744211572b5e14e29522aac1421ba88addb0e | [
"Apache-2.0"
] | 19 | 2015-12-08T23:33:51.000Z | 2022-01-31T04:41:10.000Z | tests/test_sitelocation.py | marcelosalles/pyidf | c2f744211572b5e14e29522aac1421ba88addb0e | [
"Apache-2.0"
] | 2 | 2019-10-04T10:57:00.000Z | 2021-10-01T06:46:17.000Z | tests/test_sitelocation.py | marcelosalles/pyidf | c2f744211572b5e14e29522aac1421ba88addb0e | [
"Apache-2.0"
] | 7 | 2015-11-04T02:25:01.000Z | 2021-12-08T03:14:28.000Z | import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.location_and_climate import SiteLocation
log = logging.getLogger(__name__)
class TestSiteLocation(unittest.TestCase):
    def setUp(self):
        self.fd, self.path = tempfile.mkstemp()

    def tearDown(self):
        os.remove(self.path)

    def test_create_sitelocation(self):
        pyidf.validation_level = ValidationLevel.error

        obj = SiteLocation()
        # alpha
        var_name = "Name"
        obj.name = var_name
        # real
        var_latitude = 0.0
        obj.latitude = var_latitude
        # real
        var_longitude = 0.0
        obj.longitude = var_longitude
        # real
        var_time_zone = 1.0
        obj.time_zone = var_time_zone
        # real
        var_elevation = 4299.99995
        obj.elevation = var_elevation

        idf = IDF()
        idf.add(obj)
        idf.save(self.path, check=False)

        with open(self.path, mode='r') as f:
            for line in f:
                log.debug(line.strip())

        idf2 = IDF(self.path)
        self.assertEqual(idf2.sitelocations[0].name, var_name)
        self.assertAlmostEqual(idf2.sitelocations[0].latitude, var_latitude)
        self.assertAlmostEqual(idf2.sitelocations[0].longitude, var_longitude)
        self.assertAlmostEqual(idf2.sitelocations[0].time_zone, var_time_zone)
        self.assertAlmostEqual(idf2.sitelocations[0].elevation, var_elevation) | 28.018519 | 78 | 0.651685 |
1e22b0477c279da6bb1c08c225642e47d39bdf6e | 1,616 | py | Python | main.py | sgongar/Ping | abe8f4eea3562ba29ae9300f030c7f1b1ba94e9d | [
"MIT"
] | null | null | null | main.py | sgongar/Ping | abe8f4eea3562ba29ae9300f030c7f1b1ba94e9d | [
"MIT"
] | null | null | null | main.py | sgongar/Ping | abe8f4eea3562ba29ae9300f030c7f1b1ba94e9d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from bluetooth import BluetoothSocket, RFCOMM
from time import sleep
from firebase import send_time, set_up_firebase
import I2C_LCD_driver
from utils import get_time
class PingDetect:
    def __init__(self):
        """Set up Firebase and the LCD, then start listening for pings."""
        self.db = set_up_firebase()
        self.user_id = 'Morty'
        self.mylcd = I2C_LCD_driver.lcd()

        self.receive_data()

    def receive_data(self):
        """Read ping codes from the Bluetooth device and log each event.

        Code '1' is a regular ping; code '2' is an alarm.
        """
        bd_addr = "00:18:E4:35:03:3A"
        port = 1

        sock = BluetoothSocket(RFCOMM)
        sock.connect((bd_addr, port))

        self.display_info('System ready', 1)

        try:
            while True:
                data = str(sock.recv(10), 'utf-8')
                print(data)
                if '1' in data:
                    # Regular ping: log it and show the time on the LCD
                    actual_time = get_time()
                    self.mylcd.lcd_clear()
                    msg = 'Ping at {}'.format(actual_time)
                    send_time(self.db, self.user_id)
                    self.display_info('System ready', 1)
                    self.display_info(msg, 2)
                if '2' in data:
                    # Alarm: same flow, but keep the message visible for 2 s
                    actual_time = get_time()
                    self.mylcd.lcd_clear()
                    msg = 'ALARM {}'.format(actual_time)
                    send_time(self.db, self.user_id)
                    self.display_info('System ready', 1)
                    self.display_info(msg, 2)
                    sleep(2.0)
        finally:
            sock.close()

    def display_info(self, msg, row):
        """Write msg on LCD row `row`, starting at column 0."""
        self.mylcd.lcd_display_string(msg, row, 0)

if __name__ == "__main__":
    PingDetect()
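
# For reference, a hypothetical sketch of the helpers imported above; the
# real utils/firebase modules are not shown in this file, so this is only an
# assumption about their behaviour:
#
#   def get_time():                      # utils.get_time
#       from datetime import datetime
#       return datetime.now().strftime('%H:%M:%S')
#
#   send_time(db, user_id) presumably records a timestamped ping for the
#   given user in the Firebase database returned by set_up_firebase().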
| 24.484848 | 54 | 0.513614 |
c02d6c91bfc9e7b6e111b6619f4a5c824c402cb3 | 654 | py | Python | PythonScriptingEngine/TestScripts/TestScript.py | ayasman/PythonScriptingEngine | af0d12d42b6d00d81558405d2b559cc27725a0e9 | [
"MIT"
] | null | null | null | PythonScriptingEngine/TestScripts/TestScript.py | ayasman/PythonScriptingEngine | af0d12d42b6d00d81558405d2b559cc27725a0e9 | [
"MIT"
] | null | null | null | PythonScriptingEngine/TestScripts/TestScript.py | ayasman/PythonScriptingEngine | af0d12d42b6d00d81558405d2b559cc27725a0e9 | [
"MIT"
] | null | null | null | import clr
import System
clr.AddReference("System.Core")
clr.AddReference("System.Windows.Forms")
clr.ImportExtensions(System.Linq)
from System import String
from System import Guid
from TestIronPython.Scripting.Interfaces import IActionScript
class Script1(IActionScript):
    def Load(this):
        print "1"

    def Unload(this):
        print "2"

    def Execute(this):
        print "3"

    def CanExecute(this):
        return True
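
# Host-side registration: the GUIDs below presumably identify this plugin and
# the module it belongs to; the host is assumed to resolve scripts through
# ScriptingManager using these IDs.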
pluginID = Guid.Parse("378F108E-A65A-4599-89EC-26F99872B8FB")
relatedModuleID = Guid.Parse("407A91ED-5937-4964-9B26-10E3D17D26DB")
plugin = Script1()
ScriptingManager.RegisterScript(pluginID, relatedModuleID, plugin) | 28.434783 | 68 | 0.744648 |