| hexsha (string, len 40) | size (int64, 3-1.03M) | ext (string, 10 classes) | lang (string, 1 value) | max_stars_repo_path (string, len 3-972) | max_stars_repo_name (string, len 6-130) | max_stars_repo_head_hexsha (string, len 40-78) | max_stars_repo_licenses (list, len 1-10) | max_stars_count (int64, 1-191k, nullable) | max_stars_repo_stars_event_min_datetime (string, len 24, nullable) | max_stars_repo_stars_event_max_datetime (string, len 24, nullable) | max_issues_repo_path (string, len 3-972) | max_issues_repo_name (string, len 6-130) | max_issues_repo_head_hexsha (string, len 40-78) | max_issues_repo_licenses (list, len 1-10) | max_issues_count (int64, 1-116k, nullable) | max_issues_repo_issues_event_min_datetime (string, len 24, nullable) | max_issues_repo_issues_event_max_datetime (string, len 24, nullable) | max_forks_repo_path (string, len 3-972) | max_forks_repo_name (string, len 6-130) | max_forks_repo_head_hexsha (string, len 40-78) | max_forks_repo_licenses (list, len 1-10) | max_forks_count (int64, 1-105k, nullable) | max_forks_repo_forks_event_min_datetime (string, len 24, nullable) | max_forks_repo_forks_event_max_datetime (string, len 24, nullable) | content (string, len 3-1.03M) | avg_line_length (float64, 1.13-941k) | max_line_length (int64, 2-941k) | alphanum_fraction (float64, 0-1) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
48515b328f979a13dbeabdf77684ab84524531f5 | 26,641 | py | Python | VMBackup/main/freezesnapshotter.py | shridpant/azure-linux-extensions | 4b5e66f33d5b93b15b427a9438931f0414f12a6e | ["Apache-2.0"] | 266 | 2015-01-05T04:13:15.000Z | 2022-03-24T17:52:51.000Z | VMBackup/main/freezesnapshotter.py | shridpant/azure-linux-extensions | 4b5e66f33d5b93b15b427a9438931f0414f12a6e | ["Apache-2.0"] | 703 | 2015-01-27T07:16:57.000Z | 2022-03-29T09:01:23.000Z | VMBackup/main/freezesnapshotter.py | shridpant/azure-linux-extensions | 4b5e66f33d5b93b15b427a9438931f0414f12a6e | ["Apache-2.0"] | 276 | 2015-01-20T11:11:15.000Z | 2022-03-24T12:40:49.000Z |
#!/usr/bin/env python
#
# VM Backup extension
#
# Copyright 2014 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
try:
import urlparse as urlparser
except ImportError:
import urllib.parse as urlparser
import traceback
import datetime
try:
import ConfigParser as ConfigParsers
except ImportError:
import configparser as ConfigParsers
import multiprocessing as mp
import time
import json
from common import CommonVariables
from HttpUtil import HttpUtil
from Utils import Status
from Utils import HandlerUtil
from fsfreezer import FsFreezer
from guestsnapshotter import GuestSnapshotter
from hostsnapshotter import HostSnapshotter
from Utils import HostSnapshotObjects
import ExtensionErrorCodeHelper
# needs to be implemented in the next release
#from dhcpHandler import DhcpHandler
class FreezeSnapshotter(object):
"""description of class"""
def __init__(self, logger, hutil , freezer, g_fsfreeze_on, para_parser, takeCrashConsistentSnapshot):
self.logger = logger
self.configfile = '/etc/azure/vmbackup.conf'
self.hutil = hutil
self.freezer = freezer
self.g_fsfreeze_on = g_fsfreeze_on
self.para_parser = para_parser
if(para_parser.snapshotTaskToken == None):
para_parser.snapshotTaskToken = '' # set snapshotTaskToken to an empty string when it is null
self.logger.log('snapshotTaskToken : ' + str(para_parser.snapshotTaskToken))
self.takeSnapshotFrom = CommonVariables.firstHostThenGuest
self.isManaged = False
self.taskId = self.para_parser.taskId
self.hostIp = '168.63.129.16'
self.extensionErrorCode = ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.success
self.takeCrashConsistentSnapshot = takeCrashConsistentSnapshot
self.logger.log('FreezeSnapshotter : takeCrashConsistentSnapshot = ' + str(self.takeCrashConsistentSnapshot))
#implement in next release
'''
# fetching wireserver IP from DHCP
self.dhcpHandlerObj = None
try:
self.dhcpHandlerObj = DhcpHandler(self.logger)
self.hostIp = self.dhcpHandlerObj.getHostEndoint()
except Exception as e:
errorMsg = "Failed to get hostIp from DHCP with error: %s, stack trace: %s" % (str(e), traceback.format_exc())
self.logger.log(errorMsg, True, 'Error')
self.hostIp = '168.63.129.16'
'''
self.logger.log( "hostIp : " + self.hostIp)
try:
if(para_parser.customSettings != None and para_parser.customSettings != ''):
self.logger.log('customSettings : ' + str(para_parser.customSettings))
customSettings = json.loads(para_parser.customSettings)
snapshotMethodConfigValue = self.hutil.get_strvalue_from_configfile(CommonVariables.SnapshotMethod,customSettings['takeSnapshotFrom'])
self.logger.log('snapshotMethodConfigValue : ' + str(snapshotMethodConfigValue))
if snapshotMethodConfigValue != None and snapshotMethodConfigValue != '':
self.takeSnapshotFrom = snapshotMethodConfigValue
else:
self.takeSnapshotFrom = customSettings['takeSnapshotFrom']
if(para_parser.includedDisks != None and CommonVariables.isAnyDiskExcluded in para_parser.includedDisks.keys()):
if (para_parser.includedDisks[CommonVariables.isAnyDiskExcluded] == True and (para_parser.includeLunList == None or len(para_parser.includeLunList) == 0)):
self.logger.log('Some disks are excluded from backup and LUN list is not present. Setting the snapshot mode to onlyGuest.')
self.takeSnapshotFrom = CommonVariables.onlyGuest
#Check if snapshot uri has special characters
if self.hutil.UriHasSpecialCharacters(self.para_parser.blobs):
self.logger.log('Some disk blob Uris have special characters.')
waDiskLunList= []
if "waDiskLunList" in customSettings.keys() and customSettings['waDiskLunList'] != None :
waDiskLunList = customSettings['waDiskLunList']
self.logger.log('WA Disk Lun List ' + str(waDiskLunList))
if waDiskLunList != None and len(waDiskLunList) != 0 and para_parser.includeLunList != None and len(para_parser.includeLunList) != 0 :
for crpLunNo in para_parser.includeLunList :
if crpLunNo in waDiskLunList :
self.logger.log('WA disk is present on the VM. Setting the snapshot mode to onlyHost.')
self.takeSnapshotFrom = CommonVariables.onlyHost
break
if(para_parser.includedDisks != None and CommonVariables.isAnyWADiskIncluded in para_parser.includedDisks.keys()):
if (para_parser.includedDisks[CommonVariables.isAnyWADiskIncluded] == True):
self.logger.log('WA disk is included. Setting the snapshot mode to onlyHost.')
self.takeSnapshotFrom = CommonVariables.onlyHost
if(para_parser.includedDisks != None and CommonVariables.isVmgsBlobIncluded in para_parser.includedDisks.keys()):
if (para_parser.includedDisks[CommonVariables.isVmgsBlobIncluded] == True):
self.logger.log('Vmgs Blob is included. Setting the snapshot mode to onlyHost.')
self.takeSnapshotFrom = CommonVariables.onlyHost
self.isManaged = customSettings['isManagedVm']
if( "backupTaskId" in customSettings.keys()):
self.taskId = customSettings["backupTaskId"]
except Exception as e:
errMsg = 'Failed to serialize customSettings with error: %s, stack trace: %s' % (str(e), traceback.format_exc())
self.logger.log(errMsg, True, 'Error')
self.isManaged = True
self.logger.log('[FreezeSnapshotter] isManaged flag : ' + str(self.isManaged))
def doFreezeSnapshot(self):
run_result = CommonVariables.success
run_status = 'success'
all_failed = False
unable_to_sleep = False
""" Do Not remove below HttpUtil object creation. This is to ensure HttpUtil singleton object is created before freeze."""
http_util = HttpUtil(self.logger)
if(self.takeSnapshotFrom == CommonVariables.onlyGuest):
run_result, run_status, blob_snapshot_info_array, all_failed, all_snapshots_failed, unable_to_sleep, is_inconsistent = self.takeSnapshotFromGuest()
elif(self.takeSnapshotFrom == CommonVariables.firstGuestThenHost):
run_result, run_status, blob_snapshot_info_array, all_failed, unable_to_sleep, is_inconsistent = self.takeSnapshotFromFirstGuestThenHost()
elif(self.takeSnapshotFrom == CommonVariables.firstHostThenGuest):
run_result, run_status, blob_snapshot_info_array, all_failed, unable_to_sleep, is_inconsistent = self.takeSnapshotFromFirstHostThenGuest()
elif(self.takeSnapshotFrom == CommonVariables.onlyHost):
run_result, run_status, blob_snapshot_info_array, all_failed, unable_to_sleep, is_inconsistent = self.takeSnapshotFromOnlyHost()
else :
self.logger.log('Snapshot method did not match any listed type, taking firstHostThenGuest as default')
run_result, run_status, blob_snapshot_info_array, all_failed, unable_to_sleep, is_inconsistent = self.takeSnapshotFromFirstHostThenGuest()
self.logger.log('doFreezeSnapshot : run_result - {0} run_status - {1} all_failed - {2} unable_to_sleep - {3} is_inconsistent - {4} values post snapshot'.format(str(run_result), str(run_status), str(all_failed), str(unable_to_sleep), str(is_inconsistent)))
if (run_result == CommonVariables.success):
run_result, run_status = self.updateErrorCode(blob_snapshot_info_array, all_failed, unable_to_sleep, is_inconsistent)
snapshot_info_array = self.update_snapshotinfoarray(blob_snapshot_info_array)
if not (run_result == CommonVariables.success):
self.hutil.SetExtErrorCode(self.extensionErrorCode)
return run_result, run_status, snapshot_info_array
def update_snapshotinfoarray(self, blob_snapshot_info_array):
snapshot_info_array = []
self.logger.log('updating snapshot info array from blob snapshot info')
if blob_snapshot_info_array != None and blob_snapshot_info_array !=[]:
for blob_snapshot_info in blob_snapshot_info_array:
if blob_snapshot_info != None:
snapshot_info_array.append(Status.SnapshotInfoObj(blob_snapshot_info.isSuccessful, blob_snapshot_info.snapshotUri, blob_snapshot_info.errorMessage))
return snapshot_info_array
def updateErrorCode(self, blob_snapshot_info_array, all_failed, unable_to_sleep, is_inconsistent):
run_result = CommonVariables.success
any_failed = False
run_status = 'success'
if unable_to_sleep:
run_result = CommonVariables.error
run_status = 'error'
error_msg = 'T:S Machine unable to sleep'
self.logger.log(error_msg, True, 'Error')
elif is_inconsistent == True :
run_result = CommonVariables.error
run_status = 'error'
error_msg = 'Snapshots are inconsistent'
self.logger.log(error_msg, True, 'Error')
elif blob_snapshot_info_array != None:
for blob_snapshot_info in blob_snapshot_info_array:
if blob_snapshot_info != None and blob_snapshot_info.errorMessage != None :
if 'The rate of snapshot blob calls is exceeded' in blob_snapshot_info.errorMessage:
run_result = CommonVariables.FailedRetryableSnapshotRateExceeded
run_status = 'error'
error_msg = 'Retrying when snapshot failed with SnapshotRateExceeded'
self.extensionErrorCode = ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.FailedRetryableSnapshotRateExceeded
self.logger.log(error_msg, True, 'Error')
break
elif 'The snapshot count against this blob has been exceeded' in blob_snapshot_info.errorMessage:
run_result = CommonVariables.FailedSnapshotLimitReached
run_status = 'error'
error_msg = 'T:S Enable failed with FailedSnapshotLimitReached error'
self.extensionErrorCode = ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.FailedSnapshotLimitReached
error_msg = error_msg + ExtensionErrorCodeHelper.ExtensionErrorCodeHelper.StatusCodeStringBuilder(self.extensionErrorCode)
self.logger.log(error_msg, True, 'Error')
break
elif blob_snapshot_info.isSuccessful == False and not all_failed:
any_failed = True
elif blob_snapshot_info != None and blob_snapshot_info.isSuccessful == False:
any_failed = True
if run_result == CommonVariables.success and all_failed:
run_status = 'error'
run_result = CommonVariables.FailedRetryableSnapshotFailedNoNetwork
error_msg = 'T:S Enable failed with FailedRetryableSnapshotFailedNoNetwork error'
self.extensionErrorCode = ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.FailedRetryableSnapshotFailedNoNetwork
error_msg = error_msg + ExtensionErrorCodeHelper.ExtensionErrorCodeHelper.StatusCodeStringBuilder(self.extensionErrorCode)
self.logger.log(error_msg, True, 'Error')
elif run_result == CommonVariables.success and any_failed:
run_result = CommonVariables.FailedRetryableSnapshotFailedNoNetwork
error_msg = 'T:S Enable failed with FailedRetryableSnapshotFailedRestrictedNetwork error'
self.extensionErrorCode = ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.FailedRetryableSnapshotFailedRestrictedNetwork
error_msg = error_msg + ExtensionErrorCodeHelper.ExtensionErrorCodeHelper.StatusCodeStringBuilder(self.extensionErrorCode)
run_status = 'error'
self.logger.log(error_msg, True, 'Error')
return run_result, run_status
def freeze(self):
try:
timeout = self.hutil.get_intvalue_from_configfile('timeout',60)
self.logger.log('T:S freeze, timeout value ' + str(timeout))
time_before_freeze = datetime.datetime.now()
freeze_result,timedout = self.freezer.freeze_safe(timeout)
time_after_freeze = datetime.datetime.now()
freezeTimeTaken = time_after_freeze-time_before_freeze
self.logger.log('T:S ***** freeze, time_before_freeze=' + str(time_before_freeze) + ", time_after_freeze=" + str(time_after_freeze) + ", freezeTimeTaken=" + str(freezeTimeTaken))
HandlerUtil.HandlerUtility.add_to_telemetery_data("FreezeTime", str(time_after_freeze-time_before_freeze-datetime.timedelta(seconds=5)))
run_result = CommonVariables.success
run_status = 'success'
all_failed= False
is_inconsistent = False
self.logger.log('T:S freeze result ' + str(freeze_result) + ', timedout :' + str(timedout))
if (timedout == True):
run_result = CommonVariables.FailedFsFreezeTimeout
run_status = 'error'
error_msg = 'T:S ###### Enable failed with error: freeze took longer than timeout'
self.extensionErrorCode = ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.FailedRetryableFsFreezeTimeout
error_msg = error_msg + ExtensionErrorCodeHelper.ExtensionErrorCodeHelper.StatusCodeStringBuilder(self.extensionErrorCode)
self.logger.log(error_msg, True, 'Error')
elif(freeze_result is not None and len(freeze_result.errors) > 0 and CommonVariables.unable_to_open_err_string in str(freeze_result)):
run_result = CommonVariables.FailedUnableToOpenMount
run_status = 'error'
error_msg = 'T:S Enable failed with error: ' + str(freeze_result)
self.extensionErrorCode = ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.FailedRetryableUnableToOpenMount
error_msg = error_msg + ExtensionErrorCodeHelper.ExtensionErrorCodeHelper.StatusCodeStringBuilder(self.extensionErrorCode)
self.logger.log(error_msg, True, 'Warning')
elif(freeze_result is not None and len(freeze_result.errors) > 0):
run_result = CommonVariables.FailedFsFreezeFailed
run_status = 'error'
error_msg = 'T:S Enable failed with error: ' + str(freeze_result)
self.extensionErrorCode = ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.FailedRetryableFsFreezeFailed
error_msg = error_msg + ExtensionErrorCodeHelper.ExtensionErrorCodeHelper.StatusCodeStringBuilder(self.extensionErrorCode)
self.logger.log(error_msg, True, 'Warning')
except Exception as e:
errMsg = 'Failed to do the freeze with error: %s, stack trace: %s' % (str(e), traceback.format_exc())
self.logger.log(errMsg, True, 'Error')
run_result = CommonVariables.error
run_status = 'error'
return run_result, run_status
def takeSnapshotFromGuest(self):
run_result = CommonVariables.success
run_status = 'success'
all_failed= False
is_inconsistent = False
unable_to_sleep = False
blob_snapshot_info_array = None
all_snapshots_failed = False
try:
if( self.para_parser.blobs == None or len(self.para_parser.blobs) == 0) :
run_result = CommonVariables.FailedRetryableSnapshotFailedNoNetwork
run_status = 'error'
error_msg = 'T:S taking snapshot failed as blobs are empty or none'
self.logger.log(error_msg, True, 'Error')
all_failed = True
all_snapshots_failed = True
return run_result, run_status, blob_snapshot_info_array, all_failed, all_snapshots_failed, unable_to_sleep, is_inconsistent
if self.g_fsfreeze_on :
run_result, run_status = self.freeze()
if(self.para_parser is not None and self.is_command_timedout(self.para_parser) == True):
self.hutil.SetExtErrorCode(ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.FailedGuestAgentInvokedCommandTooLate)
run_result = CommonVariables.FailedGuestAgentInvokedCommandTooLate
run_status = 'error'
all_failed = True
all_snapshots_failed = True
self.logger.log('T:S takeSnapshotFromGuest : Thawing as failing due to CRP timeout', True, 'Error')
self.freezer.thaw_safe()
elif(run_result == CommonVariables.success or self.takeCrashConsistentSnapshot == True):
HandlerUtil.HandlerUtility.add_to_telemetery_data(CommonVariables.snapshotCreator, CommonVariables.guestExtension)
snap_shotter = GuestSnapshotter(self.logger, self.hutil)
self.logger.log('T:S doing snapshot now...')
time_before_snapshot = datetime.datetime.now()
snapshot_result, blob_snapshot_info_array, all_failed, is_inconsistent, unable_to_sleep, all_snapshots_failed = snap_shotter.snapshotall(self.para_parser, self.freezer, self.g_fsfreeze_on)
time_after_snapshot = datetime.datetime.now()
snapshotTimeTaken = time_after_snapshot-time_before_snapshot
self.logger.log('T:S ***** takeSnapshotFromGuest, time_before_snapshot=' + str(time_before_snapshot) + ", time_after_snapshot=" + str(time_after_snapshot) + ", snapshotTimeTaken=" + str(snapshotTimeTaken))
HandlerUtil.HandlerUtility.add_to_telemetery_data("snapshotTimeTaken", str(snapshotTimeTaken))
self.logger.log('T:S snapshotall ends...', True)
except Exception as e:
errMsg = 'Failed to do the snapshot with error: %s, stack trace: %s' % (str(e), traceback.format_exc())
self.logger.log(errMsg, True, 'Error')
run_result = CommonVariables.error
run_status = 'error'
return run_result, run_status, blob_snapshot_info_array, all_failed, all_snapshots_failed, unable_to_sleep, is_inconsistent
def takeSnapshotFromFirstGuestThenHost(self):
run_result = CommonVariables.success
run_status = 'success'
all_failed= False
is_inconsistent = False
unable_to_sleep = False
blob_snapshot_info_array = None
all_snapshots_failed = False
run_result, run_status, blob_snapshot_info_array, all_failed, all_snapshots_failed, unable_to_sleep, is_inconsistent = self.takeSnapshotFromGuest()
if(all_snapshots_failed):
try:
#to make sure binary is thawed
self.logger.log('[takeSnapshotFromFirstGuestThenHost] : Thawing again post the guest snapshotting failure')
self.freezer.thaw_safe()
except Exception as e:
self.logger.log('[takeSnapshotFromFirstGuestThenHost] : Exception in Thaw %s, stack trace: %s' % (str(e), traceback.format_exc()))
run_result, run_status, blob_snapshot_info_array,all_failed, unable_to_sleep, is_inconsistent = self.takeSnapshotFromOnlyHost()
return run_result, run_status, blob_snapshot_info_array, all_failed, unable_to_sleep, is_inconsistent
def takeSnapshotFromFirstHostThenGuest(self):
run_result = CommonVariables.success
run_status = 'success'
all_failed= False
is_inconsistent = False
unable_to_sleep = False
blob_snapshot_info_array = None
snap_shotter = HostSnapshotter(self.logger, self.hostIp)
pre_snapshot_statuscode, responseBody = snap_shotter.pre_snapshot(self.para_parser, self.taskId)
if(pre_snapshot_statuscode == 200 or pre_snapshot_statuscode == 201):
run_result, run_status, blob_snapshot_info_array, all_failed, unable_to_sleep, is_inconsistent = self.takeSnapshotFromOnlyHost()
else:
run_result, run_status, blob_snapshot_info_array, all_failed, all_snapshots_failed, unable_to_sleep, is_inconsistent = self.takeSnapshotFromGuest()
if all_snapshots_failed and run_result != CommonVariables.success:
self.extensionErrorCode = ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.FailedRetryableSnapshotFailedNoNetwork
elif run_result != CommonVariables.success :
self.extensionErrorCode = ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.FailedRetryableSnapshotFailedRestrictedNetwork
return run_result, run_status, blob_snapshot_info_array, all_failed, unable_to_sleep, is_inconsistent
def takeSnapshotFromOnlyHost(self):
run_result = CommonVariables.success
run_status = 'success'
all_failed= False
is_inconsistent = False
unable_to_sleep = False
blob_snapshot_info_array = None
self.logger.log('Taking Snapshot through Host')
HandlerUtil.HandlerUtility.add_to_telemetery_data(CommonVariables.snapshotCreator, CommonVariables.backupHostService)
if self.g_fsfreeze_on :
run_result, run_status = self.freeze()
if(self.para_parser is not None and self.is_command_timedout(self.para_parser) == True):
self.hutil.SetExtErrorCode(ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.FailedGuestAgentInvokedCommandTooLate)
run_result = CommonVariables.FailedGuestAgentInvokedCommandTooLate
run_status = 'error'
all_failed = True
self.logger.log('T:S takeSnapshotFromOnlyHost : Thawing as failing due to CRP timeout', True, 'Error')
self.freezer.thaw_safe()
elif(run_result == CommonVariables.success or self.takeCrashConsistentSnapshot == True):
snap_shotter = HostSnapshotter(self.logger, self.hostIp)
self.logger.log('T:S doing snapshot now...')
time_before_snapshot = datetime.datetime.now()
blob_snapshot_info_array, all_failed, is_inconsistent, unable_to_sleep = snap_shotter.snapshotall(self.para_parser, self.freezer, self.g_fsfreeze_on, self.taskId)
time_after_snapshot = datetime.datetime.now()
snapshotTimeTaken = time_after_snapshot-time_before_snapshot
self.logger.log('T:S takeSnapshotFromHost, time_before_snapshot=' + str(time_before_snapshot) + ", time_after_snapshot=" + str(time_after_snapshot) + ", snapshotTimeTaken=" + str(snapshotTimeTaken))
HandlerUtil.HandlerUtility.add_to_telemetery_data("snapshotTimeTaken", str(snapshotTimeTaken))
self.logger.log('T:S snapshotall ends...', True)
return run_result, run_status, blob_snapshot_info_array, all_failed, unable_to_sleep, is_inconsistent
def is_command_timedout(self, para_parser):
result = False
dateTimeNow = datetime.datetime.utcnow()
try:
try:
snap_shotter = HostSnapshotter(self.logger, self.hostIp)
pre_snapshot_statuscode,responseBody = snap_shotter.pre_snapshot(self.para_parser, self.taskId)
if(int(pre_snapshot_statuscode) == 200 or int(pre_snapshot_statuscode) == 201) and (responseBody != None and responseBody != "") :
response = json.loads(responseBody)
dateTimeNow = datetime.datetime(response['responseTime']['year'], response['responseTime']['month'], response['responseTime']['day'], response['responseTime']['hour'], response['responseTime']['minute'], response['responseTime']['second'])
self.logger.log('Date and time extracted from pre-snapshot request: '+ str(dateTimeNow))
except Exception as e:
self.logger.log('Error in getting Host time, falling back to using system time. Exception %s, stack trace: %s' % (str(e), traceback.format_exc()))
if(para_parser is not None and para_parser.commandStartTimeUTCTicks is not None and para_parser.commandStartTimeUTCTicks != ""):
utcTicksLong = int(para_parser.commandStartTimeUTCTicks)
self.logger.log('utcTicks in long format' + str(utcTicksLong))
commandStartTime = self.convert_time(utcTicksLong)
self.logger.log('command start time is ' + str(commandStartTime) + " and utcNow is " + str(dateTimeNow))
timespan = dateTimeNow - commandStartTime
MAX_TIMESPAN = 140 * 60 # in seconds
total_span_in_seconds = self.timedelta_total_seconds(timespan)
self.logger.log('timespan: ' + str(timespan) + ', total_span_in_seconds: ' + str(total_span_in_seconds) + ', MAX_TIMESPAN: ' + str(MAX_TIMESPAN))
if total_span_in_seconds > MAX_TIMESPAN :
self.logger.log('CRP timeout limit has reached, should abort.')
result = True
except Exception as e:
self.logger.log('T:S is_command_timedout : Exception %s, stack trace: %s' % (str(e), traceback.format_exc()))
return result
def convert_time(self, utcTicks):
return datetime.datetime(1, 1, 1) + datetime.timedelta(microseconds = utcTicks / 10)
def timedelta_total_seconds(self, delta):
if not hasattr(datetime.timedelta, 'total_seconds'):
return delta.days * 86400 + delta.seconds
else:
return delta.total_seconds()
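
For reference, a minimal sketch (not part of the source file above) of the tick arithmetic that `convert_time` and `is_command_timedout` rely on: `commandStartTimeUTCTicks` is assumed to be .NET-style ticks, i.e. 100-nanosecond intervals counted from 0001-01-01, so dividing by 10 yields microseconds. The helper names `to_ticks` and `from_ticks` are illustrative only.

```python
import datetime

def to_ticks(dt):
    # 100 ns ticks since 0001-01-01 (the .NET convention convert_time assumes)
    return (dt - datetime.datetime(1, 1, 1)) // datetime.timedelta(microseconds=1) * 10

def from_ticks(ticks):
    # mirrors FreezeSnapshotter.convert_time
    return datetime.datetime(1, 1, 1) + datetime.timedelta(microseconds=ticks / 10)

start = datetime.datetime(2021, 1, 1, 12, 30, 0)
assert from_ticks(to_ticks(start)) == start
```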
| 59.334076 | 263 | 0.68282 |
b7e2638b6d46303e411c950202b8734b0b69224f | 2,476 | py | Python | stable_baselines/common/input.py | erniejunior/stable-baselines | 9f3346f2538e4efb3d32503d585b6a627b04fa74 | ["MIT"] | 49 | 2020-07-24T18:17:12.000Z | 2022-01-04T15:30:52.000Z | stable_baselines/common/input.py | erniejunior/stable-baselines | 9f3346f2538e4efb3d32503d585b6a627b04fa74 | ["MIT"] | 14 | 2020-07-21T20:21:08.000Z | 2022-03-12T00:42:18.000Z | stable_baselines/common/input.py | erniejunior/stable-baselines | 9f3346f2538e4efb3d32503d585b6a627b04fa74 | ["MIT"] | 5 | 2020-07-27T12:35:00.000Z | 2021-07-19T03:04:21.000Z |
import numpy as np
import tensorflow as tf
from gym.spaces import Discrete, Box, MultiBinary, MultiDiscrete
def observation_input(ob_space, batch_size=None, name='Ob', scale=False):
"""
Build observation input with encoding depending on the observation space type
When using Box ob_space, the input will be normalized to [0, 1] using the bounds ob_space.low and ob_space.high.
:param ob_space: (Gym Space) The observation space
:param batch_size: (int) batch size for input
(default is None, so that resulting input placeholder can take tensors with any batch size)
:param name: (str) tensorflow variable name for input placeholder
:param scale: (bool) whether or not to scale the input
:return: (TensorFlow Tensor, TensorFlow Tensor) input_placeholder, processed_input_tensor
"""
if isinstance(ob_space, Discrete):
input_x = tf.placeholder(shape=(batch_size,), dtype=tf.int32, name=name)
processed_x = tf.to_float(tf.one_hot(input_x, ob_space.n))
return input_x, processed_x
elif isinstance(ob_space, Box):
input_x = tf.placeholder(shape=(batch_size,) + ob_space.shape, dtype=ob_space.dtype, name=name)
processed_x = tf.to_float(input_x)
# rescale to [0, 1] if the bounds are defined
if (scale and
not np.any(np.isinf(ob_space.low)) and not np.any(np.isinf(ob_space.high)) and
np.any((ob_space.high - ob_space.low) != 0)):
# equivalent to processed_x / 255.0 when bounds are set to [0, 255]
processed_x = ((processed_x - ob_space.low) / (ob_space.high - ob_space.low))
return input_x, processed_x
elif isinstance(ob_space, MultiBinary):
input_x = tf.placeholder(shape=(batch_size, ob_space.n), dtype=tf.int32, name=name)
processed_x = tf.to_float(input_x)
return input_x, processed_x
elif isinstance(ob_space, MultiDiscrete):
input_x = tf.placeholder(shape=(batch_size, len(ob_space.nvec)), dtype=tf.int32, name=name)
processed_x = tf.concat([tf.to_float(tf.one_hot(input_split, ob_space.nvec[i]))
for i, input_split in enumerate(tf.split(input_x, len(ob_space.nvec), axis=-1))],
axis=-1)
return input_x, processed_x
else:
raise NotImplementedError("Error: the model does not support input space of type {}".format(
type(ob_space).__name__))
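
A hedged usage sketch for `observation_input` (not part of the file above), assuming the TF1-style API used here plus `gym`; the 84x84x3 image shape and [0, 255] pixel bounds are illustrative assumptions.

```python
import numpy as np
from gym.spaces import Box

# image-like observations: 84x84 RGB pixels in [0, 255]
ob_space = Box(low=0, high=255, shape=(84, 84, 3), dtype=np.uint8)

# obs_ph keeps the raw dtype; processed_obs is a float tensor rescaled to [0, 1]
obs_ph, processed_obs = observation_input(ob_space, batch_size=None, scale=True)
```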
| 48.54902 | 118 | 0.672456 |
290291a1bdc292feef8381cf9cc85bf2aaaa0df9 | 2,249 | py | Python | tests/test_ba/test_assetmanager.py | SahandAslani/ballistica | 7e3814cd2a1920ea8f5820cb1cdbb4dc5420d30e | ["MIT"] | 2 | 2020-07-02T22:18:58.000Z | 2020-07-02T22:19:49.000Z | tests/test_ba/test_assetmanager.py | Awesome-Logic/ballistica | 233a4a4f7840c9c666a1809626b6993a4b145349 | ["MIT"] | null | null | null | tests/test_ba/test_assetmanager.py | Awesome-Logic/ballistica | 233a4a4f7840c9c666a1809626b6993a4b145349 | ["MIT"] | null | null | null |
# Copyright (c) 2011-2020 Eric Froemling
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# -----------------------------------------------------------------------------
"""Testing asset manager functionality."""
from __future__ import annotations
from typing import TYPE_CHECKING
import weakref
import tempfile
from pathlib import Path
# noinspection PyProtectedMember
from ba._assetmanager import AssetManager
from bacommon.assets import AssetPackageFlavor
# import pytest
if TYPE_CHECKING:
pass
def test_assetmanager() -> None:
"""Testing."""
# Disabling for now...
if bool(False):
with tempfile.TemporaryDirectory() as tmpdir:
manager = AssetManager(rootdir=Path(tmpdir))
wref = weakref.ref(manager)
manager.start()
gather = manager.launch_gather(packages=['a@2'],
flavor=AssetPackageFlavor.DESKTOP,
account_token='dummytoken')
wref2 = weakref.ref(gather)
manager.stop()
# Make sure nothing is keeping itself alive.
del manager
del gather
assert wref() is None
assert wref2() is None
| 36.274194 | 79 | 0.670965 |
03cab5957ee74bbcebb8dcb2d9f9aabf7ce0d7d0 | 606 | py | Python | tests/python/test_abs.py | squarefk/test_actions | dd3b0305c49b577102786eb1c24c590ef160bc30 | ["MIT"] | 2 | 2021-04-20T04:53:07.000Z | 2021-04-20T04:53:12.000Z | tests/python/test_abs.py | squarefk/test_actions | dd3b0305c49b577102786eb1c24c590ef160bc30 | ["MIT"] | 3 | 2020-08-24T09:07:15.000Z | 2020-08-24T09:18:29.000Z | tests/python/test_abs.py | squarefk/test_actions | dd3b0305c49b577102786eb1c24c590ef160bc30 | ["MIT"] | 1 | 2021-11-23T11:16:54.000Z | 2021-11-23T11:16:54.000Z |
import taichi as ti
@ti.all_archs
def test_abs():
x = ti.field(ti.f32)
y = ti.field(ti.f32)
N = 16
ti.root.dense(ti.i, N).place(x)
ti.root.dense(ti.i, N).place(y)
ti.root.lazy_grad()
@ti.kernel
def func():
for i in range(N):
x[i] = ti.abs(y[i])
for i in range(N):
y[i] = i - 10
x.grad[i] = 1
func()
func.grad()
def sgn(x):
if x > 0:
return 1
if x < 0:
return -1
return 0
for i in range(N):
assert x[i] == abs(y[i])
assert y.grad[i] == sgn(y[i])
| 16.378378 | 37 | 0.452145 |
5485d71180870425ced043adcc39e77e3c25dd84 | 14,860 | py | Python | tests/test_ban_tk_connect.py | FlorianSW/hll_rcon_tool | 26a37b07eaab34dfb5a6d10c0f02e0fcae51dd88 | ["MIT"] | 49 | 2020-03-07T13:09:21.000Z | 2022-03-19T14:24:13.000Z | tests/test_ban_tk_connect.py | FlorianSW/hll_rcon_tool | 26a37b07eaab34dfb5a6d10c0f02e0fcae51dd88 | ["MIT"] | 48 | 2020-03-26T22:19:40.000Z | 2021-12-12T17:31:06.000Z | tests/test_ban_tk_connect.py | FlorianSW/hll_rcon_tool | 26a37b07eaab34dfb5a6d10c0f02e0fcae51dd88 | ["MIT"] | 48 | 2020-03-03T09:44:36.000Z | 2022-03-18T07:33:39.000Z |
from unittest import mock
from rcon.game_logs import auto_ban_if_tks_right_after_connection
@mock.patch("rcon.game_logs.get_player_profile", autospec=True, return_value=None)
@mock.patch(
"rcon.game_logs.get_config",
return_value={
"BAN_TK_ON_CONNECT": {
"enabled": True,
"message": "Vous avez été banni automatiquement car votre premiere action apres connection est un TEAM KILL.\nSi c'etait un accident demandez votre déban sur: https://discord.io/HLLFR (Via un navigateur, pas directement dans discord)\n\nYou've been banned automatically for TEAM KILLING. Cheers",
"author_name": "HATERS GONNA HATE",
"exclude_weapons": ["None"],
"max_time_after_connect_minutes": 5,
"ignore_tk_after_n_kills": 1,
"ignore_tk_after_n_death": 2,
"discord_webhook_url": "",
"discord_webhook_message": "{player} banned for TK right after connecting",
"whitelist_players": {
"has_flag": ["✅"],
"is_vip": True,
"has_at_least_n_sessions": 10,
},
},
},
)
def test_ban_excluded_weapon(*args):
tk_log = {
"version": 1,
"timestamp_ms": 1612695641000,
"action": "TEAM KILL",
"player": "[ARC] DYDSO ★ツ",
"steam_id_64_1": 76561198091327692,
"player2": "Francky Mc Fly",
"steam_id_64_1": 76561198091327692,
"weapon": "None",
"raw": "[646 ms (1612695641)] TEAM KILL: [ARC] DYDSO ★ツ(Axis/76561198091327692) -> Francky Mc Fly(Axis/76561198133214514) with None",
"content": "[ARC] DYDSO ★ツ(Axis/76561198091327692) -> Francky Mc Fly(Axis/76561198133214514) with None",
}
logs = [
tk_log,
{
"id": 1381028,
"version": 1,
"creation_time": "2021-02-07T11:02:11.725",
"timestamp_ms": 1612695428000,
"action": "CONNECTED",
"player": "[ARC] DYDSO ★ツ",
"player2": None,
"weapon": None,
"steam_id_64_1": None,
"steam_id_64_1": None,
"raw": "[600 ms (1612695428)] CONNECTED [ARC] DYDSO ★ツ",
"content": "[ARC] DYDSO ★ツ",
"server": "1",
},
]
with mock.patch("rcon.game_logs.RecordedRcon") as rcon, mock.patch(
"rcon.game_logs.get_recent_logs", return_value={"logs": logs}
) as get:
rcon.get_vips_ids = mock.MagicMock(return_value=[])
auto_ban_if_tks_right_after_connection(rcon, tk_log)
rcon.do_perma_ban.assert_not_called()
@mock.patch("rcon.game_logs.get_player_profile", autospec=True, return_value=None)
@mock.patch(
"rcon.game_logs.get_config",
return_value={
"BAN_TK_ON_CONNECT": {
"enabled": True,
"message": "Vous avez été banni automatiquement car votre premiere action apres connection est un TEAM KILL.\nSi c'etait un accident demandez votre déban sur: https://discord.io/HLLFR (Via un navigateur, pas directement dans discord)\n\nYou've been banned automatically for TEAM KILLING. Cheers",
"author_name": "HATERS GONNA HATE",
"exclude_weapons": ["None"],
"max_time_after_connect_minutes": 5,
"ignore_tk_after_n_kills": 1,
"ignore_tk_after_n_death": 2,
"discord_webhook_url": "",
"discord_webhook_message": "{player} banned for TK right after connecting",
"whitelist_players": {
"has_flag": ["✅"],
"is_vip": True,
"has_at_least_n_sessions": 10,
},
},
},
)
def test_ban_success(*args):
tk_log = {
"version": 1,
"timestamp_ms": 1612695641000,
"action": "TEAM KILL",
"player": "[ARC] DYDSO ★ツ",
"steam_id_64_1": 76561198091327692,
"player2": "Francky Mc Fly",
"steam_id_64_1": 76561198091327692,
"weapon": "G43",
"raw": "[646 ms (1612695641)] TEAM KILL: [ARC] DYDSO ★ツ(Axis/76561198091327692) -> Francky Mc Fly(Axis/76561198133214514) with None",
"content": "[ARC] DYDSO ★ツ(Axis/76561198091327692) -> Francky Mc Fly(Axis/76561198133214514) with None",
}
logs = [
tk_log,
{
"id": 1381028,
"version": 1,
"creation_time": "2021-02-07T11:02:11.725",
"timestamp_ms": 1612695428000,
"action": "CONNECTED",
"player": "[ARC] DYDSO ★ツ",
"player2": None,
"weapon": None,
"steam_id_64_1": None,
"steam_id_64_1": None,
"raw": "[600 ms (1612695428)] CONNECTED [ARC] DYDSO ★ツ",
"content": "[ARC] DYDSO ★ツ",
"server": "1",
},
]
with mock.patch("rcon.game_logs.RecordedRcon") as rcon, mock.patch(
"rcon.game_logs.get_recent_logs", return_value={"logs": logs}
) as get:
rcon.get_vips_ids = mock.MagicMock(return_value=[])
auto_ban_if_tks_right_after_connection(rcon, tk_log)
rcon.do_perma_ban.assert_called()
@mock.patch("rcon.game_logs.get_player_profile", autospec=True, return_value=None)
@mock.patch(
"rcon.game_logs.get_config",
return_value={
"BAN_TK_ON_CONNECT": {
"enabled": True,
"message": "Vous avez été banni automatiquement car votre premiere action apres connection est un TEAM KILL.\nSi c'etait un accident demandez votre déban sur: https://discord.io/HLLFR (Via un navigateur, pas directement dans discord)\n\nYou've been banned automatically for TEAM KILLING. Cheers",
"author_name": "HATERS GONNA HATE",
"exclude_weapons": ["None"],
"max_time_after_connect_minutes": 5,
"ignore_tk_after_n_kills": 1,
"ignore_tk_after_n_death": 2,
"discord_webhook_url": "",
"discord_webhook_message": "{player} banned for TK right after connecting",
"whitelist_players": {
"has_flag": ["✅"],
"is_vip": True,
"has_at_least_n_sessions": 10,
},
},
},
)
def test_ban_ignored_kill(*args):
tk_log = {
"version": 1,
"timestamp_ms": 1612695641000,
"action": "TEAM KILL",
"player": "[ARC] DYDSO ★ツ",
"steam_id_64_1": 76561198091327692,
"player2": "Francky Mc Fly",
"steam_id_64_1": 76561198091327692,
"weapon": "G43",
"raw": "[646 ms (1612695641)] TEAM KILL: [ARC] DYDSO ★ツ(Axis/76561198091327692) -> Francky Mc Fly(Axis/76561198133214514) with None",
"content": "[ARC] DYDSO ★ツ(Axis/76561198091327692) -> Francky Mc Fly(Axis/76561198133214514) with None",
}
logs = [
tk_log,
{
"version": 1,
"timestamp_ms": 1612695641000,
"action": "KILL",
"player": "[ARC] DYDSO ★ツ",
"steam_id_64_1": 76561198091327692,
"player2": "Francky Mc Fly",
"steam_id_64_1": 76561198091327692,
"weapon": "G43",
"raw": "[646 ms (1612695641)] TEAM KILL: [ARC] DYDSO ★ツ(Axis/76561198091327692) -> Francky Mc Fly(Axis/76561198133214514) with None",
"content": "[ARC] DYDSO ★ツ(Axis/76561198091327692) -> Francky Mc Fly(Axis/76561198133214514) with None",
},
{
"id": 1381028,
"version": 1,
"creation_time": "2021-02-07T11:02:11.725",
"timestamp_ms": 1612695428000,
"action": "CONNECTED",
"player": "[ARC] DYDSO ★ツ",
"player2": None,
"weapon": None,
"steam_id_64_1": None,
"steam_id_64_1": None,
"raw": "[600 ms (1612695428)] CONNECTED [ARC] DYDSO ★ツ",
"content": "[ARC] DYDSO ★ツ",
"server": "1",
},
]
with mock.patch("rcon.game_logs.RecordedRcon") as rcon, mock.patch(
"rcon.game_logs.get_recent_logs", return_value={"logs": logs}
) as get:
rcon.get_vips_ids = mock.MagicMock(return_value=[])
auto_ban_if_tks_right_after_connection(rcon, tk_log)
rcon.do_perma_ban.assert_not_called()
@mock.patch("rcon.game_logs.get_player_profile", autospec=True, return_value=None)
@mock.patch(
"rcon.game_logs.get_config",
return_value={
"BAN_TK_ON_CONNECT": {
"enabled": True,
"message": "Vous avez été banni automatiquement car votre premiere action apres connection est un TEAM KILL.\nSi c'etait un accident demandez votre déban sur: https://discord.io/HLLFR (Via un navigateur, pas directement dans discord)\n\nYou've been banned automatically for TEAM KILLING. Cheers",
"author_name": "HATERS GONNA HATE",
"exclude_weapons": ["None"],
"max_time_after_connect_minutes": 5,
"ignore_tk_after_n_kills": 1,
"ignore_tk_after_n_death": 2,
"discord_webhook_url": "",
"discord_webhook_message": "{player} banned for TK right after connecting",
"whitelist_players": {
"has_flag": ["✅"],
"is_vip": True,
"has_at_least_n_sessions": 10,
},
},
},
)
def test_ban_count_one_death(*args):
tk_log = {
"version": 1,
"timestamp_ms": 1612695641000,
"action": "TEAM KILL",
"player": "[ARC] DYDSO ★ツ",
"steam_id_64_1": 76561198091327692,
"player2": "Francky Mc Fly",
"steam_id_64_1": 76561198091327692,
"weapon": "G43",
"raw": "[646 ms (1612695641)] TEAM KILL: [ARC] DYDSO ★ツ(Axis/76561198091327692) -> Francky Mc Fly(Axis/76561198133214514) with None",
"content": "[ARC] DYDSO ★ツ(Axis/76561198091327692) -> Francky Mc Fly(Axis/76561198133214514) with None",
}
logs = [
tk_log,
{
"version": 1,
"timestamp_ms": 1612695641000,
"action": "KILL",
"player2": "[ARC] DYDSO ★ツ",
"steam_id_64_2": 76561198091327692,
"player": "Francky Mc Fly",
"steam_id_64_1": 76561198091327692,
"weapon": "G43",
"raw": "[646 ms (1612695641)] TEAM KILL: [ARC] DYDSO ★ツ(Axis/76561198091327692) -> Francky Mc Fly(Axis/76561198133214514) with None",
"content": "[ARC] DYDSO ★ツ(Axis/76561198091327692) -> Francky Mc Fly(Axis/76561198133214514) with None",
},
{
"id": 1381028,
"version": 1,
"creation_time": "2021-02-07T11:02:11.725",
"timestamp_ms": 1612695428000,
"action": "CONNECTED",
"player": "[ARC] DYDSO ★ツ",
"player2": None,
"weapon": None,
"steam_id_64_1": None,
"steam_id_64_1": None,
"raw": "[600 ms (1612695428)] CONNECTED [ARC] DYDSO ★ツ",
"content": "[ARC] DYDSO ★ツ",
"server": "1",
},
]
with mock.patch("rcon.game_logs.RecordedRcon") as rcon, mock.patch(
"rcon.game_logs.get_recent_logs", return_value={"logs": logs}
) as get:
rcon.get_vips_ids = mock.MagicMock(return_value=[])
auto_ban_if_tks_right_after_connection(rcon, tk_log)
rcon.do_perma_ban.assert_called()
@mock.patch("rcon.game_logs.get_player_profile", autospec=True, return_value=None)
@mock.patch(
"rcon.game_logs.get_config",
return_value={
"BAN_TK_ON_CONNECT": {
"enabled": True,
"message": "Vous avez été banni automatiquement car votre premiere action apres connection est un TEAM KILL.\nSi c'etait un accident demandez votre déban sur: https://discord.io/HLLFR (Via un navigateur, pas directement dans discord)\n\nYou've been banned automatically for TEAM KILLING. Cheers",
"author_name": "HATERS GONNA HATE",
"exclude_weapons": ["None"],
"max_time_after_connect_minutes": 5,
"ignore_tk_after_n_kills": 1,
"ignore_tk_after_n_death": 2,
"discord_webhook_url": "",
"discord_webhook_message": "{player} banned for TK right after connecting",
"whitelist_players": {
"has_flag": ["✅"],
"is_vip": True,
"has_at_least_n_sessions": 10,
},
},
},
)
def test_ban_ignored_2_death(*args):
tk_log = {
"version": 1,
"timestamp_ms": 1612695641000,
"action": "TEAM KILL",
"player": "[ARC] DYDSO ★ツ",
"steam_id_64_1": 76561198091327692,
"player2": "Francky Mc Fly",
"steam_id_64_1": 76561198091327692,
"weapon": "G43",
"raw": "[646 ms (1612695641)] TEAM KILL: [ARC] DYDSO ★ツ(Axis/76561198091327692) -> Francky Mc Fly(Axis/76561198133214514) with None",
"content": "[ARC] DYDSO ★ツ(Axis/76561198091327692) -> Francky Mc Fly(Axis/76561198133214514) with None",
}
logs = [
tk_log,
{
"version": 1,
"timestamp_ms": 1612695641000,
"action": "KILL",
"player2": "[ARC] DYDSO ★ツ",
"steam_id_64_2": 76561198091327692,
"player": "Francky Mc Fly",
"steam_id_64_1": 76561198091327692,
"weapon": "G43",
"raw": "[646 ms (1612695641)] TEAM KILL: [ARC] DYDSO ★ツ(Axis/76561198091327692) -> Francky Mc Fly(Axis/76561198133214514) with None",
"content": "[ARC] DYDSO ★ツ(Axis/76561198091327692) -> Francky Mc Fly(Axis/76561198133214514) with None",
},
{
"version": 1,
"timestamp_ms": 1612695641000,
"action": "KILL",
"player2": "[ARC] DYDSO ★ツ",
"steam_id_64_2": 76561198091327692,
"player": "Francky Mc Fly",
"steam_id_64_1": 76561198091327692,
"weapon": "G43",
"raw": "[646 ms (1612695641)] TEAM KILL: [ARC] DYDSO ★ツ(Axis/76561198091327692) -> Francky Mc Fly(Axis/76561198133214514) with None",
"content": "[ARC] DYDSO ★ツ(Axis/76561198091327692) -> Francky Mc Fly(Axis/76561198133214514) with None",
},
{
"id": 1381028,
"version": 1,
"creation_time": "2021-02-07T11:02:11.725",
"timestamp_ms": 1612695428000,
"action": "CONNECTED",
"player": "[ARC] DYDSO ★ツ",
"player2": None,
"weapon": None,
"steam_id_64_1": None,
"steam_id_64_1": None,
"raw": "[600 ms (1612695428)] CONNECTED [ARC] DYDSO ★ツ",
"content": "[ARC] DYDSO ★ツ",
"server": "1",
},
]
with mock.patch("rcon.game_logs.RecordedRcon") as rcon, mock.patch(
"rcon.game_logs.get_recent_logs", return_value={"logs": logs}
) as get:
rcon.get_vips_ids = mock.MagicMock(return_value=[])
auto_ban_if_tks_right_after_connection(rcon, tk_log)
rcon.do_perma_ban.assert_not_called()
| 41.163435 | 308 | 0.581023 |
bca9aca223e926104ce85f5b70b7161a53199330 | 1,614 | py | Python | adventofcode/twentytwenty/day8.py | Launchpaddy/adventofcode-1 | 1104b981ca2e8f65a0349cfee1d63bd2aa365d28 | ["MIT"] | null | null | null | adventofcode/twentytwenty/day8.py | Launchpaddy/adventofcode-1 | 1104b981ca2e8f65a0349cfee1d63bd2aa365d28 | ["MIT"] | null | null | null | adventofcode/twentytwenty/day8.py | Launchpaddy/adventofcode-1 | 1104b981ca2e8f65a0349cfee1d63bd2aa365d28 | ["MIT"] | null | null | null |
def main():
calculate_num()
def calculate_num():
"""Calculate the answer
"""
total = 0
lines_run = []
total = run_recursion(0, lines_run, total)
print('Accumulator Value:', total)
return total
def run_recursion(index, lines_run, total):
"""Recursively iterate through the data to sum the accumulator value
"""
action, direction, number = format_data()[index]
index = int(index)
# print(format_data()[index])
# print(lines_run)
if index in lines_run:
print('Already visited', index, '- stopping!')
return total
if action == 'nop':
lines_run.append(index)
return run_recursion(index+1, lines_run, total)
elif action == 'acc':
if direction == '+':
total += number
elif direction == '-':
total -= number
lines_run.append(index)
index += 1
elif action == 'jmp':
lines_run.append(index)
if direction == '+':
index += number
elif direction == '-':
index -= number
# print('index:', index)
return run_recursion(index, lines_run, total)
def format_data():
"""Format the data into a usable structure
"""
with open('adventofcode/twentytwenty/static_data/day8.txt', 'r') as f:
lines = f.readlines()
data = []
for index, line in enumerate(lines):
action = line[:3].lower()
direction = line[4:5].strip()
number = int(line[5:])
data.append([action, direction, number])
# print(data)
return data
if __name__ == '__main__':
main()
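
A small illustration (not part of the file above) of the fixed-width slicing that `format_data` relies on, using a made-up instruction line in the puzzle's `op sign number` layout.

```python
line = "acc +7\n"
action = line[:3].lower()       # "acc"
direction = line[4:5].strip()   # "+"
number = int(line[5:])          # 7 (int() tolerates the trailing newline)
print(action, direction, number)  # acc + 7
```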
| 24.454545 | 74 | 0.578067 |
678eb0956582061738193912b937301d29822068 | 5,138 | py | Python | nevergrad/optimization/test_optimizerlib.py | GuodongZhu/nevergrad | 446bc2ef0c7ef318578013721843f7701507fd67 | ["MIT"] | 1 | 2019-01-28T19:52:06.000Z | 2019-01-28T19:52:06.000Z | nevergrad/optimization/test_optimizerlib.py | GuodongZhu/nevergrad | 446bc2ef0c7ef318578013721843f7701507fd67 | ["MIT"] | null | null | null | nevergrad/optimization/test_optimizerlib.py | GuodongZhu/nevergrad | 446bc2ef0c7ef318578013721843f7701507fd67 | ["MIT"] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import random
import warnings
from pathlib import Path
from unittest import SkipTest
from unittest import TestCase
from typing import Type
import genty
import numpy as np
import pandas as pd
from ..common.typetools import ArrayLike
from . import base
from .recaster import FinishedUnderlyingOptimizerWarning
from . import optimizerlib
from .optimizerlib import registry
def fitness(x: ArrayLike) -> float:
"""Simple quadratic fitness function which can be used with dimension up to 4
"""
x0 = [0.5, -0.8, 0, 4][:len(x)]
return float(np.sum((np.array(x, copy=False) - x0)**2))
def check_optimizer(optimizer_cls: Type[base.Optimizer], budget: int = 300, verify_value: bool = True) -> None:
# recast optimizers do not support num_workers > 1; no_parallelization is also respected.
num_workers = (1 if optimizer_cls.recast or optimizer_cls.no_parallelization else 2)
optimizer = optimizer_cls(dimension=2, budget=budget, num_workers=num_workers)
num_attempts = 1 if not verify_value else 2 # allow 2 attempts to get to the optimum (shit happens...)
for k in range(1, num_attempts + 1):
with warnings.catch_warnings():
# benchmarks do not need to be efficient
warnings.filterwarnings("ignore", category=base.InefficientSettingsWarning)
# some optimizers finish early
warnings.filterwarnings("ignore", category=FinishedUnderlyingOptimizerWarning)
# now optimize :)
output = optimizer.optimize(fitness)
if verify_value:
try:
np.testing.assert_array_almost_equal(output, [0.5, -0.8], decimal=1)
except AssertionError as e:
print(f"Attempt #{k}: failed with value {tuple(output)}")
if k == num_attempts:
raise e
else:
break
# make sure we are correctly tracking the best values
archive = optimizer.archive
assert (optimizer.current_bests["pessimistic"].pessimistic_confidence_bound ==
min(v.pessimistic_confidence_bound for v in archive.values()))
SLOW = ["NoisyDE", "NoisyBandit", "SPSA", "NoisyOnePlusOne", "OptimisticNoisyOnePlusOne"]
@genty.genty
class OptimizerTests(TestCase):
recommendations = pd.DataFrame(columns=[f"v{k}" for k in range(4)])
_RECOM_FILE = Path(__file__).parent / "recorded_recommendations.csv"
@classmethod
def setUpClass(cls) -> None:
# load recorded recommendations
if cls._RECOM_FILE.exists():
cls.recommendations = pd.read_csv(cls._RECOM_FILE, index_col=0)
@classmethod
def tearDownClass(cls) -> None:
# sort and remove unused names
# then update recommendation file
names = sorted(x for x in cls.recommendations.index if x in registry)
recom = cls.recommendations.loc[names, :]
recom.iloc[:, 1:] = np.round(recom.iloc[:, 1:], 12)
recom.to_csv(cls._RECOM_FILE)
@genty.genty_dataset(**{name: (name, optimizer,) for name, optimizer in registry.items() if "BO" not in name}) # type: ignore
def test_optimizers(self, name: str, optimizer_cls: Type[base.Optimizer]) -> None:
verify = not optimizer_cls.one_shot and name not in SLOW and "Discrete" not in name
check_optimizer(optimizer_cls, budget=300, verify_value=verify)
@genty.genty_dataset(**{name: (name, optimizer,) for name, optimizer in registry.items() if "BO" not in name}) # type: ignore
def test_optimizers_recommendation(self, name: str, optimizer_cls: Type[base.Optimizer]) -> None:
if name in ["CMA", "Portfolio"]:
raise SkipTest("Not playing nicely with the tests") # thread problem?
np.random.seed(12)
if optimizer_cls.recast:
random.seed(12) # may depend on non numpy generator
optim = optimizer_cls(dimension=4, budget=6, num_workers=1)
output = optim.optimize(fitness)
if name not in self.recommendations.index:
self.recommendations.loc[name, :] = tuple(output)
raise ValueError(f'Recorded the value for optimizer "{name}", please rerun this test locally.')
np.testing.assert_array_almost_equal(output, self.recommendations.loc[name, :], decimal=10,
err_msg="Something has changed, if this is normal, delete the following "
f"file and rerun to update the values:\n{self._RECOM_FILE}")
def test_pso_to_real() -> None:
output = optimizerlib.PSO.to_real([.3, .5, .9])
np.testing.assert_almost_equal(output, [-.52, 0, 1.28], decimal=2)
np.testing.assert_raises(AssertionError, optimizerlib.PSO.to_real, [.3, .5, 1.2])
def test_portfolio_budget() -> None:
for k in range(3, 13):
optimizer = optimizerlib.Portfolio(dimension=2, budget=k)
np.testing.assert_equal(optimizer.budget, sum(o.budget for o in optimizer.optims))
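
A quick hedged check (not part of the file above) of the `fitness` helper defined at the top: it is the squared distance to `x0 = [0.5, -0.8, 0, 4][:len(x)]`, so it is exactly zero at that point and positive elsewhere.

```python
assert fitness([0.5, -0.8]) == 0.0
assert fitness([0.5, -0.8, 0, 4]) == 0.0
assert fitness([0.5, -0.7]) > 0.0
```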
| 45.469027 | 130 | 0.673998 |
a23ffd86da7d7249f2bf0c69a4bfdd26cee3e9ee | 636 | py | Python | sgx-ansible/roles/role-gantsign.keyboard/molecule/default/tests/test_role.py | integritee-network/sgx-setup | f685041ef1b0611a5a28ee624e4695e06788f825 | ["Apache-2.0"] | 4 | 2019-10-11T13:51:27.000Z | 2021-06-17T12:55:52.000Z | sgx-ansible/roles/role-gantsign.keyboard/molecule/default/tests/test_role.py | integritee-network/sgx-setup | f685041ef1b0611a5a28ee624e4695e06788f825 | ["Apache-2.0"] | 8 | 2021-09-10T08:52:48.000Z | 2022-02-07T14:59:22.000Z | sgx-ansible/roles/role-gantsign.keyboard/molecule/default/tests/test_role.py | integritee-network/sgx-setup | f685041ef1b0611a5a28ee624e4695e06788f825 | ["Apache-2.0"] | 1 | 2021-05-22T12:54:31.000Z | 2021-05-22T12:54:31.000Z |
import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
def test_keyboard_file(host):
kb = host.file('/etc/default/keyboard')
assert kb.exists
assert kb.is_file
assert kb.user == 'root'
assert kb.group == 'root'
assert oct(kb.mode) == '0644'
assert kb.contains('XKBMODEL="pc105"')
assert kb.contains('XKBLAYOUT="brai"')
assert kb.contains('XKBVARIANT="right_hand"')
assert kb.contains('XKBOPTIONS="lv3:alt_switch,compose:rctrl"')
assert kb.contains('BACKSPACE="guess"')
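
For reference, a sketch of an `/etc/default/keyboard` that would satisfy the assertions above (inferred only from this test, not from the role itself):

```
XKBMODEL="pc105"
XKBLAYOUT="brai"
XKBVARIANT="right_hand"
XKBOPTIONS="lv3:alt_switch,compose:rctrl"
BACKSPACE="guess"
```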
| 27.652174 | 67 | 0.709119 |
d03316a7647f1d0f736db34ce4d3d362aa3b0c4d | 3,412 | py | Python | PS_utils.py | thuyvytran/CAPS-Continuous-Action-Policy-Shaping | ff44ab20119ba9fccecb2e86d5eaf6e0c0119ac7 | ["MIT"] | null | null | null | PS_utils.py | thuyvytran/CAPS-Continuous-Action-Policy-Shaping | ff44ab20119ba9fccecb2e86d5eaf6e0c0119ac7 | ["MIT"] | null | null | null | PS_utils.py | thuyvytran/CAPS-Continuous-Action-Policy-Shaping | ff44ab20119ba9fccecb2e86d5eaf6e0c0119ac7 | ["MIT"] | null | null | null |
from operator import truediv
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from torch.distributions.normal import Normal
from scipy.special import expit
def sample_normal(agent, actor, observation, with_noise=False, max_action=2, env_only=False,
with_grad_env=True, with_grad_agent=True, kappa=.9):
def get_dist(agent, actor, observation):
observation = torch.Tensor([observation]).to('cpu')
mu1, sigma1 = agent.actor.get_dist(observation, with_grad=with_grad_agent)
mu2, sigma2 = actor.actor.get_dist(observation, with_grad = with_grad_env)
mu1 = mu1[0].detach().numpy()
sigma1 = sigma1[0].detach().numpy()
mu2 = mu2[0].detach().numpy()
sigma2 = sigma2[0].detach().numpy()
#mu = (mu1 + mu2)/2
#kl = expit(np.log(np.sqrt(sigma2)/np.sqrt(sigma1)) + (sigma1+(mu1-mu2)**2)/(2*sigma2) - .5)
kl = np.tanh(np.log(np.sqrt(sigma2)/np.sqrt(sigma2)) + (sigma2+(mu1-mu2)**2)/(2*sigma2) - .5)
#kl2 = np.log(np.sqrt(sigma2)/np.sqrt(sigma1)) + (sigma1+(mu1-mu2)**2)/(2*sigma2) - .5
#kl = np.tanh((mu1-mu2)**2)
for i in range(len(kl)):
if kl[i] > kappa:
kl[i] = kappa
#kl = kl*2
#kl = .95
mu = mu1*(kl) + mu2*(1-(kl))
#mu = (mu1 + mu2)/2
#sigma = sigma2
#sigma = np.zeros(4)
#sigma[0] = max(sigma1[0], sigma2[0])
#sigma[1] = max(sigma1[1], sigma2[1])
#sigma[2] = max(sigma1[2], sigma2[2])
#sigma[3] = max(sigma1[3], sigma2[3])
#sigma = (sigma1+sigma2)/2
#sigma = sigma1*(kl) + sigma2*(1-(kl))
sigma = sigma2
#mu[2] = 0
#mu[3] = 0
#sigma[2] = 0
#sigma[3] = 0
mu = torch.from_numpy(mu)
sigma = torch.from_numpy(sigma)
#print(mu, sigma)
return Normal(mu, sigma), mu.numpy(), sigma.numpy(), np.mean(kl)
def get_dist_env(actor, observation):
observation = torch.Tensor([observation]).to('cpu')
mu1, sigma1 = actor.actor.get_dist(observation, with_grad=with_grad_env)
#mu2, sigma2 = actor.actor.get_dist(observation)
mu1 = mu1[0].detach().numpy()
sigma1 = sigma1[0].detach().numpy()
#mu2 = mu2[0].detach().numpy()
#sigma2 = sigma2[0].detach().numpy()
#mu = (mu1 + mu2)/2
mu = mu1
sigma = np.zeros(4)
sigma[0] = sigma1[0]
sigma[1] = sigma1[1]
sigma[2] = sigma1[2]
sigma[3] = sigma1[3]
#mu[2] = 0
#mu[3] = 0
#sigma[2] = 0
#sigma[3] = 0
mu = torch.from_numpy(mu)
sigma = torch.from_numpy(sigma)
#print(mu, sigma)
return Normal(mu, sigma), mu, sigma
if env_only is False:
dist, mu, sigma, kl = get_dist(agent, actor, observation)
if with_noise:
sample = dist.rsample().numpy()
else:
sample = dist.sample().numpy()
#print(sample)
sample = max_action * np.tanh(sample)
return sample, dist, mu, sigma, kl
else:
dist, mu, sigma = get_dist_env(actor, observation)
if with_noise:
sample = dist.rsample().numpy()
else:
sample = dist.sample().numpy()
#print(sample)
sample = max_action * np.tanh(sample)
return sample, dist, mu, sigma
| 37.494505 | 101 | 0.55129 |
a7f67df4ebf915dcaf1290c750d0329ee819e7d1 | 8,143 | py | Python | microsoft_network_monitor_v2.py | Vector35/kaitai | 71fd8c31289aaeba12f48ae394631f9a56cfe056 | ["MIT"] | 20 | 2019-09-28T01:44:58.000Z | 2022-03-09T08:35:56.000Z | microsoft_network_monitor_v2.py | Vector35/kaitai | 71fd8c31289aaeba12f48ae394631f9a56cfe056 | ["MIT"] | 4 | 2020-12-23T01:51:26.000Z | 2021-12-15T14:41:50.000Z | microsoft_network_monitor_v2.py | Vector35/kaitai | 71fd8c31289aaeba12f48ae394631f9a56cfe056 | ["MIT"] | 4 | 2020-02-20T18:47:27.000Z | 2021-06-17T01:24:09.000Z |
# This is a generated file! Please edit source .ksy file and use kaitai-struct-compiler to rebuild
from pkg_resources import parse_version
from . import kaitaistruct
from .kaitaistruct import KaitaiStruct, KaitaiStream, BytesIO
from enum import Enum
if parse_version(kaitaistruct.__version__) < parse_version('0.9'):
raise Exception("Incompatible Kaitai Struct Python API: 0.9 or later is required, but you have %s" % (kaitaistruct.__version__))
from . import windows_systemtime
from . import ethernet_frame
class MicrosoftNetworkMonitorV2(KaitaiStruct):
"""Microsoft Network Monitor (AKA Netmon) is a proprietary Microsoft's
network packet sniffing and analysis tool. It can save captured
traffic as .cap files, which usually contain the packets and may
contain some additional info - enhanced network info, calculated
statistics, etc.
There are at least 2 different versions of the format: v1 and
v2. Netmon v3 seems to use the same file format as v1.
.. seealso::
Source - https://msdn.microsoft.com/en-us/library/windows/desktop/ee817717.aspx
"""
class Linktype(Enum):
null_linktype = 0
ethernet = 1
ax25 = 3
ieee802_5 = 6
arcnet_bsd = 7
slip = 8
ppp = 9
fddi = 10
ppp_hdlc = 50
ppp_ether = 51
atm_rfc1483 = 100
raw = 101
c_hdlc = 104
ieee802_11 = 105
frelay = 107
loop = 108
linux_sll = 113
ltalk = 114
pflog = 117
ieee802_11_prism = 119
ip_over_fc = 122
sunatm = 123
ieee802_11_radiotap = 127
arcnet_linux = 129
apple_ip_over_ieee1394 = 138
mtp2_with_phdr = 139
mtp2 = 140
mtp3 = 141
sccp = 142
docsis = 143
linux_irda = 144
user0 = 147
user1 = 148
user2 = 149
user3 = 150
user4 = 151
user5 = 152
user6 = 153
user7 = 154
user8 = 155
user9 = 156
user10 = 157
user11 = 158
user12 = 159
user13 = 160
user14 = 161
user15 = 162
ieee802_11_avs = 163
bacnet_ms_tp = 165
ppp_pppd = 166
gprs_llc = 169
gpf_t = 170
gpf_f = 171
linux_lapd = 177
bluetooth_hci_h4 = 187
usb_linux = 189
ppi = 192
ieee802_15_4 = 195
sita = 196
erf = 197
bluetooth_hci_h4_with_phdr = 201
ax25_kiss = 202
lapd = 203
ppp_with_dir = 204
c_hdlc_with_dir = 205
frelay_with_dir = 206
ipmb_linux = 209
ieee802_15_4_nonask_phy = 215
usb_linux_mmapped = 220
fc_2 = 224
fc_2_with_frame_delims = 225
ipnet = 226
can_socketcan = 227
ipv4 = 228
ipv6 = 229
ieee802_15_4_nofcs = 230
dbus = 231
dvb_ci = 235
mux27010 = 236
stanag_5066_d_pdu = 237
nflog = 239
netanalyzer = 240
netanalyzer_transparent = 241
ipoib = 242
mpeg_2_ts = 243
ng40 = 244
nfc_llcp = 245
infiniband = 247
sctp = 248
usbpcap = 249
rtac_serial = 250
bluetooth_le_ll = 251
netlink = 253
bluetooth_linux_monitor = 254
bluetooth_bredr_bb = 255
bluetooth_le_ll_with_phdr = 256
profibus_dl = 257
pktap = 258
epon = 259
ipmi_hpm_2 = 260
zwave_r1_r2 = 261
zwave_r3 = 262
wattstopper_dlm = 263
iso_14443 = 264
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.signature = self._io.read_bytes(4)
if not self.signature == b"\x47\x4D\x42\x55":
raise kaitaistruct.ValidationNotEqualError(b"\x47\x4D\x42\x55", self.signature, self._io, u"/seq/0")
self.version_minor = self._io.read_u1()
self.version_major = self._io.read_u1()
self.mac_type = KaitaiStream.resolve_enum(MicrosoftNetworkMonitorV2.Linktype, self._io.read_u2le())
self.time_capture_start = windows_systemtime.WindowsSystemtime(self._io)
self.frame_table_ofs = self._io.read_u4le()
self.frame_table_len = self._io.read_u4le()
self.user_data_ofs = self._io.read_u4le()
self.user_data_len = self._io.read_u4le()
self.comment_ofs = self._io.read_u4le()
self.comment_len = self._io.read_u4le()
self.statistics_ofs = self._io.read_u4le()
self.statistics_len = self._io.read_u4le()
self.network_info_ofs = self._io.read_u4le()
self.network_info_len = self._io.read_u4le()
self.conversation_stats_ofs = self._io.read_u4le()
self.conversation_stats_len = self._io.read_u4le()
class FrameIndex(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.entries = []
i = 0
while not self._io.is_eof():
self.entries.append(MicrosoftNetworkMonitorV2.FrameIndexEntry(self._io, self, self._root))
i += 1
class FrameIndexEntry(KaitaiStruct):
"""Each index entry is just a pointer to where the frame data is
stored in the file.
"""
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.ofs = self._io.read_u4le()
@property
def body(self):
"""Frame body itself."""
if hasattr(self, '_m_body'):
return self._m_body if hasattr(self, '_m_body') else None
io = self._root._io
_pos = io.pos()
io.seek(self.ofs)
self._m_body = MicrosoftNetworkMonitorV2.Frame(io, self, self._root)
io.seek(_pos)
return self._m_body if hasattr(self, '_m_body') else None
class Frame(KaitaiStruct):
"""A container for actually captured network data. Allow to
timestamp individual frames and designates how much data from
the original packet was actually written into the file.
.. seealso::
Source - https://msdn.microsoft.com/en-us/library/windows/desktop/ee831821.aspx
"""
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.ts_delta = self._io.read_u8le()
self.orig_len = self._io.read_u4le()
self.inc_len = self._io.read_u4le()
_on = self._root.mac_type
if _on == MicrosoftNetworkMonitorV2.Linktype.ethernet:
self._raw_body = self._io.read_bytes(self.inc_len)
_io__raw_body = KaitaiStream(BytesIO(self._raw_body))
self.body = ethernet_frame.EthernetFrame(_io__raw_body)
else:
self.body = self._io.read_bytes(self.inc_len)
@property
def frame_table(self):
"""Index that is used to access individual captured frames."""
if hasattr(self, '_m_frame_table'):
return self._m_frame_table if hasattr(self, '_m_frame_table') else None
_pos = self._io.pos()
self._io.seek(self.frame_table_ofs)
self._raw__m_frame_table = self._io.read_bytes(self.frame_table_len)
_io__raw__m_frame_table = KaitaiStream(BytesIO(self._raw__m_frame_table))
self._m_frame_table = MicrosoftNetworkMonitorV2.FrameIndex(_io__raw__m_frame_table, self, self._root)
self._io.seek(_pos)
return self._m_frame_table if hasattr(self, '_m_frame_table') else None
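
# --- Minimal usage sketch; "example.cap" is an assumed path to a Netmon v2
# capture file and is not part of the generated parser above.
if __name__ == '__main__':
    cap = MicrosoftNetworkMonitorV2.from_file("example.cap")
    print("Netmon v%d.%d, link type: %s" % (cap.version_major, cap.version_minor, cap.mac_type))
    for entry in cap.frame_table.entries:
        frame = entry.body  # seeks to and parses the frame data lazily
        print("captured %d of %d bytes" % (frame.inc_len, frame.orig_len))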
| 33.236735
| 132
| 0.601253
|
2b2d6d68ed44c9b3391c5d42a892e8fcd56a4cfd
| 1,768
|
py
|
Python
|
src/unet/model/unet.py
|
happog/robin
|
f1ef0983fcd163e5fae722a845b3fab2d3022c56
|
[
"MIT"
] | 155
|
2018-10-29T08:11:48.000Z
|
2022-03-11T01:55:02.000Z
|
src/unet/model/unet.py
|
happog/robin
|
f1ef0983fcd163e5fae722a845b3fab2d3022c56
|
[
"MIT"
] | 11
|
2018-12-19T13:16:23.000Z
|
2021-11-18T00:54:20.000Z
|
src/unet/model/unet.py
|
happog/robin
|
f1ef0983fcd163e5fae722a845b3fab2d3022c56
|
[
"MIT"
] | 35
|
2018-12-28T14:38:19.000Z
|
2022-02-21T10:58:50.000Z
|
from keras.layers import Input, Conv2D, MaxPooling2D, UpSampling2D
from keras.layers.core import SpatialDropout2D, Activation
from keras.layers.merge import concatenate
from keras.layers.normalization import BatchNormalization
from keras.models import Model
def double_conv_layer(inputs, filter):
conv = Conv2D(filter, (3, 3), padding='same', kernel_initializer='he_normal')(inputs)
conv = BatchNormalization(axis=3)(conv)
conv = Activation('relu')(conv)
conv = Conv2D(filter, (3, 3), padding='same', kernel_initializer='he_normal')(conv)
conv = BatchNormalization(axis=3)(conv)
conv = Activation('relu')(conv)
conv = SpatialDropout2D(0.1)(conv)
return conv
def down_layer(inputs, filter):
"""Create downsampling layer."""
conv = double_conv_layer(inputs, filter)
pool = MaxPooling2D(pool_size=(2, 2))(conv)
return conv, pool
def up_layer(inputs, concats, filter):
"""Create upsampling layer."""
return double_conv_layer(concatenate([UpSampling2D(size=(2, 2))(inputs), concats], axis=3), filter)
def unet():
"""Create U-net."""
inputs = Input((128, 128, 1))
# Downsampling.
down1, pool1 = down_layer(inputs, 32)
down2, pool2 = down_layer(pool1, 64)
down3, pool3 = down_layer(pool2, 128)
down4, pool4 = down_layer(pool3, 256)
down5, pool5 = down_layer(pool4, 512)
# Bottleneck.
bottleneck = double_conv_layer(pool5, 1024)
# Upsampling.
up5 = up_layer(bottleneck, down5, 512)
up4 = up_layer(up5, down4, 256)
up3 = up_layer(up4, down3, 128)
up2 = up_layer(up3, down2, 64)
up1 = up_layer(up2, down1, 32)
outputs = Conv2D(1, (1, 1))(up1)
outputs = Activation('sigmoid')(outputs)
model = Model(inputs, outputs)
return model
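
# --- Minimal usage sketch; the optimizer and loss below are assumptions, not
# taken from the original module.
if __name__ == '__main__':
    model = unet()
    # Binary segmentation of 128x128 single-channel inputs.
    model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
    model.summary()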
| 30.482759
| 103
| 0.68552
|
b15c4d6d2561cc678574fc197a95d1d1d1ffafe3
| 430
|
py
|
Python
|
nni/experiment/management.py
|
dutxubo/nni
|
c16f4e1c89b54b8b80661ef0072433d255ad2d24
|
[
"MIT"
] | 9,680
|
2019-05-07T01:42:30.000Z
|
2022-03-31T16:48:33.000Z
|
nni/experiment/management.py
|
dutxubo/nni
|
c16f4e1c89b54b8b80661ef0072433d255ad2d24
|
[
"MIT"
] | 1,957
|
2019-05-06T21:44:21.000Z
|
2022-03-31T09:21:53.000Z
|
nni/experiment/management.py
|
dutxubo/nni
|
c16f4e1c89b54b8b80661ef0072433d255ad2d24
|
[
"MIT"
] | 1,571
|
2019-05-07T06:42:55.000Z
|
2022-03-31T03:19:24.000Z
|
from pathlib import Path
import random
import string
def generate_experiment_id() -> str:
return ''.join(random.sample(string.ascii_lowercase + string.digits, 8))
def create_experiment_directory(experiment_id: str) -> Path:
path = Path.home() / 'nni-experiments' / experiment_id
path.mkdir(parents=True, exist_ok=True)
return path
# TODO: port shangning's work here, and use it in Experiment.start()/.stop()
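
# --- Minimal usage sketch of the helpers above.
if __name__ == '__main__':
    experiment_id = generate_experiment_id()
    experiment_dir = create_experiment_directory(experiment_id)
    print('created', experiment_dir, 'for experiment', experiment_id)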
| 25.294118
| 76
| 0.732558
|
0f26057d79542601d19c88ce02a0452c25b9ee6b
| 12,585
|
py
|
Python
|
sdk/python/pulumi_azure_native/hanaonazure/outputs.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/hanaonazure/outputs.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/hanaonazure/outputs.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._enums import *
__all__ = [
'DiskResponse',
'HardwareProfileResponse',
'IpAddressResponse',
'NetworkProfileResponse',
'OSProfileResponse',
'StorageProfileResponse',
]
@pulumi.output_type
class DiskResponse(dict):
"""
    Specifies the disk information for the HANA instance
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "diskSizeGB":
suggest = "disk_size_gb"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in DiskResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
DiskResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
DiskResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
lun: int,
disk_size_gb: Optional[int] = None,
name: Optional[str] = None):
"""
        Specifies the disk information for the HANA instance
:param int lun: Specifies the logical unit number of the data disk. This value is used to identify data disks within the VM and therefore must be unique for each data disk attached to a VM.
:param int disk_size_gb: Specifies the size of an empty data disk in gigabytes.
:param str name: The disk name.
"""
pulumi.set(__self__, "lun", lun)
if disk_size_gb is not None:
pulumi.set(__self__, "disk_size_gb", disk_size_gb)
if name is not None:
pulumi.set(__self__, "name", name)
@property
@pulumi.getter
def lun(self) -> int:
"""
Specifies the logical unit number of the data disk. This value is used to identify data disks within the VM and therefore must be unique for each data disk attached to a VM.
"""
return pulumi.get(self, "lun")
@property
@pulumi.getter(name="diskSizeGB")
def disk_size_gb(self) -> Optional[int]:
"""
Specifies the size of an empty data disk in gigabytes.
"""
return pulumi.get(self, "disk_size_gb")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
The disk name.
"""
return pulumi.get(self, "name")
@pulumi.output_type
class HardwareProfileResponse(dict):
"""
Specifies the hardware settings for the HANA instance.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "hanaInstanceSize":
suggest = "hana_instance_size"
elif key == "hardwareType":
suggest = "hardware_type"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in HardwareProfileResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
HardwareProfileResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
HardwareProfileResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
hana_instance_size: Optional[str] = None,
hardware_type: Optional[str] = None):
"""
Specifies the hardware settings for the HANA instance.
:param str hana_instance_size: Specifies the HANA instance SKU.
:param str hardware_type: Name of the hardware type (vendor and/or their product name)
"""
if hana_instance_size is not None:
pulumi.set(__self__, "hana_instance_size", hana_instance_size)
if hardware_type is not None:
pulumi.set(__self__, "hardware_type", hardware_type)
@property
@pulumi.getter(name="hanaInstanceSize")
def hana_instance_size(self) -> Optional[str]:
"""
Specifies the HANA instance SKU.
"""
return pulumi.get(self, "hana_instance_size")
@property
@pulumi.getter(name="hardwareType")
def hardware_type(self) -> Optional[str]:
"""
Name of the hardware type (vendor and/or their product name)
"""
return pulumi.get(self, "hardware_type")
@pulumi.output_type
class IpAddressResponse(dict):
"""
Specifies the IP address of the network interface.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "ipAddress":
suggest = "ip_address"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in IpAddressResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
IpAddressResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
IpAddressResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
ip_address: Optional[str] = None):
"""
Specifies the IP address of the network interface.
:param str ip_address: Specifies the IP address of the network interface.
"""
if ip_address is not None:
pulumi.set(__self__, "ip_address", ip_address)
@property
@pulumi.getter(name="ipAddress")
def ip_address(self) -> Optional[str]:
"""
Specifies the IP address of the network interface.
"""
return pulumi.get(self, "ip_address")
@pulumi.output_type
class NetworkProfileResponse(dict):
"""
Specifies the network settings for the HANA instance disks.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "circuitId":
suggest = "circuit_id"
elif key == "networkInterfaces":
suggest = "network_interfaces"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in NetworkProfileResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
NetworkProfileResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
NetworkProfileResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
circuit_id: Optional[str] = None,
network_interfaces: Optional[Sequence['outputs.IpAddressResponse']] = None):
"""
Specifies the network settings for the HANA instance disks.
:param str circuit_id: Specifies the circuit id for connecting to express route.
:param Sequence['IpAddressResponse'] network_interfaces: Specifies the network interfaces for the HANA instance.
"""
if circuit_id is not None:
pulumi.set(__self__, "circuit_id", circuit_id)
if network_interfaces is not None:
pulumi.set(__self__, "network_interfaces", network_interfaces)
@property
@pulumi.getter(name="circuitId")
def circuit_id(self) -> Optional[str]:
"""
Specifies the circuit id for connecting to express route.
"""
return pulumi.get(self, "circuit_id")
@property
@pulumi.getter(name="networkInterfaces")
def network_interfaces(self) -> Optional[Sequence['outputs.IpAddressResponse']]:
"""
Specifies the network interfaces for the HANA instance.
"""
return pulumi.get(self, "network_interfaces")
@pulumi.output_type
class OSProfileResponse(dict):
"""
Specifies the operating system settings for the HANA instance.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "computerName":
suggest = "computer_name"
elif key == "osType":
suggest = "os_type"
elif key == "sshPublicKey":
suggest = "ssh_public_key"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in OSProfileResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
OSProfileResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
OSProfileResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
computer_name: Optional[str] = None,
os_type: Optional[str] = None,
ssh_public_key: Optional[str] = None,
version: Optional[str] = None):
"""
Specifies the operating system settings for the HANA instance.
:param str computer_name: Specifies the host OS name of the HANA instance.
:param str os_type: This property allows you to specify the type of the OS.
:param str ssh_public_key: Specifies the SSH public key used to access the operating system.
:param str version: Specifies version of operating system.
"""
if computer_name is not None:
pulumi.set(__self__, "computer_name", computer_name)
if os_type is not None:
pulumi.set(__self__, "os_type", os_type)
if ssh_public_key is not None:
pulumi.set(__self__, "ssh_public_key", ssh_public_key)
if version is not None:
pulumi.set(__self__, "version", version)
@property
@pulumi.getter(name="computerName")
def computer_name(self) -> Optional[str]:
"""
Specifies the host OS name of the HANA instance.
"""
return pulumi.get(self, "computer_name")
@property
@pulumi.getter(name="osType")
def os_type(self) -> Optional[str]:
"""
This property allows you to specify the type of the OS.
"""
return pulumi.get(self, "os_type")
@property
@pulumi.getter(name="sshPublicKey")
def ssh_public_key(self) -> Optional[str]:
"""
Specifies the SSH public key used to access the operating system.
"""
return pulumi.get(self, "ssh_public_key")
@property
@pulumi.getter
def version(self) -> Optional[str]:
"""
Specifies version of operating system.
"""
return pulumi.get(self, "version")
@pulumi.output_type
class StorageProfileResponse(dict):
"""
Specifies the storage settings for the HANA instance disks.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "nfsIpAddress":
suggest = "nfs_ip_address"
elif key == "osDisks":
suggest = "os_disks"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in StorageProfileResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
StorageProfileResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
StorageProfileResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
nfs_ip_address: Optional[str] = None,
os_disks: Optional[Sequence['outputs.DiskResponse']] = None):
"""
Specifies the storage settings for the HANA instance disks.
:param str nfs_ip_address: IP Address to connect to storage.
:param Sequence['DiskResponse'] os_disks: Specifies information about the operating system disk used by the hana instance.
"""
if nfs_ip_address is not None:
pulumi.set(__self__, "nfs_ip_address", nfs_ip_address)
if os_disks is not None:
pulumi.set(__self__, "os_disks", os_disks)
@property
@pulumi.getter(name="nfsIpAddress")
def nfs_ip_address(self) -> Optional[str]:
"""
IP Address to connect to storage.
"""
return pulumi.get(self, "nfs_ip_address")
@property
@pulumi.getter(name="osDisks")
def os_disks(self) -> Optional[Sequence['outputs.DiskResponse']]:
"""
Specifies information about the operating system disk used by the hana instance.
"""
return pulumi.get(self, "os_disks")
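
# --- Minimal consumption sketch; the helper below is illustrative only, and the
# profile argument stands in for a value returned by the hanaonazure API.
def describe_storage(profile: StorageProfileResponse) -> None:
    """Print the NFS address and the OS disks carried by a StorageProfileResponse."""
    print('NFS address:', profile.nfs_ip_address)
    for disk in (profile.os_disks or []):
        print('  disk', disk.name, 'lun', disk.lun, 'size', disk.disk_size_gb, 'GB')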
| 34.291553
| 197
| 0.626857
|
ed1247ca00d325b3abde76f5cb8ce083eee97e7e
| 36,417
|
py
|
Python
|
tests/acceptance/test_Urls.py
|
AntonKuzminRussia/web-scout
|
5b8fed2c5917c9ecc210052703a65f1204f4b347
|
[
"MIT"
] | 6
|
2017-10-11T18:56:05.000Z
|
2019-09-29T21:45:05.000Z
|
tests/acceptance/test_Urls.py
|
AntonKuzminRussia/web-scout
|
5b8fed2c5917c9ecc210052703a65f1204f4b347
|
[
"MIT"
] | 3
|
2021-03-31T19:17:30.000Z
|
2021-12-13T20:16:23.000Z
|
tests/acceptance/test_Urls.py
|
AntonKuzminRussia/web-scout
|
5b8fed2c5917c9ecc210052703a65f1204f4b347
|
[
"MIT"
] | null | null | null |
import subprocess
import os
import time
import re
runPath = os.path.realpath(os.path.dirname(os.path.abspath(__file__)) + '/../../')
# TODO: test the *-re options with real regex patterns, not only plain strings
class Test_Urls(object):
dict_path = '/tmp/wstest.dict'
headers_file_path = '/tmp/wstest.headers_file'
def get_results_count(self, output):
        return len(re.findall(r'^(\d+ http)', output, re.M))
def test_dict(self):
fh = open(self.dict_path, 'w')
fh.write("dafs-simple-dict-a.php\naaa\ndafs-simple-dict-b.php\nbbb\ndafs-simple-dict-9.php\n")
fh.close()
output = subprocess.check_output([
'./ws.py',
'UrlsDict',
'--template',
'http://wsat.local/@',
'--dict',
self.dict_path
])
print(output)
output = output.decode("utf8")
assert self.get_results_count(output) == 3
assert output.count("/dafs-simple-dict-a.php") == 1
assert output.count("/dafs-simple-dict-b.php") == 1
assert output.count("/dafs-simple-dict-9.php") == 1
def test_mask(self):
output = subprocess.check_output([
'./ws.py',
'UrlsMask',
'--template',
'http://wsat.local/dafs-simple-mask-@.php',
'--mask',
'?l?d,1,1'
])
print(output)
output = output.decode("utf8")
assert self.get_results_count(output) == 3
assert output.count("/dafs-simple-mask-a.php") == 1
assert output.count("/dafs-simple-mask-b.php") == 1
assert output.count("/dafs-simple-mask-9.php") == 1
def test_comb(self):
fh = open(self.dict_path, 'w')
fh.write("dafs-simpleaaa-\ndafs-simple-comb-\ndafs-simplebbb-")
fh.close()
output = subprocess.check_output([
'./ws.py',
'UrlsCombine',
'--template',
'http://wsat.local/@.php',
'--combine-template',
'%d%%m%',
'--dict',
self.dict_path,
'--mask',
'?l?d,1,1'
])
print(output)
output = output.decode("utf8")
assert self.get_results_count(output) == 4
assert output.count("/dafs-simple-comb-a.php") == 1
assert output.count("/dafs-simple-comb-b.php") == 1
assert output.count("/dafs-simple-comb-9.php") == 1
assert output.count("/dafs-simple-comb-x.php") == 1
def test_not_found_re_dict(self):
fh = open(self.dict_path, 'w')
fh.write("dafs-not-found-re-dict-a.php\ndafs-not-found-reaaa\ndafs-not-found-re-dict-b.php\ndafs-not-found-rebbb\ndafs-not-found-re-dict-9.php\n")
fh.close()
output = subprocess.check_output([
'./ws.py',
'UrlsDict',
'--template',
'http://wsat.local/@',
'--not-found-re',
'always 200',
'--dict',
self.dict_path
])
print(output)
output = output.decode("utf8")
assert self.get_results_count(output) == 3
assert output.count("/dafs-not-found-re-dict-a.php") == 1
assert output.count("/dafs-not-found-re-dict-b.php") == 1
assert output.count("/dafs-not-found-re-dict-9.php") == 1
def test_not_found_re_mask(self):
output = subprocess.check_output([
'./ws.py',
'UrlsMask',
'--template',
'http://wsat.local/dafs-not-found-re-mask-@.php',
'--not-found-re',
'always 200',
'--mask',
'?l?d,1,1'
])
print(output)
output = output.decode("utf8")
assert self.get_results_count(output) == 3
assert output.count("/dafs-not-found-re-mask-a.php") == 1
assert output.count("/dafs-not-found-re-mask-b.php") == 1
assert output.count("/dafs-not-found-re-mask-9.php") == 1
def test_not_found_re_comb(self):
fh = open(self.dict_path, 'w')
fh.write("dafs-not-found-re-aaa\ndafs-not-found-re-comb-\ndafs-not-found-re-bbb")
fh.close()
output = subprocess.check_output([
'./ws.py',
'UrlsCombine',
'--template',
'http://wsat.local/@.php',
'--not-found-re',
'always 200',
'--combine-template',
'%d%%m%',
'--dict',
self.dict_path,
'--mask',
'?l?d,1,1'
])
print(output)
output = output.decode("utf8")
assert self.get_results_count(output) == 3
assert output.count("/dafs-not-found-re-comb-a.php") == 1
assert output.count("/dafs-not-found-re-comb-b.php") == 1
assert output.count("/dafs-not-found-re-comb-9.php") == 1
def test_found_re_dict(self):
fh = open(self.dict_path, 'w')
fh.write("dafs-found-re-dict-a.php\nfreaaa\ndafs-found-re-dict-b.php\nfrebbb\ndafs-found-re-dict-9.php\n")
fh.close()
output = subprocess.check_output([
'./ws.py',
'UrlsDict',
'--template',
'http://wsat.local/@',
'--found-re',
'Really',
'--dict',
self.dict_path
])
output = output.decode("utf8")
assert self.get_results_count(output) == 3
assert output.count("/dafs-found-re-dict-a.php") == 1
assert output.count("/dafs-found-re-dict-b.php") == 1
assert output.count("/dafs-found-re-dict-9.php") == 1
def test_found_re_mask(self):
output = subprocess.check_output([
'./ws.py',
'UrlsMask',
'--template',
'http://wsat.local/dafs-found-re-mask-@.php',
'--found-re',
'Really',
'--mask',
'?l?d,1,1'
])
print(output)
output = output.decode("utf8")
assert self.get_results_count(output) == 3
assert output.count("/dafs-found-re-mask-a.php") == 1
assert output.count("/dafs-found-re-mask-b.php") == 1
assert output.count("/dafs-found-re-mask-9.php") == 1
def test_found_re_comb(self):
fh = open(self.dict_path, 'w')
fh.write("dafs-found-re-aaa\ndafs-found-re-comb-\ndafs-found-re-bbb")
fh.close()
output = subprocess.check_output([
'./ws.py',
'UrlsCombine',
'--template',
'http://wsat.local/@.php',
'--found-re',
'Really',
'--combine-template',
'%d%%m%',
'--dict',
self.dict_path,
'--mask',
'?l?d,1,1'
])
print(output)
output = output.decode("utf8")
assert self.get_results_count(output) == 3
assert output.count("/dafs-found-re-comb-a.php") == 1
assert output.count("/dafs-found-re-comb-b.php") == 1
assert output.count("/dafs-found-re-comb-9.php") == 1
def test_not_found_size_dict(self):
fh = open(self.dict_path, 'w')
fh.write("dafs-not-found-size-dict-a.php\ndafs-not-found-sizeaaa\ndafs-not-found-size-dict-b.php\ndafs-not-found-sizebbb\ndafs-not-found-size-dict-9.php\n")
fh.close()
output = subprocess.check_output([
'./ws.py',
'UrlsDict',
'--template',
'http://wsat.local/@',
'--not-found-size',
'20',
'--dict',
self.dict_path
])
print(output)
output = output.decode("utf8")
assert self.get_results_count(output) == 3
assert output.count("/dafs-not-found-size-dict-a.php") == 1
assert output.count("/dafs-not-found-size-dict-b.php") == 1
assert output.count("/dafs-not-found-size-dict-9.php") == 1
def test_not_found_size_mask(self):
output = subprocess.check_output([
'./ws.py',
'UrlsMask',
'--template',
'http://wsat.local/dafs-not-found-size-mask-@.php',
'--not-found-size',
'20',
'--mask',
'?l?d,1,1'
])
print(output)
output = output.decode("utf8")
assert self.get_results_count(output) == 3
assert output.count("/dafs-not-found-size-mask-a.php") == 1
assert output.count("/dafs-not-found-size-mask-b.php") == 1
assert output.count("/dafs-not-found-size-mask-9.php") == 1
def test_not_found_size_comb(self):
fh = open(self.dict_path, 'w')
fh.write("dafs-not-found-sizeaaa\ndafs-not-found-size-comb-\ndafs-not-found-sizebbb")
fh.close()
output = subprocess.check_output([
'./ws.py',
'UrlsCombine',
'--template',
'http://wsat.local/@.php',
'--not-found-size',
'20',
'--combine-template',
'%d%%m%',
'--dict',
self.dict_path,
'--mask',
'?l?d,1,1'
])
print(output)
output = output.decode("utf8")
assert self.get_results_count(output) == 3
assert output.count("/dafs-not-found-size-comb-a.php") == 1
assert output.count("/dafs-not-found-size-comb-b.php") == 1
assert output.count("/dafs-not-found-size-comb-9.php") == 1
def test_not_found_codes_dict(self):
fh = open(self.dict_path, 'w')
fh.write("dafs-not-found-codes-dict-a.php\ndafs-not-found-codesaaa\ndafs-not-found-codes-dict-b.php\ndafs-not-found-codesbbb\ndafs-not-found-codes-dict-9.php\n")
fh.close()
output = subprocess.check_output([
'./ws.py',
'UrlsDict',
'--template',
'http://wsat.local/@',
'--not-found-codes',
'200',
'--dict',
self.dict_path
])
print(output)
output = output.decode("utf8")
assert self.get_results_count(output) == 3
assert output.count("/dafs-not-found-codes-dict-a.php") == 1
assert output.count("/dafs-not-found-codes-dict-b.php") == 1
assert output.count("/dafs-not-found-codes-dict-9.php") == 1
def test_not_found_codes_mask(self):
output = subprocess.check_output([
'./ws.py',
'UrlsMask',
'--template',
'http://wsat.local/dafs-not-found-codes-mask-@.php',
'--not-found-codes',
'200',
'--mask',
'?l?d,1,1'
])
print(output)
output = output.decode("utf8")
assert self.get_results_count(output) == 3
assert output.count("/dafs-not-found-codes-mask-a.php") == 1
assert output.count("/dafs-not-found-codes-mask-b.php") == 1
assert output.count("/dafs-not-found-codes-mask-9.php") == 1
def test_not_found_codes_comb(self):
fh = open(self.dict_path, 'w')
fh.write("dafs-not-found-codes-aaa\ndafs-not-found-codes-comb-\ndafs-not-found-codes-bbb")
fh.close()
output = subprocess.check_output([
'./ws.py',
'UrlsCombine',
'--template',
'http://wsat.local/@.php',
'--not-found-codes',
'200',
'--combine-template',
'%d%%m%',
'--dict',
self.dict_path,
'--mask',
'?l?d,1,1'
])
print(output)
output = output.decode("utf8")
assert self.get_results_count(output) == 3
assert output.count("/dafs-not-found-codes-comb-a.php") == 1
assert output.count("/dafs-not-found-codes-comb-b.php") == 1
assert output.count("/dafs-not-found-codes-comb-9.php") == 1
def test_dict_ignore_words(self):
fh = open(self.dict_path, 'w')
fh.write("dafs-simple-dict-a.php\naaa\ndafs-simple-dict-b.php\nbbb\ndafs-simple-dict-9.php\n")
fh.close()
output = subprocess.check_output([
'./ws.py',
'UrlsDict',
'--ignore-words-re',
'b',
'--template',
'http://wsat.local/@',
'--dict',
self.dict_path
])
print(output)
output = output.decode("utf8")
assert self.get_results_count(output) == 2
assert output.count("/dafs-simple-dict-a.php") == 1
assert output.count("/dafs-simple-dict-9.php") == 1
def test_mask_ignore_words(self):
output = subprocess.check_output([
'./ws.py',
'UrlsMask',
'--ignore-words-re',
'b',
'--template',
'http://wsat.local/dafs-simple-mask-@.php',
'--mask',
'?l?d,1,1'
])
print(output)
output = output.decode("utf8")
assert self.get_results_count(output) == 2
assert output.count("/dafs-simple-mask-a.php") == 1
assert output.count("/dafs-simple-mask-9.php") == 1
def test_comb_ignore_words(self):
fh = open(self.dict_path, 'w')
fh.write("dafs-simpleaaa\ndafs-simple-comb-\ndafs-simplebbb")
fh.close()
output = subprocess.check_output([
'./ws.py',
'UrlsCombine',
'--ignore-words-re',
'x', #TODO fix here
'--template',
'http://wsat.local/@.php',
'--combine-template',
'%d%%m%',
'--dict',
self.dict_path,
'--mask',
'?l?d,1,1'
])
print(output)
output = output.decode("utf8")
assert self.get_results_count(output) == 3
assert output.count("/dafs-simple-comb-a.php") == 1
assert output.count("/dafs-simple-comb-b.php") == 1
assert output.count("/dafs-simple-comb-9.php") == 1
assert output.count("/dafs-simple-comb-x.php") == 0
def test_dict_retest_codes(self):
fh = open(self.dict_path, 'w')
fh.write("dafs-retest-codes-dict-a.php\naaa\ndafs-retest-codes-dict-b.php\nbbb\ndafs-retest-codes-dict-9.php\n")
fh.close()
output = subprocess.check_output([
'./ws.py',
'UrlsDict',
'--retest-codes',
'503',
'--template',
'http://wsat.local/@',
'--dict',
self.dict_path
])
print(output)
output = output.decode("utf8")
assert self.get_results_count(output) == 3
assert output.count("/dafs-retest-codes-dict-a.php") == 1
assert output.count("/dafs-retest-codes-dict-b.php") == 1
assert output.count("/dafs-retest-codes-dict-9.php") == 1
def test_mask_retest_codes(self):
output = subprocess.check_output([
'./ws.py',
'UrlsMask',
'--retest-codes',
'503',
'--template',
'http://wsat.local/dafs-retest-codes-mask-@.php',
'--mask',
'?l?d,1,1'
])
print(output)
output = output.decode("utf8")
assert self.get_results_count(output) == 3
assert output.count("/dafs-retest-codes-mask-a.php") == 1
assert output.count("/dafs-retest-codes-mask-b.php") == 1
assert output.count("/dafs-retest-codes-mask-9.php") == 1
def test_comb_retest_codes(self):
fh = open(self.dict_path, 'w')
fh.write("dafs-retest-codesaaa\ndafs-retest-codes-comb-\ndafs-retest-codesbbb")
fh.close()
output = subprocess.check_output([
'./ws.py',
'UrlsCombine',
'--retest-codes',
'503',
'--template',
'http://wsat.local/@.php',
'--combine-template',
'%d%%m%',
'--dict',
self.dict_path,
'--mask',
'?l?d,1,1'
])
print(output)
output = output.decode("utf8")
assert self.get_results_count(output) == 3
assert output.count("/dafs-retest-codes-comb-a.php") == 1
assert output.count("/dafs-retest-codes-comb-b.php") == 1
assert output.count("/dafs-retest-codes-comb-9.php") == 1
def test_dict_retest_re(self):
fh = open(self.dict_path, 'w')
fh.write("dafs-retest-re-dict-a.php\naaa\ndafs-retest-re-dict-b.php\nbbb\ndafs-retest-re-dict-9.php\n")
fh.close()
output = subprocess.check_output([
'./ws.py',
'UrlsDict',
'--retest-re',
'unavailable',
'--template',
'http://wsat.local/@',
'--dict',
self.dict_path
])
print(output)
output = output.decode("utf8")
assert self.get_results_count(output) == 3
assert output.count("/dafs-retest-re-dict-a.php") == 1
assert output.count("/dafs-retest-re-dict-b.php") == 1
assert output.count("/dafs-retest-re-dict-9.php") == 1
def test_mask_retest_re(self):
output = subprocess.check_output([
'./ws.py',
'UrlsMask',
'--retest-re',
'unavailable',
'--template',
'http://wsat.local/dafs-retest-re-mask-@.php',
'--mask',
'?l?d,1,1'
])
print(output)
output = output.decode("utf8")
assert self.get_results_count(output) == 3
assert output.count("/dafs-retest-re-mask-a.php") == 1
assert output.count("/dafs-retest-re-mask-b.php") == 1
assert output.count("/dafs-retest-re-mask-9.php") == 1
def test_comb_retest_re(self):
fh = open(self.dict_path, 'w')
fh.write("dafs-retest-re-aaa\ndafs-retest-re-comb-\ndafs-retest-re-bbb")
fh.close()
output = subprocess.check_output([
'./ws.py',
'UrlsCombine',
'--retest-re',
'unavailable',
'--template',
'http://wsat.local/@.php',
'--combine-template',
'%d%%m%',
'--dict',
self.dict_path,
'--mask',
'?l?d,1,1'
])
print(output)
output = output.decode("utf8")
assert self.get_results_count(output) == 3
assert output.count("/dafs-retest-re-comb-a.php") == 1
assert output.count("/dafs-retest-re-comb-b.php") == 1
assert output.count("/dafs-retest-re-comb-9.php") == 1
def test_dict_delay(self):
fh = open(self.dict_path, 'w')
fh.write("dafs-simple-dict-a.php\naaa\ndafs-simple-dict-b.php\nbbb\ndafs-simple-dict-9.php\n")
fh.close()
stime = int(time.time())
output = subprocess.check_output([
'./ws.py',
'UrlsDict',
'--threads',
'1',
'--delay',
'2',
'--template',
'http://wsat.local/@',
'--dict',
self.dict_path
])
etime = int(time.time())
print(output)
assert etime-stime > 10
def test_mask_delay(self):
stime = int(time.time())
output = subprocess.check_output([
'./ws.py',
'UrlsMask',
'--threads',
'1',
'--delay',
'1',
'--template',
'http://wsat.local/@',
'--mask',
'?d,1,1',
])
etime = int(time.time())
print(output)
output = output.decode("utf8")
assert etime-stime > 10
def test_comb_delay(self):
fh = open(self.dict_path, 'w')
fh.write("test\n")
fh.close()
stime = int(time.time())
output = subprocess.check_output([
'./ws.py',
'UrlsCombine',
'--threads',
'1',
'--delay',
'1',
'--template',
'http://wsat.local/@',
'--combine-template',
'%d%%m%',
'--dict',
self.dict_path,
'--mask',
'?l?d,1,1'
])
etime = int(time.time())
print(output)
assert etime-stime > 10
def test_dict_selenium(self):
fh = open(self.dict_path, 'w')
fh.write("dafs-simple-dict-a.php\ndafs-simpleaaa\ndafs-simple-dict-b.php\nsimplebbb\ndafs-simple-dict-9.php\n")
fh.close()
output = subprocess.check_output([
'./ws.py',
'UrlsDict',
'--selenium',
'1',
'--not-found-re',
'<h1>404 Not Found</h1>',
'--template',
'http://wsat.local/@',
'--dict',
self.dict_path
])
print(output)
output = output.decode("utf8")
assert self.get_results_count(output) == 3
assert output.count("/dafs-simple-dict-a.php") == 1
assert output.count("/dafs-simple-dict-b.php") == 1
assert output.count("/dafs-simple-dict-9.php") == 1
def test_mask_selenium(self):
output = subprocess.check_output([
'./ws.py',
'UrlsMask',
'--selenium',
'1',
'--not-found-re',
'<h1>404 Not Found</h1>',
'--template',
'http://wsat.local/dafs-simple-mask-@.php',
'--mask',
'?l?d,1,1'
])
print(output)
output = output.decode("utf8")
assert self.get_results_count(output) == 3
assert output.count("/dafs-simple-mask-a.php") == 1
assert output.count("/dafs-simple-mask-b.php") == 1
assert output.count("/dafs-simple-mask-9.php") == 1
def test_comb_selenium(self):
fh = open(self.dict_path, 'w')
fh.write("dafs-simple-comb-\ndafs-simple-aaa\n")
fh.close()
output = subprocess.check_output([
'./ws.py',
'UrlsCombine',
'--selenium',
'1',
'--not-found-re',
'<h1>404 Not Found</h1>',
'--template',
'http://wsat.local/@.php',
'--combine-template',
'%d%%m%',
'--dict',
self.dict_path,
'--mask',
'?l?d,1,1'
])
print(output)
output = output.decode("utf8")
assert self.get_results_count(output) == 4
assert output.count("/dafs-simple-comb-a.php") == 1
assert output.count("/dafs-simple-comb-b.php") == 1
assert output.count("/dafs-simple-comb-9.php") == 1
assert output.count("/dafs-simple-comb-x.php") == 1
def test_dict_selenium_not_found_size(self):
fh = open(self.dict_path, 'w')
fh.write("dafs-simple-dict-a.php\nsimpleaaa\ndafs-simple-dict-b.php\nsimplebbb\ndafs-simple-dict-9.php\n")
fh.close()
output = subprocess.check_output([
'./ws.py',
'UrlsDict',
'--selenium',
'1',
'--not-found-size',
'61',
'--template',
'http://wsat.local/@',
'--dict',
self.dict_path
])
print(output)
output = output.decode("utf8")
assert self.get_results_count(output) == 3
assert output.count("/dafs-simple-dict-a.php") == 1
assert output.count("/dafs-simple-dict-b.php") == 1
assert output.count("/dafs-simple-dict-9.php") == 1
def test_mask_selenium_not_found_size(self):
output = subprocess.check_output([
'./ws.py',
'UrlsMask',
'--selenium',
'1',
'--not-found-size',
'61',
'--template',
'http://wsat.local/dafs-simple-mask-@.php',
'--mask',
'?l?d,1,1'
])
print(output)
output = output.decode("utf8")
assert self.get_results_count(output) == 3
assert output.count("/dafs-simple-mask-a.php") == 1
assert output.count("/dafs-simple-mask-b.php") == 1
assert output.count("/dafs-simple-mask-9.php") == 1
def test_comb_selenium_not_found_size(self):
fh = open(self.dict_path, 'w')
fh.write("dafs-simple-comb-\nsimpleaaa\n")
fh.close()
output = subprocess.check_output([
'./ws.py',
'UrlsCombine',
'--selenium',
'1',
'--not-found-size',
'61',
'--template',
'http://wsat.local/@.php',
'--combine-template',
'%d%%m%',
'--dict',
self.dict_path,
'--mask',
'?l?d,1,1'
])
print(output)
output = output.decode("utf8")
assert self.get_results_count(output) == 4
assert output.count("/dafs-simple-comb-a.php") == 1
assert output.count("/dafs-simple-comb-b.php") == 1
assert output.count("/dafs-simple-comb-9.php") == 1
assert output.count("/dafs-simple-comb-x.php") == 1
def test_dict_selenium_wait_re(self):
fh = open(self.dict_path, 'w')
fh.write("dafs-selenium-browser-wait-re-dict-a.php\naaa\ndafs-selenium-browser-wait-re-dict-b.php\nbbb\ndafs-selenium-browser-wait-re-dict-9.php\n")
fh.close()
output = subprocess.check_output([
'./ws.py',
'UrlsDict',
'--selenium',
'1',
'--not-found-re',
'404 Not Found',
'--browser-wait-re',
'Checking your browser',
'--template',
'http://wsat.local/@',
'--dict',
self.dict_path
])
print(output)
output = output.decode("utf8")
assert self.get_results_count(output) == 2
assert output.count("/dafs-selenium-browser-wait-re-dict-a.php") == 1
assert output.count("/dafs-selenium-browser-wait-re-dict-b.php") == 1
def test_mask_selenium_wait_re(self):
output = subprocess.check_output([
'./ws.py',
'UrlsMask',
'--selenium',
'1',
'--not-found-re',
'404 Not Found',
'--browser-wait-re',
'Checking your browser',
'--template',
'http://wsat.local/dafs-selenium-browser-wait-re-mask-@.php',
'--mask',
'?l?d,1,1',
])
print(output)
output = output.decode("utf8")
assert self.get_results_count(output) == 2
assert output.count("/dafs-selenium-browser-wait-re-mask-a.php") == 1
assert output.count("/dafs-selenium-browser-wait-re-mask-b.php") == 1
def test_comb_selenium_wait_re(self):
fh = open(self.dict_path, 'w')
fh.write("dafs-selenium-browser-wait-re-dict-\naaa")
fh.close()
output = subprocess.check_output([
'./ws.py',
'UrlsCombine',
'--combine-template',
'%d%%m%',
'--selenium',
'1',
'--not-found-re',
'404 Not Found',
'--browser-wait-re',
'Checking your browser',
'--template',
'http://wsat.local/@.php',
'--dict',
self.dict_path,
'--mask',
'?l?d,1,1'
])
print(output)
output = output.decode("utf8")
assert self.get_results_count(output) == 2
assert output.count("/dafs-selenium-browser-wait-re-dict-a.php") == 1
assert output.count("/dafs-selenium-browser-wait-re-dict-b.php") == 1
def test_dict_headers_file(self):
fh = open(self.dict_path, 'w')
fh.write("dafs-headers-file-dict-a.php\nheadersaaa\ndafs-headers-file-dict-b.php\nheadersbbb\ndafs-headers-file-dict-9.php\n")
fh.close()
fh = open(self.headers_file_path, 'w')
fh.write("Cookie: a=b\n")
fh.close()
output = subprocess.check_output([
'./ws.py',
'UrlsDict',
'--headers-file',
self.headers_file_path,
'--template',
'http://wsat.local/@',
'--dict',
self.dict_path,
])
print(output)
output = output.decode("utf8")
assert self.get_results_count(output) == 3
assert output.count("/dafs-headers-file-dict-a.php") == 1
assert output.count("/dafs-headers-file-dict-b.php") == 1
assert output.count("/dafs-headers-file-dict-9.php") == 1
def test_mask_headers_file(self):
fh = open(self.headers_file_path, 'w')
fh.write("Cookie: a=b\n")
fh.close()
output = subprocess.check_output([
'./ws.py',
'UrlsMask',
'--headers-file',
self.headers_file_path,
'--template',
'http://wsat.local/dafs-headers-file-mask-@.php',
'--mask',
'?l?d,1,1'
])
print(output)
output = output.decode('utf8')
assert self.get_results_count(output) == 3
assert output.count("/dafs-headers-file-mask-a.php") == 1
assert output.count("/dafs-headers-file-mask-b.php") == 1
assert output.count("/dafs-headers-file-mask-9.php") == 1
def test_comb_headers_file(self):
fh = open(self.dict_path, 'w')
fh.write("headersaaa\ndafs-headers-file-comb-\nheadersbbb")
fh.close()
fh = open(self.headers_file_path, 'w')
fh.write("Cookie: a=b\n")
fh.close()
output = subprocess.check_output([
'./ws.py',
'UrlsCombine',
'--headers-file',
self.headers_file_path,
'--template',
'http://wsat.local/@.php',
'--combine-template',
'%d%%m%',
'--dict',
self.dict_path,
'--mask',
'?l?d,1,1'
])
print(output)
output = output.decode("utf8")
assert self.get_results_count(output) == 3
assert output.count("/dafs-headers-file-comb-a.php") == 1
assert output.count("/dafs-headers-file-comb-b.php") == 1
assert output.count("/dafs-headers-file-comb-9.php") == 1
def test_dict_method_post(self):
fh = open(self.dict_path, 'w')
fh.write("dafs-post-dict-a.php\naaa\ndafs-post-dict-b.php\nbbb\ndafs-post-dict-9.php\n")
fh.close()
output = subprocess.check_output([
'./ws.py',
'UrlsDict',
'--method',
'POST',
'--template',
'http://wsat.local/@',
'--dict',
self.dict_path
])
print(output)
output = output.decode("utf8")
assert self.get_results_count(output) == 3
assert output.count("/dafs-post-dict-a.php") == 1
assert output.count("/dafs-post-dict-b.php") == 1
assert output.count("/dafs-post-dict-9.php") == 1
def test_mask_method_post(self):
output = subprocess.check_output([
'./ws.py',
'UrlsMask',
'--method',
'POST',
'--template',
'http://wsat.local/dafs-post-mask-@.php',
'--mask',
'?l?d,1,1',
])
print(output)
output = output.decode("utf8")
assert self.get_results_count(output) == 3
assert output.count("/dafs-post-mask-a.php") == 1
assert output.count("/dafs-post-mask-b.php") == 1
assert output.count("/dafs-post-mask-9.php") == 1
def test_comb_method_post(self):
fh = open(self.dict_path, 'w')
fh.write("dafs-post-comb-\naaa\n")
fh.close()
output = subprocess.check_output([
'./ws.py',
'UrlsCombine',
'--method',
'POST',
'--template',
'http://wsat.local/@.php',
'--combine-template',
'%d%%m%',
'--dict',
self.dict_path,
'--mask',
'?l?d,1,1'
])
print(output)
output = output.decode("utf8")
assert self.get_results_count(output) == 3
assert output.count("/dafs-post-comb-a.php") == 1
assert output.count("/dafs-post-comb-b.php") == 1
assert output.count("/dafs-post-comb-9.php") == 1
def test_dict_method_head(self):
fh = open(self.dict_path, 'w')
fh.write("dafs-head-dict-a.php\naaa\ndafs-head-dict-b.php\nbbb\ndafs-head-dict-9.php\n")
fh.close()
output = subprocess.check_output([
'./ws.py',
'UrlsDict',
'--method',
'HEAD',
'--template',
'http://wsat.local/@',
'--dict',
self.dict_path
])
print(output)
output = output.decode("utf8")
assert self.get_results_count(output) == 3
assert output.count("/dafs-head-dict-a.php") == 1
assert output.count("/dafs-head-dict-b.php") == 1
assert output.count("/dafs-head-dict-9.php") == 1
def test_mask_method_head(self):
output = subprocess.check_output([
'./ws.py',
'UrlsMask',
'--method',
'HEAD',
'--template',
'http://wsat.local/dafs-head-mask-@.php',
'--mask',
'?l?d,1,1'
])
print(output)
output = output.decode("utf8")
assert self.get_results_count(output) == 3
assert output.count("/dafs-head-mask-a.php") == 1
assert output.count("/dafs-head-mask-b.php") == 1
assert output.count("/dafs-head-mask-9.php") == 1
def test_comb_method_head(self):
fh = open(self.dict_path, 'w')
fh.write("dafs-head-comb-\naaa\n")
fh.close()
output = subprocess.check_output([
'./ws.py',
'UrlsCombine',
'--method',
'HEAD',
'--template',
'http://wsat.local/@.php',
'--combine-template',
'%d%%m%',
'--dict',
self.dict_path,
'--mask',
'?l?d,1,1'
])
print(output)
output = output.decode("utf8")
assert self.get_results_count(output) == 3
assert output.count("/dafs-head-comb-a.php") == 1
assert output.count("/dafs-head-comb-b.php") == 1
assert output.count("/dafs-head-comb-9.php") == 1
def test_dict_method_get_default(self):
fh = open(self.dict_path, 'w')
fh.write("dafs-get-dict-a.php\naaa\ndafs-get-dict-b.php\nbbb\ndafs-get-dict-9.php\n")
fh.close()
output = subprocess.check_output([
'./ws.py',
'UrlsDict',
'--template',
'http://wsat.local/@',
'--dict',
self.dict_path
])
print(output)
output = output.decode("utf8")
assert self.get_results_count(output) == 3
assert output.count("/dafs-get-dict-a.php") == 1
assert output.count("/dafs-get-dict-b.php") == 1
assert output.count("/dafs-get-dict-9.php") == 1
def test_mask_method_get_default(self):
output = subprocess.check_output([
'./ws.py',
'UrlsMask',
'--template',
'http://wsat.local/dafs-get-mask-@.php',
'--mask',
'?l?d,1,1',
])
print(output)
output = output.decode("utf8")
assert self.get_results_count(output) == 3
assert output.count("/dafs-get-mask-a.php") == 1
assert output.count("/dafs-get-mask-b.php") == 1
assert output.count("/dafs-get-mask-9.php") == 1
def test_comb_method_get_default(self):
fh = open(self.dict_path, 'w')
fh.write("dafs-get-comb-\naaa\n")
fh.close()
output = subprocess.check_output([
'./ws.py',
'UrlsCombine',
'--template',
'http://wsat.local/@.php',
'--combine-template',
'%d%%m%',
'--dict',
self.dict_path,
'--mask',
'?l?d,1,1'
])
print(output)
output = output.decode("utf8")
assert self.get_results_count(output) == 3
assert output.count("/dafs-get-comb-a.php") == 1
assert output.count("/dafs-get-comb-b.php") == 1
assert output.count("/dafs-get-comb-9.php") == 1
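
# --- Minimal sketch of how get_results_count() above reads the tool output; the
# sample text is invented for illustration.
if __name__ == '__main__':
    sample_output = "1 http://wsat.local/a.php\n2 http://wsat.local/b.php\nsome log line\n"
    print(len(re.findall(r'^(\d+ http)', sample_output, re.M)))  # prints 2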
| 33.287934
| 169
| 0.5046
|
c812e6345ce23046dfab6e30c21976fdde2c7039
| 912
|
py
|
Python
|
make_recog_data.py
|
piruty/voice_actor_recog
|
e13abd51fc88f841b1bfb49c29def397014cfa22
|
[
"MIT"
] | 4
|
2017-06-15T05:49:56.000Z
|
2019-03-24T15:25:06.000Z
|
make_recog_data.py
|
piruty/voice_actor_recog
|
e13abd51fc88f841b1bfb49c29def397014cfa22
|
[
"MIT"
] | null | null | null |
make_recog_data.py
|
piruty/voice_actor_recog
|
e13abd51fc88f841b1bfb49c29def397014cfa22
|
[
"MIT"
] | null | null | null |
import subprocess
import time
from progressbar import ProgressBar
import make_mfcc_data
def cmd(cmd):
    p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = p.communicate()
    # Decode so callers get str rather than bytes under Python 3.
    return stdout.decode().rstrip()
dirs = cmd("ls voice2")
labels = dirs.splitlines()
if 'doc' not in labels:
cmd("mkdir doc")
recog = open('doc/recog.txt', 'w')
progress_bar = ProgressBar(len(labels))
for class_no, label in enumerate(labels):
progress_bar.update(class_no+1)
time.sleep(0.01)
work_dir = 'voice2/' + label
voice_files = cmd('ls ' + work_dir + '/*.wav')
voices = voice_files.splitlines()
for index, voice in enumerate(voices):
ceps = make_mfcc_data.convert_to_mfcc(voice)
if ceps is None:
continue
for data in ceps[0]:
recog.write('%s ' % data)
recog.write('\n')
recog.close()
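
# --- Minimal sketch for reading back doc/recog.txt as written above; each line
# holds the space-separated values of one MFCC frame.
def load_recog(path='doc/recog.txt'):
    with open(path) as f:
        return [[float(x) for x in line.split()] for line in f if line.strip()]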
| 20.727273
| 89
| 0.652412
|
2d5c5b642eb68a87a1808f1daa019d08c9457180
| 2,081
|
py
|
Python
|
application/workprogramsapp/permissions.py
|
anastasiiaCher/analytics_backend
|
4c9c5964e80a1e1c1e8a20bfdeaf6b891b6ba9ea
|
[
"MIT"
] | null | null | null |
application/workprogramsapp/permissions.py
|
anastasiiaCher/analytics_backend
|
4c9c5964e80a1e1c1e8a20bfdeaf6b891b6ba9ea
|
[
"MIT"
] | null | null | null |
application/workprogramsapp/permissions.py
|
anastasiiaCher/analytics_backend
|
4c9c5964e80a1e1c1e8a20bfdeaf6b891b6ba9ea
|
[
"MIT"
] | null | null | null |
from rest_framework import permissions
from workprogramsapp.expertise.models import UserExpertise
class IsOwnerOrReadOnly(permissions.BasePermission):
"""
Custom permission to only allow owners of an object to edit it.
"""
def has_object_permission(self, request, view, obj):
if request.method in permissions.SAFE_METHODS:
return True
return obj.owner == request.user
class IsRpdDeveloperOrReadOnly(permissions.BasePermission):
"""
Custom permission to only allow owners of an object to edit it.
"""
def has_permission(self, request, view):
if request.method in permissions.SAFE_METHODS:
return True
return request.user.is_rpd_developer == True
class IsExpertiseMaster(permissions.BasePermission):
def has_permission(self, request, view):
return request.user.is_expertise_master
class IsMemberOfExpertise(permissions.BasePermission):
def has_permission(self, request, view):
if request.user.is_expertise_master:
return True
if 'pk' in dict(view.kwargs):
return UserExpertise.objects.filter(expert=request.user, expertise=view.kwargs['pk'])
else:
return UserExpertise.objects.filter(expert=request.user)
class IsWorkProgramMemberOfExpertise(permissions.BasePermission):
def has_permission(self, request, view):
if request.user.is_expertise_master:
return True
if 'pk' in dict(view.kwargs):
return UserExpertise.objects.filter(expert=request.user, expertise__work_program=view.kwargs['pk'])
else:
return UserExpertise.objects.filter(expert=request.user)
class IsMemberOfUserExpertise(permissions.BasePermission):
def has_permission(self, request, view):
if request.user.is_expertise_master:
return True
if 'pk' in dict(view.kwargs):
return UserExpertise.objects.filter(expert=request.user, pk=view.kwargs['pk'])
else:
return UserExpertise.objects.filter(expert=request.user)
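
# --- Minimal usage sketch (not part of this module): wiring one of the
# permission classes above into a DRF view. The view name is an assumption.
from rest_framework.response import Response
from rest_framework.views import APIView

class ExpertiseOnlyExampleView(APIView):
    permission_classes = [IsMemberOfExpertise]

    def get(self, request, pk=None):
        # Reached only when IsMemberOfExpertise.has_permission() returned True.
        return Response({'expertise': pk})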
| 33.564516
| 111
| 0.702547
|
1d68cc20a4415b266010a969423302961650af64
| 9,152
|
py
|
Python
|
assignments/assignment3/cs231n/data_utils.py
|
Zx55/cs231n-2019
|
448f177880bebd77abf444d1847b9059b8252c55
|
[
"MIT"
] | null | null | null |
assignments/assignment3/cs231n/data_utils.py
|
Zx55/cs231n-2019
|
448f177880bebd77abf444d1847b9059b8252c55
|
[
"MIT"
] | null | null | null |
assignments/assignment3/cs231n/data_utils.py
|
Zx55/cs231n-2019
|
448f177880bebd77abf444d1847b9059b8252c55
|
[
"MIT"
] | null | null | null |
from six.moves import cPickle as pickle
import numpy as np
import os
from scipy.misc import imread  # requires SciPy < 1.2, where scipy.misc.imread still exists
import platform
def load_pickle(f):
version = platform.python_version_tuple()
if version[0] == '2':
return pickle.load(f)
elif version[0] == '3':
return pickle.load(f, encoding='latin1')
raise ValueError("invalid python version: {}".format(version))
def load_CIFAR_batch(filename):
""" load single batch of cifar """
with open(filename, 'rb') as f:
datadict = load_pickle(f)
X = datadict['data']
Y = datadict['labels']
X = X.reshape(10000, 3, 32, 32).transpose(0,2,3,1).astype("float")
Y = np.array(Y)
return X, Y
def load_CIFAR10(ROOT):
""" load all of cifar """
xs = []
ys = []
for b in range(1,6):
f = os.path.join(ROOT, 'data_batch_%d' % (b, ))
X, Y = load_CIFAR_batch(f)
xs.append(X)
ys.append(Y)
Xtr = np.concatenate(xs)
Ytr = np.concatenate(ys)
del X, Y
Xte, Yte = load_CIFAR_batch(os.path.join(ROOT, 'test_batch'))
return Xtr, Ytr, Xte, Yte
def get_CIFAR10_data(num_training=49000, num_validation=1000, num_test=1000,
subtract_mean=True):
"""
Load the CIFAR-10 dataset from disk and perform preprocessing to prepare
it for classifiers. These are the same steps as we used for the SVM, but
condensed to a single function.
"""
# Load the raw CIFAR-10 data
cifar10_dir = 'cs231n/datasets/cifar-10-batches-py'
X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)
# Subsample the data
mask = list(range(num_training, num_training + num_validation))
X_val = X_train[mask]
y_val = y_train[mask]
mask = list(range(num_training))
X_train = X_train[mask]
y_train = y_train[mask]
mask = list(range(num_test))
X_test = X_test[mask]
y_test = y_test[mask]
# Normalize the data: subtract the mean image
if subtract_mean:
mean_image = np.mean(X_train, axis=0)
X_train -= mean_image
X_val -= mean_image
X_test -= mean_image
# Transpose so that channels come first
X_train = X_train.transpose(0, 3, 1, 2).copy()
X_val = X_val.transpose(0, 3, 1, 2).copy()
X_test = X_test.transpose(0, 3, 1, 2).copy()
# Package data into a dictionary
return {
'X_train': X_train, 'y_train': y_train,
'X_val': X_val, 'y_val': y_val,
'X_test': X_test, 'y_test': y_test,
}
def load_tiny_imagenet(path, dtype=np.float32, subtract_mean=True):
"""
Load TinyImageNet. Each of TinyImageNet-100-A, TinyImageNet-100-B, and
TinyImageNet-200 have the same directory structure, so this can be used
to load any of them.
Inputs:
- path: String giving path to the directory to load.
- dtype: numpy datatype used to load the data.
- subtract_mean: Whether to subtract the mean training image.
Returns: A dictionary with the following entries:
- class_names: A list where class_names[i] is a list of strings giving the
WordNet names for class i in the loaded dataset.
- X_train: (N_tr, 3, 64, 64) array of training images
- y_train: (N_tr,) array of training labels
- X_val: (N_val, 3, 64, 64) array of validation images
- y_val: (N_val,) array of validation labels
- X_test: (N_test, 3, 64, 64) array of testing images.
- y_test: (N_test,) array of test labels; if test labels are not available
(such as in student code) then y_test will be None.
- mean_image: (3, 64, 64) array giving mean training image
"""
# First load wnids
with open(os.path.join(path, 'wnids.txt'), 'r') as f:
wnids = [x.strip() for x in f]
# Map wnids to integer labels
wnid_to_label = {wnid: i for i, wnid in enumerate(wnids)}
# Use words.txt to get names for each class
with open(os.path.join(path, 'words.txt'), 'r') as f:
wnid_to_words = dict(line.split('\t') for line in f)
for wnid, words in wnid_to_words.items():
wnid_to_words[wnid] = [w.strip() for w in words.split(',')]
class_names = [wnid_to_words[wnid] for wnid in wnids]
# Next load training data.
X_train = []
y_train = []
for i, wnid in enumerate(wnids):
if (i + 1) % 20 == 0:
print('loading training data for synset %d / %d'
% (i + 1, len(wnids)))
# To figure out the filenames we need to open the boxes file
boxes_file = os.path.join(path, 'train', wnid, '%s_boxes.txt' % wnid)
with open(boxes_file, 'r') as f:
filenames = [x.split('\t')[0] for x in f]
num_images = len(filenames)
X_train_block = np.zeros((num_images, 3, 64, 64), dtype=dtype)
y_train_block = wnid_to_label[wnid] * \
np.ones(num_images, dtype=np.int64)
for j, img_file in enumerate(filenames):
img_file = os.path.join(path, 'train', wnid, 'images', img_file)
img = imread(img_file)
if img.ndim == 2:
## grayscale file
img.shape = (64, 64, 1)
X_train_block[j] = img.transpose(2, 0, 1)
X_train.append(X_train_block)
y_train.append(y_train_block)
# We need to concatenate all training data
X_train = np.concatenate(X_train, axis=0)
y_train = np.concatenate(y_train, axis=0)
# Next load validation data
with open(os.path.join(path, 'val', 'val_annotations.txt'), 'r') as f:
img_files = []
val_wnids = []
for line in f:
img_file, wnid = line.split('\t')[:2]
img_files.append(img_file)
val_wnids.append(wnid)
num_val = len(img_files)
y_val = np.array([wnid_to_label[wnid] for wnid in val_wnids])
X_val = np.zeros((num_val, 3, 64, 64), dtype=dtype)
for i, img_file in enumerate(img_files):
img_file = os.path.join(path, 'val', 'images', img_file)
img = imread(img_file)
if img.ndim == 2:
img.shape = (64, 64, 1)
X_val[i] = img.transpose(2, 0, 1)
# Next load test images
# Students won't have test labels, so we need to iterate over files in the
# images directory.
img_files = os.listdir(os.path.join(path, 'test', 'images'))
X_test = np.zeros((len(img_files), 3, 64, 64), dtype=dtype)
for i, img_file in enumerate(img_files):
img_file = os.path.join(path, 'test', 'images', img_file)
img = imread(img_file)
if img.ndim == 2:
img.shape = (64, 64, 1)
X_test[i] = img.transpose(2, 0, 1)
y_test = None
y_test_file = os.path.join(path, 'test', 'test_annotations.txt')
if os.path.isfile(y_test_file):
with open(y_test_file, 'r') as f:
img_file_to_wnid = {}
for line in f:
line = line.split('\t')
img_file_to_wnid[line[0]] = line[1]
y_test = [wnid_to_label[img_file_to_wnid[img_file]]
for img_file in img_files]
y_test = np.array(y_test)
mean_image = X_train.mean(axis=0)
if subtract_mean:
X_train -= mean_image[None]
X_val -= mean_image[None]
X_test -= mean_image[None]
return {
'class_names': class_names,
'X_train': X_train,
'y_train': y_train,
'X_val': X_val,
'y_val': y_val,
'X_test': X_test,
'y_test': y_test,
'mean_image': mean_image,
}
def load_models(models_dir):
"""
Load saved models from disk. This will attempt to unpickle all files in a
directory; any files that give errors on unpickling (such as README.txt)
will be skipped.
Inputs:
- models_dir: String giving the path to a directory containing model files.
Each model file is a pickled dictionary with a 'model' field.
Returns:
A dictionary mapping model file names to models.
"""
models = {}
for model_file in os.listdir(models_dir):
with open(os.path.join(models_dir, model_file), 'rb') as f:
try:
models[model_file] = load_pickle(f)['model']
except pickle.UnpicklingError:
continue
return models
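# Editor's note: a minimal, hedged usage sketch for load_models(); the directory name below is
# a hypothetical placeholder for wherever the pickled model files actually live.
def _example_load_models_usage(models_dir='cs231n/checkpoints'):
    models = load_models(models_dir)
    for name, model in models.items():
        print('loaded %s (%s)' % (name, type(model).__name__))
    return models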
def load_imagenet_val(num=None):
"""Load a handful of validation images from ImageNet.
Inputs:
- num: Number of images to load (max of 25)
Returns:
- X: numpy array with shape [num, 224, 224, 3]
- y: numpy array of integer image labels, shape [num]
- class_names: dict mapping integer label to class name
"""
imagenet_fn = 'cs231n/datasets/imagenet_val_25.npz'
if not os.path.isfile(imagenet_fn):
print('file %s not found' % imagenet_fn)
print('Run the following:')
print('cd cs231n/datasets')
print('bash get_imagenet_val.sh')
assert False, 'Need to download imagenet_val_25.npz'
f = np.load(imagenet_fn, allow_pickle=True)
X = f['X']
y = f['y']
class_names = f['label_map'].item()
if num is not None:
X = X[:num]
y = y[:num]
return X, y, class_names
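# Editor's note: a hedged usage sketch for load_imagenet_val(); it assumes the
# cs231n/datasets/imagenet_val_25.npz file referenced above has already been downloaded.
def _example_load_imagenet_val_usage(num=5):
    X, y, class_names = load_imagenet_val(num=num)
    for label in y:
        print(label, class_names[label])
    return X.shape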
| 34.798479 | 79 | 0.611014 |
22f2f20f825e529f60cf0292a185659398700988 | 3,835 | py | Python | app/views.py | congjinruo/JulyNovel | feff0adfecab1c21728fc177c94621b9b8707bbd | ["Apache-2.0"] | 5 | 2018-03-05T02:32:53.000Z | 2020-10-27T13:13:59.000Z | app/views.py | congjinruo/JulyNovel | feff0adfecab1c21728fc177c94621b9b8707bbd | ["Apache-2.0"] | null | null | null | app/views.py | congjinruo/JulyNovel | feff0adfecab1c21728fc177c94621b9b8707bbd | ["Apache-2.0"] | 1 | 2019-02-20T03:04:22.000Z | 2019-02-20T03:04:22.000Z |
"""
Routes and views for the flask application.
"""
# -*- coding: utf-8 -*-
import os
import requests
from config import Config
from urllib import parse
from flask import Flask, render_template, redirect, url_for, send_from_directory,request
from flask_graphql import GraphQLView
from .data.base import db_session
from .data.schema import schema
from app import create_app as app
from flask_cors import CORS
from .services.spider import Spider
import threading, time
from .utils.operate_oss import OSS
from .utils.operate_db import DBUtil
CORS(app, supports_credentials=True, origins="https://www.kuaijiajin.club")
@app.teardown_appcontext
def shutdown_session(exception=None):
db_session.remove()
app.add_url_rule('/graphql', view_func=GraphQLView.as_view('graphql', schema=schema, graphiql = True, context={'session': db_session}))
@app.route('/')
def index():
return app.send_static_file('index.html')
@app.route('/login/<key>', methods=["GET", "POST"])
def login(key):
"""
OSS 删除匹配key的所有文件
"""
if key is None:
return 'fail'
print(key)
if key == 'github':
print(request.args.get('code'))
parar={'client_id': Config.GITHUB_CLIENT_ID, 'client_secret': Config.GITHUB_CLIENT_SECRET, 'code': request.args.get('code')}
headers={
'Accept':'application/json'
}
r = requests.post('https://github.com/login/oauth/access_token',params = parar, headers=headers)
print(r.text)
access_token = r.json()
print(access_token)
userInfo = requests.get('https://api.github.com/user?access_token=' + access_token['access_token'])
return userInfo.text
return 'success'
@app.route('/missionStart/<key>')
def missionStart(key):
if key is None:
key = 1
i = 0
while(i < 30):
i += 1
t = threading.Thread(target=Spider(siteId=key).run, name='spiderMission %s' % i)
t.start()
si = threading.Thread(target=Spider(siteId=key).insert, name='insertMission %s' % i)
sc = threading.Thread(target=Spider(siteId=key).timerStart, name='checkFinish')
si.start()
sc.start()
return 'Mission Start ... '
@app.route('/uploadAll')
def uploadAll():
    '''
    Upload all images to OSS in one step.
    '''
OSS().upload_all_image()
return 'success'
@app.route('/getIPList')
def getIPList():
    '''
    Look up the region that each IP address belongs to.
    '''
path = 'D:/DingTalk/ipList.txt'
res = ''
with open(path,'r') as f:
content = f.read()
ipList = content.split(';')
for ip in ipList:
            if ip == '':
continue
else:
res += getIPLocation(ip) + ';'
return res
return 'fail'
def getIPLocation(ip):
parar={'query': ip, 'resource_id': '6006', 'ie': 'utf8', 'format':'json'}
headers={
'Accept':'application/json'
}
r = requests.get('https://sp0.baidu.com/8aQDcjqpAAV3otqbppnN2DJv/api.php',params = parar, headers=headers)
response_str = r.json()
data = response_str['data']
    location = 'Error: '
if response_str['status'] == '0':
item = data[0]
location = item['location']
else:
location += ip
return location
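# Editor's note: a minimal, hedged usage sketch for getIPLocation(); the address below is just
# an illustrative public IP, and the lookup needs outbound network access to the Baidu endpoint.
def _example_get_ip_location(ip='8.8.8.8'):
    return getIPLocation(ip)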
@app.route('/delete/<key>')
def delete(key):
"""
OSS 删除匹配key的所有文件
"""
if key is None:
return 'fail'
OSS().delete_object(key)
return 'success'
@app.route('/favicon.ico')
def favicon():
return send_from_directory(os.path.join(app.root_path, 'static'),
'favicon.ico', mimetype='image/vnd.microsoft.icon')
@app.route('/robots.txt')
def robots():
return send_from_directory(os.path.join(app.root_path, 'static'),
'robots.txt')
@app.route('/test')
def test():
db_util = DBUtil()
db_util.resort_chapters()
return 'testing ...'
| 26.267123 | 135 | 0.625033 |
29dcd3ff89fd7350d9335f5fa4482cad4af3e2e8 | 14,944 | py | Python | tests/test_cohort_preview_manifest.py | ImagingDataCommons/ISB-CGC-API | d3207a6ada958ad5ac3cb8834b216ec1108dcf1c | ["Apache-2.0"] | null | null | null | tests/test_cohort_preview_manifest.py | ImagingDataCommons/ISB-CGC-API | d3207a6ada958ad5ac3cb8834b216ec1108dcf1c | ["Apache-2.0"] | 5 | 2020-07-07T23:45:21.000Z | 2021-06-11T17:52:29.000Z | tests/test_cohort_preview_manifest.py | ImagingDataCommons/ISB-CGC-API | d3207a6ada958ad5ac3cb8834b216ec1108dcf1c | ["Apache-2.0"] | 2 | 2019-10-15T01:17:35.000Z | 2021-04-05T19:54:12.000Z |
#
# Copyright 2020, Institute for Systems Biology
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import json
from python_settings import settings
settings.BQ_MAX_ATTEMPTS=25
from tests.cohort_utils import merge, pretty_print_cohortObjects, create_cohort_for_test_get_cohort_xxx, delete_cohort
def test_guid(client, app):
filters = {
"collection_id": ["TCGA-READ"],
"Modality": ["CT", "MR"],
"race": ["WHITE"],
}
cohortSpec = {"name": "testcohort",
"description": "Test description",
"filters": filters}
mimetype = ' application/json'
headers = {
'Content-Type': mimetype,
'Accept': mimetype
}
query_string = {
'sql': False,
'CRDC_Study_GUID': True,
'CRDC_Series_GUID':True,
'CRDC_Instance_GUID': True,
'page_size': 2000,
}
# Get a guid manifest of the cohort's instances
response = client.post('v1/cohorts/manifest/preview',
query_string = query_string,
data = json.dumps(cohortSpec),
headers=headers)
assert response.content_type == 'application/json'
assert response.status_code == 200
cohort = response.json['cohort']
manifest = response.json['manifest']
assert manifest['rowsReturned'] == 1638
next_page = response.json['next_page']
assert next_page == ""
json_manifest = manifest['json_manifest']
assert len(json_manifest) == 1638
assert manifest['totalFound'] == 1638
assert 'dg.4DFC/0013f110-0928-4d66-ba61-7c3e80b48a68' in [row['CRDC_Instance_GUID'] for row in json_manifest]
def test_url(client, app):
filters = {
"collection_id": ["tcga_read"],
"Modality": ["CT", "MR"],
"race": ["WHITE"]}
cohortSpec = {"name": "testcohort",
"description": "Test description",
"filters": filters}
mimetype = ' application/json'
headers = {
'Content-Type': mimetype,
'Accept': mimetype
}
query_string = {
'GCS_URL': True,
'page_size': 2000,
}
# Get a guid manifest of the cohort's instances
response = client.post('v1/cohorts/manifest/preview',
query_string = query_string,
data = json.dumps(cohortSpec),
headers=headers)
assert response.content_type == 'application/json'
assert response.status_code == 200
cohort = response.json['cohort']
manifest = response.json['manifest']
assert manifest['rowsReturned'] == 1638
next_page = response.json['next_page']
assert next_page == ""
json_manifest = manifest['json_manifest']
assert len(json_manifest) == 1638
assert manifest['totalFound'] == 1638
assert {'GCS_URL': 'gs://idc_dev/0013f110-0928-4d66-ba61-7c3e80b48a68.dcm'} in json_manifest
def test_SOPInstanceUID(client, app):
filters = {
"collection_id": ["tcga_read"],
"Modality": ["CT", "MR"],
"race": ["WHITE"],
"SOPInstanceUID": ["1.3.6.1.4.1.14519.5.2.1.3671.4018.101814896314793708382026281597"]}
cohortSpec = {"name": "testcohort",
"description": "Test description",
"filters": filters}
mimetype = ' application/json'
headers = {
'Content-Type': mimetype,
'Accept': mimetype
}
query_string = {
'GCS_URL': True,
'page_size': 2000,
}
# Get a guid manifest of the cohort's instances
response = client.post('v1/cohorts/manifest/preview',
query_string = query_string,
data = json.dumps(cohortSpec),
headers=headers)
assert response.content_type == 'application/json'
assert response.status_code == 200
cohort = response.json['cohort']
manifest = response.json['manifest']
assert manifest['rowsReturned'] == 1
next_page = response.json['next_page']
assert next_page == ""
json_manifest = manifest['json_manifest']
assert len(json_manifest) == 1
assert manifest['totalFound'] == 1
assert {'GCS_URL': 'gs://idc_dev/de364433-4eaf-440e-b714-6c8b7cf3c613.dcm'} in json_manifest
def test_all(client, app):
filters = {
"collection_id": ["tcga_read"],
"Modality": ["CT", "MR"],
"race": ["WHITE"]}
cohortSpec = {"name": "testcohort",
"description": "Test description",
"filters": filters}
mimetype = ' application/json'
headers = {
'Content-Type': mimetype,
'Accept': mimetype
}
query_string = dict(
sql=True,
Collection_ID=True,
Patient_ID=True,
StudyInstanceUID=True,
SeriesInstanceUID=True,
SOPInstanceUID=True,
Source_DOI=True,
CRDC_Study_GUID=True,
CRDC_Series_GUID=True,
CRDC_Instance_GUID=True,
GCS_URL=True,
page_size=2000
)
response = client.post('v1/cohorts/manifest/preview',
query_string = query_string,
data = json.dumps(cohortSpec),
headers=headers)
assert response.content_type == 'application/json'
assert response.status_code == 200
cohort = response.json['cohort']
manifest = response.json['manifest']
assert manifest['rowsReturned'] == 1638
next_page = response.json['next_page']
assert next_page == ""
json_manifest = manifest['json_manifest']
assert len(json_manifest) == 1638
assert manifest['totalFound'] == 1638
assert 'TCGA-CL-5917' in [row['Patient_ID'] for row in json_manifest]
assert '1.3.6.1.4.1.14519.5.2.1.3671.4018.101814896314793708382026281597' in [row['SOPInstanceUID'] for row in json_manifest]
assert '1.3.6.1.4.1.14519.5.2.1.3671.4018.183714953600569164837490663631' in [row['SeriesInstanceUID'] for row in json_manifest]
assert '1.3.6.1.4.1.14519.5.2.1.3671.4018.768291480177931556369061239508' in [row['StudyInstanceUID'] for row in json_manifest]
assert 'tcga_read' in [row['Collection_ID'] for row in json_manifest]
assert '10.7937/K9/TCIA.2016.F7PPNPNU' in [row['Source_DOI'] for row in json_manifest]
assert next(row for row in json_manifest if row['GCS_URL'] == 'gs://idc_dev/0013f110-0928-4d66-ba61-7c3e80b48a68.dcm')
assert next(row for row in json_manifest if row['CRDC_Study_GUID'] == 'dg.4DFC/7efeae5d-6263-4184-9ad4-8df22720ada9')
assert next(row for row in json_manifest if row['CRDC_Series_GUID'] == 'dg.4DFC/67e22f90-36e1-40aa-88bb-9b2efb5616f2')
assert next(row for row in json_manifest if row['CRDC_Instance_GUID'] == 'dg.4DFC/0013f110-0928-4d66-ba61-7c3e80b48a68')
def test_paged_doi(client, app):
filters = {
"collection_id": ["tcga_luad"],
"Modality": ["CT", "MR"],
"race": ["WHITE"]}
cohortSpec = {"name": "testcohort",
"description": "Test description",
"filters": filters}
mimetype = ' application/json'
headers = {
'Content-Type': mimetype,
'Accept': mimetype
}
query_string = {
'CRDC_Instance_GUID': True,
'page_size': 5000
}
response = client.post('v1/cohorts/manifest/preview',
query_string = query_string,
data = json.dumps(cohortSpec),
headers=headers)
assert response.content_type == 'application/json'
assert response.status_code == 200
manifest = response.json['manifest']
next_page = response.json['next_page']
json_manifest = manifest['json_manifest']
assert len(json_manifest) == 5000
assert manifest['totalFound'] == 21940
assert manifest['rowsReturned'] ==5000
assert next_page
#Now get the remaining pages
complete_manifest = manifest['json_manifest']
totalRowsReturned = manifest['rowsReturned']
while next_page:
query_string = {
'next_page': next_page,
'page_size': 5000
}
response = client.get('v1/cohorts/manifest/nextPage',
query_string=query_string,
data=json.dumps(cohortSpec),
headers=headers)
assert response.content_type == 'application/json'
assert response.status_code == 200
manifest = response.json['manifest']
next_page = response.json['next_page']
totalRowsReturned += manifest["rowsReturned"]
complete_manifest.extend(manifest['json_manifest'])
assert 'dg.4DFC/0009e98e-bca2-4a68-ada1-62e0a8b2dbaf' in \
[row['CRDC_Instance_GUID'] for row in complete_manifest]
assert totalRowsReturned == manifest['totalFound']
assert manifest['totalFound'] == len(complete_manifest)
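# Editor's note: a hedged sketch only. The preview-then-nextPage paging pattern exercised above
# recurs in several tests; this helper shows how it could be factored out. It reuses only the
# endpoints and response fields the tests already rely on; the helper itself is not part of the API.
def _fetch_all_pages(client, cohortSpec, headers, query_string):
    response = client.post('v1/cohorts/manifest/preview',
                           query_string=query_string,
                           data=json.dumps(cohortSpec),
                           headers=headers)
    manifest = response.json['manifest']
    rows = list(manifest['json_manifest'])
    next_page = response.json['next_page']
    while next_page:
        response = client.get('v1/cohorts/manifest/nextPage',
                              query_string={'next_page': next_page,
                                            'page_size': query_string.get('page_size', 5000)},
                              data=json.dumps(cohortSpec),
                              headers=headers)
        manifest = response.json['manifest']
        rows.extend(manifest['json_manifest'])
        next_page = response.json['next_page']
    return rows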
def test_paged_url(client, app):
filters = {
"tcia_species": ["Human"],
"collection_id": ["tcga_luad"],
"Modality": ["CT", "MR"],
"race": ["WHITE"]}
cohortSpec = {"name": "testcohort",
"description": "Test description",
"filters": filters}
mimetype = ' application/json'
headers = {
'Content-Type': mimetype,
'Accept': mimetype
}
query_string = {
'GCS_URL': True,
'page_size': 5000
}
response = client.post('v1/cohorts/manifest/preview',
query_string = query_string,
data = json.dumps(cohortSpec),
headers=headers)
assert response.content_type == 'application/json'
assert response.status_code == 200
cohort = response.json['cohort']
manifest = response.json['manifest']
next_page = response.json['next_page']
json_manifest = manifest['json_manifest']
assert len(json_manifest) == 5000
assert manifest['totalFound'] == 21940
assert manifest['rowsReturned'] == 5000
assert next_page
# Now get the remaining pages
complete_manifest = manifest['json_manifest']
totalRowsReturned = manifest['rowsReturned']
while next_page:
query_string = {
'next_page': next_page,
'page_size': 5000
}
response = client.get('v1/cohorts/manifest/nextPage',
query_string=query_string,
data=json.dumps(cohortSpec),
headers=headers)
assert response.content_type == 'application/json'
assert response.status_code == 200
manifest = response.json['manifest']
next_page = response.json['next_page']
totalRowsReturned += manifest["rowsReturned"]
complete_manifest.extend(manifest['json_manifest'])
assert {'GCS_URL': 'gs://idc_dev/0009e98e-bca2-4a68-ada1-62e0a8b2dbaf.dcm'} in json_manifest
assert totalRowsReturned == manifest['totalFound']
assert manifest['totalFound'] == len(complete_manifest)
# This test submits an empty filter which means that all instances are returned.
# Takes a lot of time and bandwidth. Uncomment to run.
# To test timeout handling, you may need to set BQ_MAX_ATTEMPTS=0.
# def test_paged_guid_all_instances(client, app):
#
# import time
#
# cohortSpec = {
# "name": "mycohort",
# "description": "Example description",
# "filters": {}
# }
# query_string = dict(
# GCS_URL = True,
# Source_DOI = True,
# SOPInstanceUID = True,
# SeriesInstanceUID = True,
# StudyInstanceUID = True,
# CRDC_Study_GUID = True,
# CRDC_Series_GUID = True,
# CRDC_Instance_GUID = True,
# page_size=40000000
# )
#
# mimetype = ' application/json'
# headers = {
# 'Content-Type': mimetype,
# 'Accept': mimetype
# }
#
# start = time.time()
#
# response = client.post('v1/cohorts/manifest/preview',
# query_string=query_string,
# data=json.dumps(cohortSpec),
# headers=headers)
#
# elapsed = time.time()-start
# totalTime = elapsed
#
# while response.status_code == 202:
# query_string = dict(
# next_page=response.json['next_page'],
# page_size=40000000
#
# )
#
# response = client.post('v1/cohorts/manifest/preview',
# query_string=query_string,
# data=json.dumps(cohortSpec),
# headers=headers)
#
# # Check that there wasn't an error with the request
# if response.status_code != 200:
# # Print the error code and message if something went wrong
# print(response.json())
#
# # print(json.dumps(response.json(), sort_keys=True, indent=4))
#
# totalRows = response.json['manifest']['rowsReturned']
# totalBytes = len(json.dumps(response.json))
# next_page = response.json['next_page']
# print('totalRows: {}, totalBytes: {}, next_page: {}, time: {}, rate: {}'.format(
# totalRows, totalBytes, next_page[:16], elapsed, len(json.dumps(response.json))/elapsed
# ))
#
# while next_page:
# query_string['next_page'] = response.json['next_page']
#
# start = time.time()
# response = client.post('v1/cohorts/manifest/preview',
# query_string=query_string,
# data=json.dumps(cohortSpec),
# headers=headers)
#
# elapsed = time.time() - start
# totalTime += elapsed
#
# # Check that there wasn't an error with the request
# if response.status_code != 200:
# # Print the error code and message if something went wrong
# print(response.json)
# break
#
# totalRows += response.json['manifest']['rowsReturned']
# totalBytes += len(json.dumps(response.json))
# next_page = response.json['next_page']
#
# print('totalRows: {}, totalBytes: {}, next_page: {}, time: {}, rate: {}'.format(
# totalRows, totalBytes, next_page[:16], elapsed, len(json.dumps(response.json)) / elapsed
# ))
#
# print('Total time: {}, rate: {}'.format(totalTime, totalBytes/totalTime))
| 33.506726 | 132 | 0.601847 |
8dedfb3bcc6095882561dbe17d80cf9f412c353d | 23,106 | py | Python | main.py | alldbi/Factorized-Spatial-Embeddings | f4d63ff069b735b4496d832850152bc07748c752 | ["MIT"] | 33 | 2018-07-16T06:10:23.000Z | 2022-03-14T16:47:14.000Z | main.py | alldbi/Factorized-Spatial-Embeddings | f4d63ff069b735b4496d832850152bc07748c752 | ["MIT"] | null | null | null | main.py | alldbi/Factorized-Spatial-Embeddings | f4d63ff069b735b4496d832850152bc07748c752 | ["MIT"] | 6 | 2018-07-16T15:03:32.000Z | 2020-07-13T13:46:23.000Z |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
import glob, os, random, math, collections, time, argparse, shutil
from utils.warp import feature_warping2, image_warping2
from matplotlib import cm
# Parameter setting ****************************************************************************************************
SAVE_FREQ = 500
SUMMARY_FREQ = 20
MODE = "test"
BATCH_SIZE = 32
DATA_DIRECTORY = '/media/lab320/0274E2F866ED37FC/dataset/CelebA/img_align_celeba'
LANDMARK_N = 32
DOWNSAMPLE_M = 4
DIVERSITY = 500.
ALIGN = 1.
LEARNING_RATE = 1.e-4
MOMENTUM = 0.5
RANDOM_SEED = 1234
WEIGHT_DECAY = 0.0005
SCALE_SIZE = 146
CROP_SIZE = 146
MAX_EPOCH = 200
OUTPUT_DIR = './OUTPUT'
CHECKPOINT = './backup/model/'
def get_arguments():
"""Parse all the arguments provided from the CLI.
Returns:
A list of parsed arguments.
"""
parser = argparse.ArgumentParser(description="Factorized Spatial Embeddings")
parser.add_argument("--mode", default=MODE, choices=["train", "test"])
parser.add_argument("--batch_size", type=int, default=BATCH_SIZE,
help="Number of images sent to the network in one step.")
parser.add_argument("--input_dir", type=str, default=DATA_DIRECTORY,
help="Path to the directory containing the training or testing images.")
parser.add_argument("--learning_rate", type=float, default=LEARNING_RATE,
help="Learning rate for adam.")
parser.add_argument("--beta1", type=float, default=MOMENTUM,
help="Momentum component of the optimiser.")
parser.add_argument("--K", type=int, default=LANDMARK_N,
help="Number of landmarks.")
parser.add_argument("--M", type=int, default=DOWNSAMPLE_M,
help="Downsampling value of the diversity loss.")
parser.add_argument("--weight_decay", type=float, default=WEIGHT_DECAY,
help="Regularisation parameter for L2-loss.")
parser.add_argument("--random-seed", type=int, default=RANDOM_SEED,
help="Random seed to have reproducible results.")
parser.add_argument("--diversity_weight", type=float, default=DIVERSITY,
help="Weight on diversity loss.")
parser.add_argument("--align_weight", type=float, default=ALIGN,
help="Weight on align loss.")
parser.add_argument("--scale_size", type=int, default=SCALE_SIZE,
help="Scale images to this size before cropping to CROP_SIZE")
parser.add_argument("--crop_size", type=int, default=CROP_SIZE,
help="CROP images to this size")
parser.add_argument("--max_epochs", type=int, default=MAX_EPOCH,
help="Number of training epochs")
parser.add_argument("--checkpoint", default=CHECKPOINT,
help="Directory with checkpoint to resume training from or use for testing")
parser.add_argument("--output_dir", default=OUTPUT_DIR,
help="Where to put output files")
parser.add_argument("--summary_freq", type=int, default=SUMMARY_FREQ,
help="Update summaries every summary_freq steps")
parser.add_argument("--save_freq", type=int, default=SAVE_FREQ, help="Save model every save_freq steps")
return parser.parse_args()
def landmark_colors(n_landmarks):
"""Compute landmark colors.
Returns:
An array of RGB values.
"""
cmap = cm.get_cmap('hsv')
landmark_color = []
landmark_color.append((0., 0., 0.))
for i in range(n_landmarks):
landmark_color.append(cmap(i/float(n_landmarks))[0:3])
landmark_color = np.array(landmark_color)
return landmark_color
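# Editor's note: a hedged usage sketch for landmark_colors(); for K landmarks it returns a
# (K + 1, 3) array whose first row is black and whose remaining rows are HSV-spaced RGB triples.
def _example_landmark_colors(n_landmarks=4):
    colors = landmark_colors(n_landmarks)
    print(colors.shape)  # (n_landmarks + 1, 3)
    return colors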
# Collections definition
Examples = collections.namedtuple("Examples",
"paths, images, images_deformed, deformation, count, steps_per_epoch, shape")
Model = collections.namedtuple("Model", "pos_loss, neg_loss, distance")
def weight_decay():
"""Compute weight decay loss.
Returns:
Weight decay loss.
"""
costs = []
for var in tf.trainable_variables():
if var.op.name.find('filter')>0:
costs.append(tf.nn.l2_loss(var))
return tf.add_n(costs)
def conv(batch_input, out_channels, stride=1):
with tf.variable_scope("conv"):
in_channels = batch_input.get_shape()[3]
filter = tf.get_variable("filter", [5, 5, in_channels, out_channels], dtype=tf.float32,
initializer=tf.random_normal_initializer(0, 0.02))
conv = tf.nn.conv2d(batch_input, filter, [1, stride, stride, 1], padding="VALID")
return conv
def save_images(fetches, args, step=None):
image_dir = os.path.join(args.output_dir, "images")
if not os.path.exists(image_dir):
os.makedirs(image_dir)
filesets = []
for i, in_path in enumerate(fetches["paths"]):
name, _ = os.path.splitext(os.path.basename(in_path.decode("utf8")))
fileset = {"name": name, "step": step}
filename = name + "-" + "outputs" + ".png"
if step is not None:
filename = "%08d-%s" % (step, filename)
fileset["outputs"] = filename
out_path = os.path.join(image_dir, filename)
contents = fetches["outputs"][i]
with open(out_path, "wb") as f:
f.write(contents)
filesets.append(fileset)
return filesets
def preprocess(image):
with tf.name_scope("preprocess"):
# [0, 1] => [-1, 1]
return image * 2 - 1
def deprocess(image):
with tf.name_scope("deprocess"):
# [-1, 1] => [0, 1]
return (image + 1) / 2
def load_examples(args):
"""Load all images in the input_dir.
Returns:
Examples.paths : batch of path of images,
Examples.images : batch of images,
Examples.images_deformed : batch of deformed images,
Examples.deformation : batch of deformation parameters,
"""
if args.input_dir is None or not os.path.exists(args.input_dir):
raise Exception("input_dir does not exist")
decode = tf.image.decode_jpeg
# load distorted pairs address
input_paths = glob.glob(os.path.join(args.input_dir, "*.jpg"))
if len(input_paths) == 0:
raise Exception("input_dir contains no image files")
def get_name(path):
name, _ = os.path.splitext(os.path.basename(path))
return name
# if the image names are numbers, sort by the value rather than asciibetically
# having sorted inputs means that the outputs are sorted in test mode
if all(get_name(path).isdigit() for path in input_paths):
input_paths = sorted(input_paths, key=lambda path: int(get_name(path)))
else:
input_paths = sorted(input_paths)
with tf.name_scope("load_images"):
path_queue = tf.train.string_input_producer(input_paths, shuffle= args.mode == "train")
reader = tf.WholeFileReader()
paths, contents = reader.read(path_queue)
raw_input = decode(contents)
raw_input = tf.image.convert_image_dtype(raw_input, dtype=tf.float32)
assertion = tf.assert_equal(tf.shape(raw_input)[2], 3, message="image does not have required channels")
with tf.control_dependencies([assertion]):
raw_input = tf.identity(raw_input)
raw_input.set_shape([None, None, 3])
images = preprocess(raw_input)
seed = random.randint(0, 2 ** 31 - 1)
# scale and crop input image to match 256x256 size
def transform(image):
r = image
r = tf.image.resize_images(r, [args.scale_size, args.scale_size], method=tf.image.ResizeMethod.AREA)
offset = tf.cast(tf.floor(tf.random_uniform([2], 0, args.scale_size - args.crop_size + 1, seed=seed)), dtype=tf.int32)
if args.scale_size > args.crop_size:
r = tf.image.crop_to_bounding_box(r, offset[0], offset[1], args.crop_size, args.crop_size)
elif args.scale_size < args.crop_size:
raise Exception("scale size cannot be less than crop size")
return r
with tf.name_scope("images"):
input_images = transform(images)
if args.mode=="train":
input_images, _ = image_warping2(input_images, w=0.0)
deformed_images, deformation = image_warping2(input_images, w=0.1)
deformation = tf.squeeze(deformation)
# crop after warping
input_images = tf.image.crop_to_bounding_box(input_images, 5, 5, 128, 128)
deformed_images = tf.image.crop_to_bounding_box(deformed_images, 5, 5, 128, 128)
# clip image values
input_images = tf.clip_by_value(input_images, clip_value_min=-1., clip_value_max=1.)
deformed_images = tf.clip_by_value(deformed_images, clip_value_min=-1., clip_value_max=1.)
paths_batch, images_batch, images_deformed_batch, deformation_batch = tf.train.batch(
[paths, input_images, deformed_images, deformation], batch_size=args.batch_size)
steps_per_epoch = int(math.ceil(len(input_paths) / args.batch_size))
return Examples(
paths=paths_batch,
images=images_batch,
images_deformed=images_deformed_batch,
deformation=deformation_batch,
count=len(input_paths),
steps_per_epoch=steps_per_epoch,
shape=raw_input.get_shape()
)
def CNN_tower(inputs, n_landmarks, isTrain):
n_filters = [20, 48, 64, 80, 256, n_landmarks]
with tf.variable_scope("layer_1"):
x = conv(inputs, n_filters[0])
x = tf.contrib.layers.batch_norm(x, updates_collections=None, decay=0.9, center=True,
scale=True,
activation_fn=tf.nn.relu, is_training=isTrain)
# only the first layer has a 2x2 maxpooling
x = tf.layers.max_pooling2d(inputs=x, pool_size=[2, 2], strides=2)
with tf.variable_scope("layer_2"):
x = conv(x, n_filters[1])
x = tf.contrib.layers.batch_norm(x, updates_collections=None, decay=0.9, center=True,
scale=True,
activation_fn=tf.nn.relu, is_training=isTrain)
with tf.variable_scope("layer_3"):
x = conv(x, n_filters[2])
x = tf.contrib.layers.batch_norm(x, updates_collections=None, decay=0.9, center=True,
scale=True,
activation_fn=tf.nn.relu, is_training=isTrain)
with tf.variable_scope("layer_4"):
x = conv(x, n_filters[3])
x = tf.contrib.layers.batch_norm(x, updates_collections=None, decay=0.9, center=True,
scale=True,
activation_fn=tf.nn.relu, is_training=isTrain)
with tf.variable_scope("layer_5"):
x = conv(x, n_filters[4])
x = tf.contrib.layers.batch_norm(x, updates_collections=None, decay=0.9, center=True,
scale=True,
activation_fn=tf.nn.relu, is_training=isTrain)
with tf.variable_scope("layer_6"):
x = conv(x, n_filters[5])
x = tf.contrib.layers.batch_norm(x, updates_collections=None, decay=0.9, center=True,
scale=True,
activation_fn=tf.nn.relu, is_training=isTrain)
return x
def align_loss(predA_deformed, predB, n_landmarks):
# compute the mean of landmark locations
batch_size = predB.get_shape()[0]
pred_size = predB.get_shape()[1]
index = tf.range(0, tf.cast(pred_size, tf.float32), delta=1, dtype=tf.float32)
index = tf.reshape(index, [pred_size, 1])
x_index = tf.tile(index, [1, pred_size])
index = tf.transpose(index)
y_index = tf.tile(index, [pred_size, 1])
x_index = tf.expand_dims(x_index, 2)
x_index = tf.expand_dims(x_index, 0)
y_index = tf.expand_dims(y_index, 2)
y_index = tf.expand_dims(y_index, 0)
x_index = tf.tile(x_index, [batch_size, 1, 1, n_landmarks])
y_index = tf.tile(y_index, [batch_size, 1, 1, n_landmarks])
x_index_avg_A = x_index * predA_deformed
y_index_avg_A = y_index * predA_deformed
x_index_avg_B = x_index * predB
y_index_avg_B = y_index * predB
pA_sum = tf.reduce_sum(predA_deformed, axis=[1, 2])
pB_sum = tf.reduce_sum(predB, axis=[1, 2])
x_index_avg_A = tf.reduce_mean(x_index_avg_A, axis=[1, 2])
y_index_avg_A = tf.reduce_mean(y_index_avg_A, axis=[1, 2])
x_index_avg_B = tf.reduce_mean(x_index_avg_B, axis=[1, 2])
y_index_avg_B = tf.reduce_mean(y_index_avg_B, axis=[1, 2])
x_index_avg_A = x_index_avg_A / pA_sum
y_index_avg_A = y_index_avg_A / pA_sum
x_index_avg_B = x_index_avg_B / pB_sum
y_index_avg_B = y_index_avg_B / pB_sum
# compute align loss
loss = tf.pow(x_index_avg_A-x_index_avg_B, 2.) + tf.pow(y_index_avg_A - y_index_avg_B, 2.)
loss = tf.reduce_mean(loss)
return loss, x_index, y_index
def align_loss2(predA, predB, deformation, n_landmarks):
# compute the mean of landmark locations
batch_size = predA.get_shape()[0]
pred_size = predA.get_shape()[1]
index = tf.range(0, tf.cast(pred_size, tf.float32), delta=1, dtype=tf.float32)
index = tf.reshape(index, [pred_size, 1])
x_index = tf.tile(index, [1, pred_size])
index = tf.transpose(index)
y_index = tf.tile(index, [pred_size, 1])
x_index = tf.expand_dims(x_index, 2)
x_index = tf.expand_dims(x_index, 0)
y_index = tf.expand_dims(y_index, 2)
y_index = tf.expand_dims(y_index, 0)
x_index = tf.tile(x_index, [batch_size, 1, 1, n_landmarks])
y_index = tf.tile(y_index, [batch_size, 1, 1, n_landmarks])
u_norm2 = tf.pow(x_index, 2.) + tf.pow(y_index, 2.)
u_norm2 = u_norm2 * predA
loss_part1 = tf.reduce_sum(u_norm2, axis=[1, 2])
x_index_deformed = feature_warping2(x_index, deformation, padding=3)
    y_index_deformed = feature_warping2(y_index, deformation, padding=3)
    v_norm2 = tf.pow(x_index_deformed, 2.) + tf.pow(y_index_deformed, 2.)
    v_norm2 = v_norm2 * predB
    loss_part2 = tf.reduce_sum(v_norm2, axis=[1, 2])
    loss_part3x = tf.reduce_sum(x_index * predA, axis=[1, 2])
    loss_part3y = tf.reduce_sum(y_index * predA, axis=[1, 2])
    loss_part4x = tf.reduce_sum(x_index_deformed * predB, axis=[1, 2])
    loss_part4y = tf.reduce_sum(y_index_deformed * predB, axis=[1, 2])
loss_part3 = loss_part3x * loss_part4x + loss_part3y * loss_part4y
loss = loss_part1 + loss_part2 - 2. * loss_part3
loss = tf.reduce_mean(loss)
return loss
def main():
"""Create the model and start the training."""
args = get_arguments()
tf.set_random_seed(args.random_seed)
examples = load_examples(args)
print("examples count = %d" % examples.count)
with tf.variable_scope("cnn_tower"):
predA = CNN_tower(examples.images, n_landmarks=args.K, isTrain=args.mode == "train")
with tf.variable_scope("cnn_tower", reuse=True):
predB = CNN_tower(examples.images_deformed, n_landmarks=args.K, isTrain=args.mode == "train")
# apply a spatial softmax to obtain K probability maps
pred_size = predA.get_shape()[1]
predA = tf.reshape(predA, [-1, pred_size*pred_size, args.K])
predB = tf.reshape(predB, [-1, pred_size*pred_size, args.K])
predA = tf.nn.softmax(predA, axis=1)
predB = tf.nn.softmax(predB, axis=1)
predA = tf.reshape(predA, [-1, pred_size, pred_size, args.K])
predB = tf.reshape(predB, [-1, pred_size, pred_size, args.K])
# visualizing landmarks
predA_vis = tf.reduce_mean(predA, axis=3)
predA_vis = tf.expand_dims(predA_vis, axis=3)
# another visualization
pred_max = tf.reduce_max(predA, axis=[1, 2])
pred_max = tf.expand_dims(pred_max, axis=1)
pred_max = tf.expand_dims(pred_max, axis=1)
pred_max = tf.equal(predA, pred_max)
pred_max = tf.cast(pred_max, tf.float32)
mask = tf.range(start=1, limit=args.K+1, delta=1, dtype=tf.float32)
mask = tf.reshape(mask, [1, 1, 1, args.K])
mask = tf.tile(mask, [args.batch_size, pred_size, pred_size, 1])
mask = mask * pred_max
mask = tf.reduce_max(mask, axis=3, keepdims=True)
landmarks = tf.convert_to_tensor(landmark_colors(args.K), tf.float32)
mask = tf.reshape(mask, [args.batch_size, pred_size*pred_size])
mask = tf.cast(mask, tf.int32)
mask = tf.gather(landmarks, mask, axis=0)
mask = tf.reshape(mask, [args.batch_size, pred_size, pred_size, 3])
pred_max = tf.reduce_max(pred_max, axis=3)
pred_max = tf.expand_dims(pred_max, axis=3)
# compute the diversity loss
def diversity_loss(pred, n_landmark, pool_size):
pred_pool = tf.nn.pool(pred, window_shape=[pool_size, pool_size], strides=[1, 1], pooling_type="AVG", padding="VALID")
# convert avg pool to sum pool
# pred_pool = pred_pool * float(pool_size) * float(pool_size)
pred_max = tf.reduce_max(pred_pool, axis=3)
pred_max_sum = tf.reduce_sum(pred_max, axis=[1, 2])
pred_max_sum = float(n_landmark) - pred_max_sum
pred_max_sum = tf.reduce_mean(pred_max_sum)
return pred_max_sum
diversityLoss_predA = diversity_loss(predA, n_landmark=args.K, pool_size=args.M)
diversityLoss_predB = diversity_loss(predB, n_landmark=args.K, pool_size=args.M)
div_loss = diversityLoss_predA + diversityLoss_predB
# compute the align loss
algn_loss = align_loss2(predA, predB, examples.deformation, n_landmarks= args.K)
# compute the weight decay loss
decay_loss = weight_decay() * args.weight_decay
with tf.name_scope("train"):
optim = tf.train.AdamOptimizer(args.learning_rate, args.beta1)
# grads_and_vars = optim.compute_gradients(loss)
# train = optim.apply_gradients(grads_and_vars)
train_op = optim.minimize(algn_loss*args.align_weight + div_loss*args.diversity_weight + decay_loss )
# global_step = tf.contrib.framework.get_or_create_global_step()
global_step = tf.train.get_or_create_global_step()
incr_global_step = tf.assign(global_step, global_step + 1)
train = tf.group(train_op, incr_global_step)
input_images = deprocess(examples.images)
input_deformed = deprocess(examples.images_deformed)
# overlay landmarks on the input image
landmarks_image = pred_max * mask
pred_max_resized = tf.image.resize_images(pred_max, [128, 128], tf.image.ResizeMethod.AREA)
pred_max_resized = tf.greater(pred_max_resized, 0.)
pred_max_resized = tf.cast(pred_max_resized, tf.float32)
mask_resized = tf.image.resize_images(mask, [128, 128])
input_images_landmark = input_images * (1.-pred_max_resized) + pred_max_resized * mask_resized
with tf.name_scope("parameter_count"):
parameter_count = tf.reduce_sum([tf.reduce_prod(tf.shape(v)) for v in tf.trainable_variables()])
tf.summary.image("Input", input_images)
tf.summary.image("Deformed", input_deformed)
tf.summary.image("PredA", predA_vis)
# tf.summary.image("AApredAmax", mask)
# tf.summary.image("PredB", predB_vis)
tf.summary.image("Landmark", input_images_landmark)
# tf.summary.image("AApredAmax", landmarks_image)
tf.summary.scalar("loss_align", algn_loss)
tf.summary.scalar("loss_diversity", div_loss)
tf.summary.scalar("loss_decay", decay_loss)
output_images = tf.image.convert_image_dtype(input_images_landmark, dtype=tf.uint8, saturate=True)
with tf.name_scope("encode_images"):
display_fetches = {
"paths": examples.paths,
"outputs": tf.map_fn(tf.image.encode_png, output_images, dtype=tf.string, name="input_pngs"),
}
saver = tf.train.Saver(max_to_keep=1)
sv = tf.train.Supervisor(logdir=os.path.join(os.path.join(args.output_dir, 'logs')), save_summaries_secs=0, saver=None)
with sv.managed_session() as sess:
max_steps = 2 ** 32
if args.max_epochs is not None:
max_steps = examples.steps_per_epoch * args.max_epochs
print ("max epochs: ", args.max_epochs)
print ("max steps : ", max_steps)
start = time.time()
print("parameter_count =", sess.run(parameter_count))
if args.checkpoint is not None:
print ("loading from checkpoint...")
checkpoint = tf.train.latest_checkpoint(args.checkpoint)
saver.restore(sess, checkpoint)
if args.mode == "train":
# training
for step in range(max_steps):
def should(freq):
return freq > 0 and ((step + 1) % freq == 0 or step == max_steps - 1)
fetches = {
"train": train,
"global_step": sv.global_step,
"loss": algn_loss,
"labels": examples.images,
"offset": examples.deformation,
"predA" : predA,
"decay_loss":decay_loss,
"div_loss":div_loss,
}
if should(freq=args.summary_freq):
fetches["summary"] = sv.summary_op
results = sess.run(fetches)
if should(freq=args.summary_freq):
sv.summary_writer.add_summary(results["summary"], results["global_step"])
# global_step will have the correct step count if we resume from a checkpoint
train_epoch = math.ceil(results["global_step"] / examples.steps_per_epoch)
train_step = (results["global_step"] - 1) % examples.steps_per_epoch + 1
rate = (step + 1) * args.batch_size / (time.time() - start)
remaining = (max_steps - step) * args.batch_size / rate
print("progress epoch %d step %d image/sec %0.1f remaining %dm" % (
train_epoch, train_step, rate, remaining / 60))
print ("loss_align", results["loss"])
print ("loss_diversity", results["div_loss"])
print ("loss_decay", results["decay_loss"])
print ("------------------------------")
if should(freq=args.save_freq):
print("saving model")
saver.save(sess, os.path.join(args.output_dir, "model"), global_step=sv.global_step)
elif args.mode=="test":
# testing
start = time.time()
max_steps = min(examples.steps_per_epoch, max_steps)
for step in range(max_steps):
results = sess.run(display_fetches)
filesets = save_images(results, args)
for i, f in enumerate(filesets):
print("evaluated image", f["name"])
print("rate", (time.time() - start) / max_steps)
if __name__ == '__main__':
main()
| 38.89899 | 126 | 0.636242 |
9d3a329a62c37fd1b888502f8d23ddfbdbfda4f7 | 297 | py | Python | tests/test_generate_text_utils.py | Jeton-Digital/Synthetic-Data-Generator | 11e198028d1da93d9d68a2251f8372bb05ed6b60 | ["MIT"] | 1 | 2021-11-03T13:26:29.000Z | 2021-11-03T13:26:29.000Z | tests/test_generate_text_utils.py | Jeton-Digital/Synthetic-Data-Generator | 11e198028d1da93d9d68a2251f8372bb05ed6b60 | ["MIT"] | null | null | null | tests/test_generate_text_utils.py | Jeton-Digital/Synthetic-Data-Generator | 11e198028d1da93d9d68a2251f8372bb05ed6b60 | ["MIT"] | 1 | 2021-11-03T13:26:05.000Z | 2021-11-03T13:26:05.000Z |
def not_contain_numeric(name_list):
for name in name_list:
for ch in name:
if ch.isnumeric():
return False
return True
def email_contains_at(email_list):
for email in email_list:
if "@" not in email:
return False
return True
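# Editor's note: a hedged usage sketch for the two helpers above, using made-up sample values.
def _example_text_checks():
    assert not_contain_numeric(['Alice', 'Bob'])
    assert not not_contain_numeric(['R2D2'])
    assert email_contains_at(['alice@example.com'])
    assert not email_contains_at(['not-an-email'])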
| 24.75 | 35 | 0.59596 |
e4af4945aed9b1903abda89ba191bc71670bd2f6 | 1,619 | py | Python | cheritest/trunk/tests/branch/test_raw_bltzal_gt.py | tupipa/beri | cef1b41d52592cfa7454ddf59f9f2994e447cd66 | ["Apache-2.0"] | 36 | 2015-05-29T16:47:19.000Z | 2022-02-08T21:16:26.000Z | cheritest/trunk/tests/branch/test_raw_bltzal_gt.py | tupipa/beri | cef1b41d52592cfa7454ddf59f9f2994e447cd66 | ["Apache-2.0"] | 2 | 2020-06-02T13:44:55.000Z | 2020-06-02T14:06:29.000Z | cheritest/trunk/tests/branch/test_raw_bltzal_gt.py | tupipa/beri | cef1b41d52592cfa7454ddf59f9f2994e447cd66 | ["Apache-2.0"] | 15 | 2015-06-11T07:10:58.000Z | 2021-06-18T05:14:54.000Z |
#-
# Copyright (c) 2011 Robert N. M. Watson
# All rights reserved.
#
# This software was developed by SRI International and the University of
# Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
# ("CTSRD"), as part of the DARPA CRASH research programme.
#
# @BERI_LICENSE_HEADER_START@
#
# Licensed to BERI Open Systems C.I.C. (BERI) under one or more contributor
# license agreements. See the NOTICE file distributed with this work for
# additional information regarding copyright ownership. BERI licenses this
# file to you under the BERI Hardware-Software License, Version 1.0 (the
# "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at:
#
# http://www.beri-open-systems.org/legal/license-1-0.txt
#
# Unless required by applicable law or agreed to in writing, Work distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# @BERI_LICENSE_HEADER_END@
#
from beritest_tools import BaseBERITestCase
class test_raw_bltzal_gt(BaseBERITestCase):
def test_before_bltzal(self):
self.assertRegisterEqual(self.MIPS.a0, 1, "instruction before bltzal missed")
def test_bltzal_branch_delay(self):
self.assertRegisterEqual(self.MIPS.a1, 2, "instruction in branch-delay slot missed")
def test_bltzal_notskipped(self):
self.assertRegisterEqual(self.MIPS.a2, 3, "instruction after branch-delay slot missed")
| 40.475 | 95 | 0.765905 |
a8b8437ce30768ca779d7a2d82616d929f4e4765 | 2,888 | py | Python | blockrange.py | buzzkillb/snakes-on-a-chain | 09e18f0e985d3949195ed3475f2198baa2167130 | ["MIT"] | 4 | 2019-10-12T02:23:56.000Z | 2021-06-13T12:58:10.000Z | blockrange.py | buzzkillb/snakes-on-a-chain | 09e18f0e985d3949195ed3475f2198baa2167130 | ["MIT"] | null | null | null | blockrange.py | buzzkillb/snakes-on-a-chain | 09e18f0e985d3949195ed3475f2198baa2167130 | ["MIT"] | 4 | 2019-10-12T02:23:58.000Z | 2021-04-14T22:08:15.000Z |
from denariusrpc.authproxy import AuthServiceProxy, JSONRPCException
import time
import sys
import datetime
import urllib
import json
from influxdb import InfluxDBClient
# rpc_user and rpc_password are set in the denarius.conf file
rpc_connection = AuthServiceProxy("http://%s:%s@127.0.0.1:32369"%("rpcuser", "rpcpassword"))
#for i in range(3):
# print(i)
# block = rpc_connection.getblockbynumber(i)
# print(block)
# Configure InfluxDB connection variables
host = "127.0.0.1" # My Ubuntu NUC
port = 8086 # default port
user = "admin" # the user/password created for the pi, with write access
password = "admin"
dbname = "blocks" # the database we created earlier
interval = 60 # Sample period in seconds
# Create the InfluxDB client object
client = InfluxDBClient(host, port, user, password, dbname)
# think of measurement as a SQL table, it's not...but...
measurement = "measurement"
# location will be used as a grouping tag later
blockchain = "denarius"
# Run until you get a ctrl^c
#def main():
import time
currentblock = block = rpc_connection.getblockcount()
scanback = 100
lastrange = currentblock - scanback
for i in range(lastrange, currentblock):
print(i)
block = rpc_connection.getblockbynumber(i)
grafanatime = block['time'] * 1000000000
hash = block['hash']
size = block['size']
height = block['height']
version = block['version']
merkleroot = block['merkleroot']
mint = int(block['mint'])
timed = int(block['time'])
nonce = block['nonce']
bits = block['bits']
difficulty = float(block['difficulty'])
blocktrust = block['blocktrust']
chaintrust = block['chaintrust']
chainwork = block['chainwork']
previousblockhash = block['previousblockhash']
nextblockhash = block['nextblockhash']
flags = block['flags']
proofhash = block['proofhash']
entropybit = block['entropybit']
modifier = block['modifier']
modifierchecksum = block['modifierchecksum']
data = [
{
"measurement": measurement,
"tags": {
"blockchain": blockchain,
},
"time": grafanatime,
"fields": {
#"block" : i,
"hash" : hash,
"size" : size,
"height" : height,
"version" : version,
"merkleroot" : merkleroot,
"mint" : mint,
"time" : timed,
"nonce" : nonce,
"bits" : bits,
"difficulty" : difficulty,
"blocktrust" : blocktrust,
"chaintrust" : chaintrust,
"chainwork" : chainwork,
"nextblockhash" : nextblockhash,
"flags" : flags,
"proofhash" : proofhash,
"entropybit" : entropybit,
"modifier" : modifier,
"modifierchecksum" : modifierchecksum
}
}
]
# Send the JSON data to InfluxDB
# print(difficulty)
# print(timed)
client.write_points(data)
| 28.594059 | 92 | 0.638504 |
ab4753559512e7bbcdaf19b19147ff8ab726266e | 5,826 | py | Python | intersight/models/firmware_nfs_server.py | ategaw-cisco/intersight-python | 9d6476620507281b1dc358e29ac452d56081bbb0 | ["Apache-2.0"] | null | null | null | intersight/models/firmware_nfs_server.py | ategaw-cisco/intersight-python | 9d6476620507281b1dc358e29ac452d56081bbb0 | ["Apache-2.0"] | null | null | null | intersight/models/firmware_nfs_server.py | ategaw-cisco/intersight-python | 9d6476620507281b1dc358e29ac452d56081bbb0 | ["Apache-2.0"] | null | null | null |
# coding: utf-8
"""
Intersight REST API
This is Intersight REST API
OpenAPI spec version: 1.0.9-262
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class FirmwareNfsServer(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'mount_options': 'str',
'remote_file': 'str',
'remote_ip': 'str',
'remote_share': 'str'
}
attribute_map = {
'mount_options': 'MountOptions',
'remote_file': 'RemoteFile',
'remote_ip': 'RemoteIp',
'remote_share': 'RemoteShare'
}
def __init__(self, mount_options=None, remote_file=None, remote_ip=None, remote_share=None):
"""
FirmwareNfsServer - a model defined in Swagger
"""
self._mount_options = None
self._remote_file = None
self._remote_ip = None
self._remote_share = None
if mount_options is not None:
self.mount_options = mount_options
if remote_file is not None:
self.remote_file = remote_file
if remote_ip is not None:
self.remote_ip = remote_ip
if remote_share is not None:
self.remote_share = remote_share
@property
def mount_options(self):
"""
Gets the mount_options of this FirmwareNfsServer.
Mount option as configured on the NFS Server. Example:nolock.
:return: The mount_options of this FirmwareNfsServer.
:rtype: str
"""
return self._mount_options
@mount_options.setter
def mount_options(self, mount_options):
"""
Sets the mount_options of this FirmwareNfsServer.
Mount option as configured on the NFS Server. Example:nolock.
:param mount_options: The mount_options of this FirmwareNfsServer.
:type: str
"""
self._mount_options = mount_options
@property
def remote_file(self):
"""
Gets the remote_file of this FirmwareNfsServer.
Filename of the image in the remote share location. Example:ucs-c220m5-huu-3.1.2c.iso.
:return: The remote_file of this FirmwareNfsServer.
:rtype: str
"""
return self._remote_file
@remote_file.setter
def remote_file(self, remote_file):
"""
Sets the remote_file of this FirmwareNfsServer.
Filename of the image in the remote share location. Example:ucs-c220m5-huu-3.1.2c.iso.
:param remote_file: The remote_file of this FirmwareNfsServer.
:type: str
"""
self._remote_file = remote_file
@property
def remote_ip(self):
"""
Gets the remote_ip of this FirmwareNfsServer.
NFS Server Hostname or IP Address. Example:nfs-server-hostname or 10.10.8.7.
:return: The remote_ip of this FirmwareNfsServer.
:rtype: str
"""
return self._remote_ip
@remote_ip.setter
def remote_ip(self, remote_ip):
"""
Sets the remote_ip of this FirmwareNfsServer.
NFS Server Hostname or IP Address. Example:nfs-server-hostname or 10.10.8.7.
:param remote_ip: The remote_ip of this FirmwareNfsServer.
:type: str
"""
self._remote_ip = remote_ip
@property
def remote_share(self):
"""
Gets the remote_share of this FirmwareNfsServer.
Directory where the image is stored. Example:/share/subfolder.
:return: The remote_share of this FirmwareNfsServer.
:rtype: str
"""
return self._remote_share
@remote_share.setter
def remote_share(self, remote_share):
"""
Sets the remote_share of this FirmwareNfsServer.
Directory where the image is stored. Example:/share/subfolder.
:param remote_share: The remote_share of this FirmwareNfsServer.
:type: str
"""
self._remote_share = remote_share
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, FirmwareNfsServer):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
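# Editor's note: a hedged usage sketch for the generated model above; the field values are
# illustrative placeholders taken from the attribute docstrings, not real infrastructure.
def _example_firmware_nfs_server():
    server = FirmwareNfsServer(mount_options='nolock',
                               remote_file='ucs-c220m5-huu-3.1.2c.iso',
                               remote_ip='10.10.8.7',
                               remote_share='/share/subfolder')
    return server.to_dict()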
| 27.742857 | 96 | 0.586509 |
f04b2259d54a10bcdb0b3c47397e78e4380f21fb | 3,010 | py | Python | src/board.py | juangallostra/2048 | 3f4f1b33a964ea10b5b5aec9fb8f7f2335f5a7ab | ["MIT"] | 1 | 2018-01-20T06:49:38.000Z | 2018-01-20T06:49:38.000Z | src/board.py | juangallostra/2048 | 3f4f1b33a964ea10b5b5aec9fb8f7f2335f5a7ab | ["MIT"] | 1 | 2017-11-30T17:46:06.000Z | 2017-11-30T17:46:06.000Z | src/board.py | juangallostra/2048 | 3f4f1b33a964ea10b5b5aec9fb8f7f2335f5a7ab | ["MIT"] | null | null | null |
import pygame
from pygame.locals import *
def hex_to_rgb(hex_color):
"""
Helper function to convert hex strings to RGB
"""
hex_color = hex_color.lstrip('#')
h_len = len(hex_color)
return tuple(int(hex_color[i:i + h_len // 3], 16) for i in range(0, h_len, h_len // 3))
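# Editor's note: a hedged usage sketch for hex_to_rgb(); for example '#ff8800' maps to (255, 136, 0).
def _example_hex_to_rgb():
    return hex_to_rgb('#ff8800')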
class Board:
"""
Class that draws the current board and game state
"""
def __init__(self, rows, cols, game_state, padding, tile_size, background_color, empty_tile_color,
background_tile_colors, tile_colors, font):
self._rows = rows
self._cols = cols
self._padding = padding
self._tile_size = tile_size
self._background_color = background_color
self._empty_tile_color = empty_tile_color
self._background_tile_colors = background_tile_colors
self._tile_colors = tile_colors
self._font = pygame.font.SysFont(font[0], font[1], bold=True)
self._width = cols*self._tile_size+(cols+1)*self._padding
self._height = rows*self._tile_size+(rows+1)*self._padding
self._grid = game_state
self._surface = pygame.Surface((self._width, self._height))
self._surface.fill(hex_to_rgb(self._background_color))
def update_board(self, game_state):
"""
Update the state of the board
"""
self._grid = game_state
def draw_board(self):
"""
Draw the board without tiles
"""
# Create one row
row = pygame.Surface((self._width, self._tile_size+self._padding), pygame.SRCALPHA, 32)
row = row.convert_alpha()
for col_num in range(self._cols):
tile = pygame.Surface((self._tile_size, self._tile_size))
tile.fill(hex_to_rgb(self._empty_tile_color))
row.blit(tile, (self._padding+col_num*(self._padding+self._tile_size), self._padding))
# Add as many empty rows to the board as the specified number of rows
for row_num in range(self._rows):
self._surface.blit(row, (0, (self._padding+self._tile_size)*row_num))
def draw_tile(self, row, col, tile_value):
"""
Draw a tile on the board
"""
tile = pygame.Surface((self._tile_size, self._tile_size))
tile.fill(hex_to_rgb(self._background_tile_colors[tile_value]))
text = self._font.render(str(tile_value), True, hex_to_rgb(self._tile_colors[tile_value]))
text_width, text_height = text.get_size()
tile.blit(text, ((self._tile_size-text_width)//2, (self._tile_size-text_height)//2))
self._surface.blit(tile, (self._padding+(self._padding+self._tile_size)*col,
self._padding+(self._padding+self._tile_size)*row))
def draw_tiles(self):
for row in range(self._rows):
for col in range(self._cols):
if self._grid[row][col] != 0:
self.draw_tile(row, col, self._grid[row][col])
def get_board(self):
return self._surface
| 39.605263 | 102 | 0.636545 |
fe36a9c38f57d8429dfa27770f931ed47ff714d3 | 9,736 | py | Python | mne/channels/_dig_montage_utils.py | abramhindle/mne-python | 989390a484cba219aae74c778b71568586f9edb2 | ["BSD-3-Clause"] | null | null | null | mne/channels/_dig_montage_utils.py | abramhindle/mne-python | 989390a484cba219aae74c778b71568586f9edb2 | ["BSD-3-Clause"] | 1 | 2019-09-17T23:54:38.000Z | 2019-09-17T23:54:38.000Z | mne/channels/_dig_montage_utils.py | abramhindle/mne-python | 989390a484cba219aae74c778b71568586f9edb2 | ["BSD-3-Clause"] | null | null | null |
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Denis Engemann <denis.engemann@gmail.com>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Eric Larson <larson.eric.d@gmail.com>
# Marijn van Vliet <w.m.vanvliet@gmail.com>
# Jona Sassenhagen <jona.sassenhagen@gmail.com>
# Teon Brooks <teon.brooks@gmail.com>
# Christian Brodbeck <christianbrodbeck@nyu.edu>
# Stefan Appelhoff <stefan.appelhoff@mailbox.org>
# Joan Massich <mailsik@gmail.com>
#
# License: Simplified BSD
import xml.etree.ElementTree as ElementTree
import numpy as np
from ..transforms import apply_trans, get_ras_to_neuromag_trans
from ..io.constants import FIFF
from ..utils import _check_fname, Bunch, warn
def _fix_data_fiducials(data):
nasion, rpa, lpa = data.nasion, data.rpa, data.lpa
if any(x is None for x in (nasion, rpa, lpa)):
if data.elp is None or data.point_names is None:
raise ValueError('ELP points and names must be specified for '
'transformation.')
names = [name.lower() for name in data.point_names]
# check that all needed points are present
kinds = ('nasion', 'lpa', 'rpa')
missing = [name for name in kinds if name not in names]
if len(missing) > 0:
raise ValueError('The points %s are missing, but are needed '
'to transform the points to the MNE '
'coordinate system. Either add the points, '
'or read the montage with transform=False.'
% str(missing))
data.nasion, data.lpa, data.rpa = [
data.elp[names.index(kind)] for kind in kinds
]
# remove fiducials from elp
mask = np.ones(len(names), dtype=bool)
for fid in ['nasion', 'lpa', 'rpa']:
mask[names.index(fid)] = False
data.elp = data.elp[mask]
data.point_names = [p for pi, p in enumerate(data.point_names)
if mask[pi]]
return data
def _transform_to_head_call(data):
"""Transform digitizer points to Neuromag head coordinates.
Parameters
----------
data : Bunch.
replicates DigMontage old structure. Requires the following fields:
['nasion', 'lpa', 'rpa', 'hsp', 'hpi', 'elp', 'coord_frame',
'dig_ch_pos']
Returns
-------
data : Bunch.
transformed version of input data.
"""
if data.coord_frame == 'head': # nothing to do
return data
nasion, rpa, lpa = data.nasion, data.rpa, data.lpa
native_head_t = get_ras_to_neuromag_trans(nasion, lpa, rpa)
data.nasion, data.lpa, data.rpa = apply_trans(
native_head_t, np.array([nasion, lpa, rpa]))
if data.elp is not None:
data.elp = apply_trans(native_head_t, data.elp)
if data.hsp is not None:
data.hsp = apply_trans(native_head_t, data.hsp)
if data.dig_ch_pos is not None:
for key, val in data.dig_ch_pos.items():
data.dig_ch_pos[key] = apply_trans(native_head_t, val)
data.coord_frame = 'head'
return data
_cardinal_ident_mapping = {
FIFF.FIFFV_POINT_NASION: 'nasion',
FIFF.FIFFV_POINT_LPA: 'lpa',
FIFF.FIFFV_POINT_RPA: 'rpa',
}
def _read_dig_montage_egi(
fname,
_scaling,
_all_data_kwargs_are_none,
):
if not _all_data_kwargs_are_none:
raise ValueError('hsp, hpi, elp, point_names, fif must all be '
'None if egi is not None')
_check_fname(fname, overwrite='read', must_exist=True)
root = ElementTree.parse(fname).getroot()
ns = root.tag[root.tag.index('{'):root.tag.index('}') + 1]
sensors = root.find('%ssensorLayout/%ssensors' % (ns, ns))
fids = dict()
dig_ch_pos = dict()
fid_name_map = {'Nasion': 'nasion',
'Right periauricular point': 'rpa',
'Left periauricular point': 'lpa'}
for s in sensors:
name, number, kind = s[0].text, int(s[1].text), int(s[2].text)
coordinates = np.array([float(s[3].text), float(s[4].text),
float(s[5].text)])
coordinates *= _scaling
# EEG Channels
if kind == 0:
dig_ch_pos['EEG %03d' % number] = coordinates
# Reference
elif kind == 1:
dig_ch_pos['EEG %03d' %
(len(dig_ch_pos.keys()) + 1)] = coordinates
# XXX: we should do something with this (ref and eeg get mixed)
# Fiducials
elif kind == 2:
fid_name = fid_name_map[name]
fids[fid_name] = coordinates
# Unknown
else:
warn('Unknown sensor type %s detected. Skipping sensor...'
'Proceed with caution!' % kind)
return Bunch(
# EGI stuff
nasion=fids['nasion'], lpa=fids['lpa'], rpa=fids['rpa'],
dig_ch_pos=dig_ch_pos, coord_frame='unknown',
# not EGI stuff
hsp=None, hpi=None, elp=None, point_names=None,
)
def _foo_get_data_from_dig(dig):
# XXXX:
# This does something really similar to _read_dig_montage_fif but:
# - does not check coord_frame
# - does not do any operation that implies assumptions with the names
# Split up the dig points by category
hsp, hpi, elp = list(), list(), list()
fids, dig_ch_pos_location = dict(), list()
for d in dig:
if d['kind'] == FIFF.FIFFV_POINT_CARDINAL:
fids[_cardinal_ident_mapping[d['ident']]] = d['r']
elif d['kind'] == FIFF.FIFFV_POINT_HPI:
hpi.append(d['r'])
elp.append(d['r'])
# XXX: point_names.append('HPI%03d' % d['ident'])
elif d['kind'] == FIFF.FIFFV_POINT_EXTRA:
hsp.append(d['r'])
elif d['kind'] == FIFF.FIFFV_POINT_EEG:
# XXX: dig_ch_pos['EEG%03d' % d['ident']] = d['r']
dig_ch_pos_location.append(d['r'])
dig_coord_frames = set([d['coord_frame'] for d in dig])
assert len(dig_coord_frames) == 1, 'Only single coordinate frame in dig is supported' # noqa # XXX
return Bunch(
nasion=fids.get('nasion', None),
lpa=fids.get('lpa', None),
rpa=fids.get('rpa', None),
hsp=np.array(hsp) if len(hsp) else None,
hpi=np.array(hpi) if len(hpi) else None,
elp=np.array(elp) if len(elp) else None,
dig_ch_pos_location=dig_ch_pos_location,
coord_frame=dig_coord_frames.pop(),
)
def _get_fid_coords(dig):
fid_coords = Bunch(nasion=None, lpa=None, rpa=None)
fid_coord_frames = dict()
for d in dig:
if d['kind'] == FIFF.FIFFV_POINT_CARDINAL:
key = _cardinal_ident_mapping[d['ident']]
fid_coords[key] = d['r']
fid_coord_frames[key] = d['coord_frame']
if len(fid_coord_frames) > 0:
if set(fid_coord_frames.keys()) != set(['nasion', 'lpa', 'rpa']):
            raise ValueError("Some fiducial points are missing (got %s)." %
                             sorted(fid_coord_frames.keys()))
if len(set(fid_coord_frames.values())) > 1:
raise ValueError(
'All fiducial points must be in the same coordinate system '
                '(got %s)' % sorted(set(fid_coord_frames.values()))
)
coord_frame = fid_coord_frames.popitem()[1] if fid_coord_frames else None
return fid_coords, coord_frame
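# Illustrative usage sketch (hedged): feeding ``_get_fid_coords`` a hand-built
# dig list carrying only the keys the helper reads ('kind', 'ident', 'r',
# 'coord_frame'); it is not part of the upstream API.
def _example_get_fid_coords():
    dig = [
        dict(kind=FIFF.FIFFV_POINT_CARDINAL, ident=FIFF.FIFFV_POINT_NASION,
             r=np.array([0., 0.1, 0.]), coord_frame=FIFF.FIFFV_COORD_HEAD),
        dict(kind=FIFF.FIFFV_POINT_CARDINAL, ident=FIFF.FIFFV_POINT_LPA,
             r=np.array([-0.08, 0., 0.]), coord_frame=FIFF.FIFFV_COORD_HEAD),
        dict(kind=FIFF.FIFFV_POINT_CARDINAL, ident=FIFF.FIFFV_POINT_RPA,
             r=np.array([0.08, 0., 0.]), coord_frame=FIFF.FIFFV_COORD_HEAD),
    ]
    fid_coords, coord_frame = _get_fid_coords(dig)
    # All three fiducials were found in a single, shared coordinate frame.
    assert coord_frame == FIFF.FIFFV_COORD_HEAD
    return fid_coords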
def _read_dig_montage_bvct(
fname,
unit,
_all_data_kwargs_are_none,
):
if not _all_data_kwargs_are_none:
        raise ValueError('hsp, hpi, elp, point_names, fif must all be '
                         'None if bvct is not None')
_check_fname(fname, overwrite='read', must_exist=True)
# CapTrak is natively in mm
scale = dict(mm=1e-3, cm=1e-2, auto=1e-3, m=1)
if unit not in scale:
raise ValueError("Unit needs to be one of %s, not %r" %
(sorted(scale.keys()), unit))
if unit not in ['mm', 'auto']:
warn('Using "{}" as unit for BVCT file. BVCT files are usually '
'specified in "mm". This might lead to errors.'.format(unit),
RuntimeWarning)
return _parse_brainvision_dig_montage(fname, scale=scale[unit])
BACK_COMPAT = object() # XXX: to remove in 0.20
def _parse_brainvision_dig_montage(fname, scale=BACK_COMPAT):
BVCT_SCALE = 1e-3
FID_NAME_MAP = {'Nasion': 'nasion', 'RPA': 'rpa', 'LPA': 'lpa'}
root = ElementTree.parse(fname).getroot()
sensors = root.find('CapTrakElectrodeList')
fids, dig_ch_pos = dict(), dict()
for s in sensors:
name = s.find('Name').text
# Need to prune "GND" and "REF": these are not included in the raw
# data and will raise errors when we try to do raw.set_montage(...)
# XXX eventually this should be stored in ch['loc'][3:6]
# but we don't currently have such capabilities here
if name in ['GND', 'REF']:
continue
is_fid = name in FID_NAME_MAP
coordinates = np.array([float(s.find('X').text),
float(s.find('Y').text),
float(s.find('Z').text)])
coordinates *= BVCT_SCALE if scale is BACK_COMPAT else scale
# Fiducials
if is_fid:
fids[FID_NAME_MAP[name]] = coordinates
# EEG Channels
else:
dig_ch_pos[name] = coordinates
return Bunch(
# BVCT stuff
nasion=fids['nasion'], lpa=fids['lpa'], rpa=fids['rpa'],
dig_ch_pos=dig_ch_pos, coord_frame='unknown',
# not BVCT stuff
hsp=None, hpi=None, elp=None, point_names=None,
)
| 34.402827 | 102 | 0.587818 |
33361f33a9eeaa778f09c1f52cf00bcef2769e0f | 2,756 | py | Python | datasets/full-emoticons-dataset/emoticon_dataset_generator.py | david00medina/cyberattack-forecasting | e8a18e474d9eec0800d06e3f21c9cd25cb54e831 | ["MIT"] | null | null | null | datasets/full-emoticons-dataset/emoticon_dataset_generator.py | david00medina/cyberattack-forecasting | e8a18e474d9eec0800d06e3f21c9cd25cb54e831 | ["MIT"] | null | null | null | datasets/full-emoticons-dataset/emoticon_dataset_generator.py | david00medina/cyberattack-forecasting | e8a18e474d9eec0800d06e3f21c9cd25cb54e831 | ["MIT"] | null | null | null |
# ############################################################################################################
# Copyright (c) 2022 David Alberto Medina Medina. #
# #
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software #
# and associated documentation files (the "Software"), to deal in the Software without restriction, #
# including without limitation the rights to use, copy, modify, merge, publish, distribute, #
# sublicense, and/or sell copies of the Software, and to permit persons to whom the Software #
# is furnished to do so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in all copies or substantial #
# portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, #
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A #
# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR #
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN #
# AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION #
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #
# ############################################################################################################
import string
import pandas as pd
import requests
header = {
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.75 Safari/537.36",
"X-Requested-With": "XMLHttpRequest"
}
df = pd.DataFrame(data={"Emoticon": [], "Meaning": [], "Votes": []})
for x in string.ascii_lowercase:
url = f'https://slangit.com/emoticons/{x}'
r = requests.get(url, headers=header)
emoji_df = pd.read_html(r.text)[0]
df = pd.concat([df, emoji_df], ignore_index=True)
df['Meaning'] = df['Meaning'].str.replace(r' \([0-9]+\)$', '', regex=True)
df.to_csv('full-emoticons-dataset.csv', index_label=["#"])
| 70.666667 | 123 | 0.468433 |
441d47e7b0a3571874f47f873647a64cd9f8e0a1 | 2,663 | py | Python | evaluate.py | RManLuo/CP-GNN | a186d31167c1b00408de767b78271c5027882e23 | ["MIT"] | 14 | 2021-09-02T02:56:51.000Z | 2022-02-15T23:37:43.000Z | evaluate.py | RManLuo/CP-GNN | a186d31167c1b00408de767b78271c5027882e23 | ["MIT"] | null | null | null | evaluate.py | RManLuo/CP-GNN | a186d31167c1b00408de767b78271c5027882e23 | ["MIT"] | 2 | 2021-11-02T13:57:38.000Z | 2021-11-17T22:37:15.000Z |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/8/11 11:11
# @Author : Raymound luo
# @Mail : luolinhao1998@gmail.com
# @File : evaluate.py
# @Software: PyCharm
# @Describe:
from models import ContextGNN
from utils import load_data, evaluate, load_latest_model, save_attention_matrix, generate_attention_heat_map, \
save_config
import torch
import importlib
import os
import argparse
def evaluate_task(config, checkpoint_path=None):
dataloader = load_data(config.data_config)
hg = dataloader.heter_graph
if config.data_config['dataset'] in ['AIFB', 'AM', 'BGS', 'MUTAG']:
config.data_config['primary_type'] = dataloader.predict_category
config.model_config['primary_type'] = dataloader.predict_category
if not checkpoint_path:
model = ContextGNN(hg, config.model_config)
model = load_latest_model(config.train_config['checkpoint_path'], model)
else:
config_path = os.path.join(checkpoint_path, 'config')
config_path = os.path.relpath(config_path)
config_file = config_path.replace(os.sep, '.')
model_path = os.path.join(checkpoint_path, 'model.pth')
config = importlib.import_module(config_file)
model = ContextGNN(hg, config.model_config)
model.load_state_dict(torch.load(model_path))
p_emb = model.primary_emb.weight.detach().cpu().numpy()
CF_data = dataloader.load_classification_data()
# LP_data = dataloader.load_links_prediction_data()
result_save_path = evaluate(p_emb, CF_data, None, method=config.evaluate_config['method'], metric=config.data_config['task'], save_result=True,
result_path=config.evaluate_config['result_path'],
random_state=config.evaluate_config['random_state'],
max_iter=config.evaluate_config['max_iter'], n_jobs=config.evaluate_config['n_jobs'])
if result_save_path:
save_config(config, result_save_path)
model_save_path = os.path.join(result_save_path, "model.pth")
torch.save(model.state_dict(), model_save_path)
attention_matrix_path = save_attention_matrix(model, result_save_path, config.data_config['K_length'])
if attention_matrix_path and config.evaluate_config['save_heat_map']:
generate_attention_heat_map(hg.ntypes, attention_matrix_path)
if __name__ == "__main__":
import config
parser = argparse.ArgumentParser(description='Which checkpoint to load?')
parser.add_argument('-path', default=None, type=str, help='checkpoint path')
args = parser.parse_args()
evaluate_task(config, args.path)
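# Hedged usage sketch (not shipped with the original script): the script is
# typically invoked either without arguments, to evaluate the latest checkpoint
# under config.train_config['checkpoint_path'], or with an explicit checkpoint
# directory (the path below is a placeholder):
#
#   python evaluate.py
#   python evaluate.py -path <checkpoint_dir>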
| 45.135593 | 148 | 0.704469 |
5143f17eaa795407817e6d23285a5f621cf5e950 | 482 | py | Python | devices/interfaces.py | Saldenisov/pyconlyse | 1de301b4a4c15ee0bd19034aa8d5da1beacfd124 | ["MIT"] | null | null | null | devices/interfaces.py | Saldenisov/pyconlyse | 1de301b4a4c15ee0bd19034aa8d5da1beacfd124 | ["MIT"] | null | null | null | devices/interfaces.py | Saldenisov/pyconlyse | 1de301b4a4c15ee0bd19034aa8d5da1beacfd124 | ["MIT"] | null | null | null |
from abc import ABC
from enum import Enum
from typing import NewType
DeviceId = NewType('DeviceId', str)
class DeviceType(str, Enum):
"""
Basic types of devices: Client, Server, Service
"""
CLIENT = 'client'
SERVER = 'server'
SERVICE = 'service'
DEFAULT = 'default'
def __repr__(self): # It is a hack to make DeviceType easily json serializable
return self.__str__()
class DeviceInter(ABC):
pass
class ExecutorInter(ABC):
pass
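# Hedged usage sketch (not part of the original module); the identifier string
# below is a made-up placeholder:
#
#   device_id = DeviceId('service/archiver-1')
#   kind = DeviceType.SERVICE
#   kind == 'service'   # True, because DeviceType mixes in str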
| 19.28 | 83 | 0.66805 |
7832e9a426b75927002b862a650deb671e4f3094 | 21,847 | py | Python | merlin/models/torch/tabular/base.py | jperez999/models-1 | 44c71fd5168cae60c56ad300c6e21522a86c1908 | ["Apache-2.0"] | null | null | null | merlin/models/torch/tabular/base.py | jperez999/models-1 | 44c71fd5168cae60c56ad300c6e21522a86c1908 | ["Apache-2.0"] | null | null | null | merlin/models/torch/tabular/base.py | jperez999/models-1 | 44c71fd5168cae60c56ad300c6e21522a86c1908 | ["Apache-2.0"] | null | null | null |
#
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
from abc import ABC
from functools import reduce
from typing import Dict, List, Optional, Union
import torch
from merlin.models.utils.doc_utils import docstring_parameter
from merlin.models.utils.registry import Registry
from merlin.schema import Schema
from ..block.base import BlockBase, SequentialBlock, right_shift_block
from ..typing import TabularData, TensorOrTabularData
from ..utils.torch_utils import OutputSizeMixin, calculate_batch_size_from_input_size
tabular_transformation_registry: Registry = Registry.class_registry("torch.tabular_transformations")
tabular_aggregation_registry: Registry = Registry.class_registry("torch.tabular_aggregations")
class TabularTransformation(OutputSizeMixin, torch.nn.Module, ABC):
"""Transformation that takes in `TabularData` and outputs `TabularData`."""
def forward(self, inputs: TabularData, **kwargs) -> TabularData:
raise NotImplementedError()
@classmethod
def parse(cls, class_or_str):
return tabular_transformation_registry.parse(class_or_str)
class TabularAggregation(OutputSizeMixin, torch.nn.Module, ABC):
"""Aggregation of `TabularData` that outputs a single `Tensor`"""
def forward(self, inputs: TabularData) -> torch.Tensor:
raise NotImplementedError()
def _expand_non_sequential_features(self, inputs: TabularData) -> TabularData:
inputs_sizes = {k: v.shape for k, v in inputs.items()}
seq_features_shapes, sequence_length = self._get_seq_features_shapes(inputs_sizes)
if len(seq_features_shapes) > 0:
non_seq_features = set(inputs.keys()).difference(set(seq_features_shapes.keys()))
for fname in non_seq_features:
# Including the 2nd dim and repeating for the sequence length
inputs[fname] = inputs[fname].unsqueeze(dim=1).repeat(1, sequence_length, 1)
return inputs
def _get_seq_features_shapes(self, inputs_sizes: Dict[str, torch.Size]):
seq_features_shapes = dict()
for fname, fshape in inputs_sizes.items():
# Saves the shapes of sequential features
if len(fshape) >= 3:
seq_features_shapes[fname] = tuple(fshape[:2])
sequence_length = 0
if len(seq_features_shapes) > 0:
if len(set(seq_features_shapes.values())) > 1:
raise ValueError(
"All sequential features must share the same shape in the first two dims "
"(batch_size, seq_length): {}".format(seq_features_shapes)
)
sequence_length = list(seq_features_shapes.values())[0][1]
return seq_features_shapes, sequence_length
def _check_concat_shapes(self, inputs: TabularData):
input_sizes = {k: v.shape for k, v in inputs.items()}
if len(set(list([v[:-1] for v in input_sizes.values()]))) > 1:
raise Exception(
"All features dimensions except the last one must match: {}".format(input_sizes)
)
def _get_agg_output_size(self, input_size, agg_dim):
batch_size = calculate_batch_size_from_input_size(input_size)
seq_features_shapes, sequence_length = self._get_seq_features_shapes(input_size)
if len(seq_features_shapes) > 0:
return (
batch_size,
sequence_length,
agg_dim,
)
else:
return (batch_size, agg_dim)
@classmethod
def parse(cls, class_or_str):
return tabular_aggregation_registry.parse(class_or_str)
TabularTransformationType = Union[str, TabularTransformation]
TabularTransformationsType = Union[TabularTransformationType, List[TabularTransformationType]]
TabularAggregationType = Union[str, TabularAggregation]
class SequentialTabularTransformations(SequentialBlock):
"""A sequential container, modules will be added to it in the order they are passed in.
Parameters
----------
transformation: TabularTransformationType
transformations that are passed in here will be called in order.
"""
def __init__(self, *transformation: TabularTransformationsType):
        if len(transformation) == 1 and isinstance(transformation[0], list):
transformation = transformation[0]
super().__init__(*[TabularTransformation.parse(t) for t in transformation])
def append(self, transformation):
self.transformations.append(TabularTransformation.parse(transformation))
TABULAR_MODULE_PARAMS_DOCSTRING = """
pre: Union[str, TabularTransformation, List[str], List[TabularTransformation]], optional
Transformations to apply on the inputs when the module is called (so **before** `forward`).
post: Union[str, TabularTransformation, List[str], List[TabularTransformation]], optional
Transformations to apply on the inputs after the module is called (so **after** `forward`).
aggregation: Union[str, TabularAggregation], optional
Aggregation to apply after processing the `forward`-method to output a single Tensor.
"""
@docstring_parameter(tabular_module_parameters=TABULAR_MODULE_PARAMS_DOCSTRING)
class TabularModule(torch.nn.Module):
"""PyTorch Module that's specialized for tabular-data by integrating many often used operations.
Parameters
----------
{tabular_module_parameters}
"""
def __init__(
self,
pre: Optional[TabularTransformationsType] = None,
post: Optional[TabularTransformationsType] = None,
aggregation: Optional[TabularAggregationType] = None,
**kwargs,
):
super().__init__()
self.input_size = None
self.pre = pre # type: ignore
self.post = post # type: ignore
self.aggregation = aggregation # type: ignore
@classmethod
def from_schema(cls, schema: Schema, tags=None, **kwargs) -> Optional["TabularModule"]:
"""Instantiate a TabularModule instance from a DatasetSchema.
Parameters
----------
schema
tags
kwargs
Returns
-------
Optional[TabularModule]
"""
schema_copy = copy.copy(schema)
if tags:
schema_copy = schema_copy.select_by_tag(tags)
if not schema_copy.column_schemas:
return None
return cls.from_features(schema_copy.column_names, schema=schema_copy, **kwargs)
@classmethod
@docstring_parameter(tabular_module_parameters=TABULAR_MODULE_PARAMS_DOCSTRING, extra_padding=4)
def from_features(
cls,
features: List[str],
pre: Optional[TabularTransformationsType] = None,
post: Optional[TabularTransformationsType] = None,
aggregation: Optional[TabularAggregationType] = None,
) -> "TabularModule":
"""Initializes a TabularModule instance where the contents of features will be filtered
out
Parameters
----------
features: List[str]
A list of feature-names that will be used as the first pre-processing op to filter out
all other features not in this list.
{tabular_module_parameters}
Returns
-------
TabularModule
"""
pre = [FilterFeatures(features), pre] if pre else FilterFeatures(features) # type: ignore
return cls(pre=pre, post=post, aggregation=aggregation)
@property
def pre(self) -> Optional[SequentialTabularTransformations]:
"""
Returns
-------
SequentialTabularTransformations, optional
"""
return self._pre
@pre.setter
def pre(self, value: Optional[TabularTransformationsType]):
if value:
self._pre: Optional[
SequentialTabularTransformations
] = SequentialTabularTransformations(value)
else:
self._pre = None
@property
def post(self) -> Optional[SequentialTabularTransformations]:
"""
Returns
-------
SequentialTabularTransformations, optional
"""
return self._post
@post.setter
def post(self, value: Optional[TabularTransformationsType]):
if value:
self._post: Optional[
SequentialTabularTransformations
] = SequentialTabularTransformations(value)
else:
self._post = None
@property
def aggregation(self) -> Optional[TabularAggregation]:
"""
Returns
-------
TabularAggregation, optional
"""
return self._aggregation
@aggregation.setter
def aggregation(self, value: Optional[Union[str, TabularAggregation]]):
"""
Parameters
----------
value
"""
if value:
self._aggregation: Optional[TabularAggregation] = TabularAggregation.parse(value)
else:
self._aggregation = None
def pre_forward(
self, inputs: TabularData, transformations: Optional[TabularTransformationsType] = None
) -> TabularData:
"""Method that's typically called before the forward method for pre-processing.
Parameters
----------
inputs: TabularData
input-data, typically the output of the forward method.
transformations: TabularAggregationType, optional
Returns
-------
TabularData
"""
return self._maybe_apply_transformations(
inputs, transformations=transformations or self.pre
)
def forward(self, x: TabularData, *args, **kwargs) -> TabularData:
return x
def post_forward(
self,
inputs: TabularData,
transformations: Optional[TabularTransformationsType] = None,
merge_with: Union["TabularModule", List["TabularModule"]] = None,
aggregation: Optional[TabularAggregationType] = None,
) -> TensorOrTabularData:
"""Method that's typically called after the forward method for post-processing.
Parameters
----------
inputs: TabularData
input-data, typically the output of the forward method.
transformations: TabularTransformationType, optional
Transformations to apply on the input data.
merge_with: Union[TabularModule, List[TabularModule]], optional
Other TabularModule's to call and merge the outputs with.
aggregation: TabularAggregationType, optional
Aggregation to aggregate the output to a single Tensor.
Returns
-------
TensorOrTabularData (Tensor when aggregation is set, else TabularData)
"""
_aggregation: Optional[TabularAggregation]
if aggregation:
_aggregation = TabularAggregation.parse(aggregation)
else:
_aggregation = getattr(self, "aggregation", None)
outputs = inputs
if merge_with:
if not isinstance(merge_with, list):
merge_with = [merge_with]
for layer_or_tensor in merge_with:
to_add = layer_or_tensor(inputs) if callable(layer_or_tensor) else layer_or_tensor
outputs.update(to_add)
outputs = self._maybe_apply_transformations(
outputs, transformations=transformations or self.post
)
if _aggregation:
schema = getattr(self, "schema", None)
_aggregation.set_schema(schema)
return _aggregation(outputs)
return outputs
def __call__(
self,
inputs: TabularData,
*args,
pre: Optional[TabularTransformationsType] = None,
post: Optional[TabularTransformationsType] = None,
merge_with: Union["TabularModule", List["TabularModule"]] = None,
aggregation: Optional[TabularAggregationType] = None,
**kwargs,
) -> TensorOrTabularData:
"""We overwrite the call method in order to be able to do pre- and post-processing.
Parameters
----------
inputs: TabularData
Input TabularData.
pre: TabularTransformationType, optional
Transformations to apply before calling the forward method. If pre is None, this method
will check if `self.pre` is set.
post: TabularTransformationType, optional
Transformations to apply after calling the forward method. If post is None, this method
will check if `self.post` is set.
merge_with: Union[TabularModule, List[TabularModule]]
Other TabularModule's to call and merge the outputs with.
aggregation: TabularAggregationType, optional
Aggregation to aggregate the output to a single Tensor.
Returns
-------
TensorOrTabularData (Tensor when aggregation is set, else TabularData)
"""
inputs = self.pre_forward(inputs, transformations=pre)
# This will call the `forward` method implemented by the super class.
outputs = super().__call__(inputs, *args, **kwargs) # noqa
if isinstance(outputs, dict):
outputs = self.post_forward(
outputs, transformations=post, merge_with=merge_with, aggregation=aggregation
)
return outputs
def _maybe_apply_transformations(
self,
inputs: TabularData,
transformations: Optional[
Union[TabularTransformationsType, SequentialTabularTransformations]
] = None,
) -> TabularData:
"""Apply transformations to the inputs if these are defined.
Parameters
----------
inputs
transformations
Returns
-------
"""
if transformations:
_transformations = TabularTransformation.parse(transformations)
return _transformations(inputs)
return inputs
def __rrshift__(self, other):
return right_shift_block(self, other)
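# Hedged usage sketch (not part of the upstream API): with no pre/post
# transformations and no aggregation configured, a bare TabularModule simply
# passes the tabular dictionary through unchanged.
def _example_tabular_module_passthrough():
    module = TabularModule()
    inputs = {"a": torch.ones(2, 3), "b": torch.zeros(2, 3)}
    outputs = module(inputs)
    assert set(outputs.keys()) == {"a", "b"}
    return outputs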
class FilterFeatures(TabularTransformation):
    """Module that filters out certain features from `TabularData`.
Parameters
----------
to_include: List[str]
List of features to include in the result of calling the module
pop: bool
Boolean indicating whether to pop the features to exclude from the inputs dictionary.
"""
def __init__(self, to_include: List[str], pop: bool = False):
super().__init__()
self.to_include = to_include
self.pop = pop
def forward(self, inputs: TabularData, **kwargs) -> TabularData:
"""
Parameters
----------
inputs: TabularData
Input dictionary containing features to filter.
Returns Filtered TabularData that only contains the feature-names in `self.to_include`.
-------
"""
        assert isinstance(inputs, dict), "Inputs need to be a dict"
outputs = {k: v for k, v in inputs.items() if k in self.to_include}
if self.pop:
for key in outputs.keys():
inputs.pop(key)
return outputs
def forward_output_size(self, input_shape):
"""
Parameters
----------
input_shape
Returns
-------
"""
return {k: v for k, v in input_shape.items() if k in self.to_include}
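# Hedged usage sketch (not part of the upstream API): FilterFeatures keeps only
# the requested keys of the tabular dictionary.
def _example_filter_features():
    filtering = FilterFeatures(["a"])
    outputs = filtering({"a": torch.ones(2, 3), "b": torch.zeros(2, 3)})
    assert list(outputs.keys()) == ["a"]
    return outputs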
@docstring_parameter(tabular_module_parameters=TABULAR_MODULE_PARAMS_DOCSTRING)
class TabularBlock(BlockBase, TabularModule, ABC):
"""TabularBlock extends TabularModule to turn it into a block with output size info.
Parameters
----------
{tabular_module_parameters}
"""
def __init__(
self,
pre: Optional[TabularTransformationType] = None,
post: Optional[TabularTransformationType] = None,
aggregation: Optional[TabularAggregationType] = None,
schema: Optional[Schema] = None,
):
super().__init__(pre=pre, post=post, aggregation=aggregation)
self.schema = schema
def to_module(self, shape_or_module, device=None):
shape = shape_or_module
if isinstance(shape_or_module, torch.nn.Module):
shape = getattr(shape_or_module, "output_size", None)
if shape:
shape = shape()
return self.build(shape, device=device)
def output_size(self, input_size=None):
if self.pre:
input_size = self.pre.output_size(input_size)
output_size = self._check_post_output_size(super().output_size(input_size))
return output_size
def build(self, input_size, schema=None, **kwargs):
output = super().build(input_size, schema=schema, **kwargs)
output_size = input_size
if self.pre:
self.pre.build(input_size, schema=schema, **kwargs)
output_size = self.pre.output_size(input_size)
output_size = self.forward_output_size(output_size)
if self.post:
self.post.build(output_size, schema=schema, **kwargs)
output_size = self.post.output_size(output_size)
if self.aggregation:
self.aggregation.build(output_size, schema=schema, **kwargs)
return output
def _check_post_output_size(self, input_size):
output_size = input_size
if isinstance(input_size, dict):
if self.post:
output_size = self.post.output_size(output_size)
if self.aggregation:
schema = getattr(self, "schema", None)
# self.aggregation.build(output_size, schema=schema)
self.aggregation.set_schema(schema)
output_size = self.aggregation.forward_output_size(output_size)
return output_size
def __rrshift__(self, other):
return right_shift_block(self, other)
@docstring_parameter(tabular_module_parameters=TABULAR_MODULE_PARAMS_DOCSTRING)
class MergeTabular(TabularBlock):
"""Merge multiple TabularModule's into a single output of TabularData.
Parameters
----------
modules_to_merge: Union[TabularModule, Dict[str, TabularModule]]
TabularModules to merge into, this can also be one or multiple dictionaries keyed by the
name the module should have.
{tabular_module_parameters}
"""
def __init__(
self,
*modules_to_merge: Union[TabularModule, Dict[str, TabularModule]],
pre: Optional[TabularTransformationType] = None,
post: Optional[TabularTransformationType] = None,
aggregation: Optional[TabularAggregationType] = None,
schema: Optional[Schema] = None,
):
super().__init__(pre=pre, post=post, aggregation=aggregation, schema=schema)
self.to_merge: Union[torch.nn.ModuleDict, torch.nn.ModuleList]
if all(isinstance(x, dict) for x in modules_to_merge):
to_merge: Dict[str, TabularModule]
to_merge = reduce(lambda a, b: dict(a, **b), modules_to_merge) # type: ignore
self.to_merge = torch.nn.ModuleDict(to_merge)
elif all(isinstance(x, torch.nn.Module) for x in modules_to_merge):
self.to_merge = torch.nn.ModuleList(modules_to_merge) # type: ignore
else:
raise ValueError(
"Please provide one or multiple TabularBlock's to merge or "
f"dictionaries of TabularBlocks. got: {modules_to_merge}"
)
# Merge schemas if necessary.
if not schema and all(getattr(m, "schema", False) for m in self.merge_values):
self.schema = reduce(lambda a, b: a + b, [m.schema for m in self.merge_values])
@property
def merge_values(self):
if isinstance(self.to_merge, torch.nn.ModuleDict):
return list(self.to_merge.values())
return self.to_merge
def forward(self, inputs: TabularData, training=False, **kwargs) -> TabularData: # type: ignore
        assert isinstance(inputs, dict), "Inputs need to be a dict"
outputs = {}
for layer in self.merge_values:
outputs.update(layer(inputs))
return outputs
def forward_output_size(self, input_size):
output_shapes = {}
for layer in self.merge_values:
output_shapes.update(layer.forward_output_size(input_size))
return super(MergeTabular, self).forward_output_size(output_shapes)
def build(self, input_size, **kwargs):
super().build(input_size, **kwargs)
for layer in self.merge_values:
layer.build(input_size, **kwargs)
return self
class AsTabular(TabularBlock):
"""Converts a Tensor to TabularData by converting it to a dictionary.
Parameters
----------
output_name: str
Name that should be used as the key in the output dictionary.
"""
def __init__(self, output_name: str):
super().__init__()
self.output_name = output_name
def forward(self, inputs: torch.Tensor, **kwargs) -> TabularData: # type: ignore
return {self.output_name: inputs}
def forward_output_size(self, input_size):
return {self.output_name: input_size}
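# Hedged usage sketch (not part of the upstream API): AsTabular wraps a plain
# tensor back into a single-entry tabular dictionary under the given name.
def _example_as_tabular():
    block = AsTabular("interaction")
    outputs = block(torch.ones(2, 4))
    assert set(outputs.keys()) == {"interaction"}
    return outputs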
def merge_tabular(self, other):
return MergeTabular(self, other)
TabularModule.__add__ = merge_tabular # type: ignore
TabularModule.merge = merge_tabular # type: ignore
| 34.242947 | 100 | 0.651028 |
af2392fe001782934da3ce2fccc19bfcbe857e4f | 5,859 | bzl | Python | tools/ng_setup_workspace.bzl | VamsiVempati/angular | 732026c3f56a91b016fcf79ebc581569d7c89618 | ["MIT"] | 1 | 2018-12-06T02:59:02.000Z | 2018-12-06T02:59:02.000Z | tools/ng_setup_workspace.bzl | VamsiVempati/angular | 732026c3f56a91b016fcf79ebc581569d7c89618 | ["MIT"] | 2 | 2022-03-02T11:13:43.000Z | 2022-03-02T11:13:47.000Z | tools/ng_setup_workspace.bzl | VamsiVempati/angular | 732026c3f56a91b016fcf79ebc581569d7c89618 | ["MIT"] | null | null | null |
# Copyright Google Inc. All Rights Reserved.
#
# Use of this source code is governed by an MIT-style license that can be
# found in the LICENSE file at https://angular.io/license
"Install angular source dependencies"
load("@build_bazel_rules_nodejs//:defs.bzl", "yarn_install")
load("@angular//packages/bazel/src:ng_setup_workspace.bzl", _ng_setup_workspace = "ng_setup_workspace")
def ng_setup_workspace():
"""This repository rule should be called from your WORKSPACE file.
It creates some additional Bazel external repositories that are used internally
to build angular
"""
yarn_install(
name = "angular_deps",
package_json = "@angular//:package.json",
yarn_lock = "@angular//:yarn.lock",
data = ["@angular//:tools/yarn/check-yarn.js", "@angular//:tools/postinstall-patches.js"],
node_modules_filegroup = """
filegroup(
name = "node_modules",
srcs = glob(["/".join([
"node_modules",
pkg,
"**",
ext,
]) for pkg in [
"adm-zip",
"ajv",
"angular",
"angular-1.5",
"angular-1.6",
"angular-mocks",
"angular-mocks-1.5",
"angular-mocks-1.6",
"anymatch",
"arr-diff",
"arr-flatten",
"arr-union",
"array-unique",
"asn1",
"assert-plus",
"assign-symbols",
"async-each",
"asynckit",
"atob",
"aws-sign2",
"aws4",
"balanced-match",
"base",
"base64-js",
"binary-extensions",
"blocking-proxy",
"brace-expansion",
"braces",
"bytebuffer",
"cache-base",
"caseless",
"chokidar",
"class-utils",
"co",
"collection-visit",
"convert-source-map",
"combined-stream",
"component-emitter",
"concat-map",
"copy-descriptor",
"core-util-is",
"debug",
"decode-uri-component",
"define-property",
"delayed-stream",
"domino",
"expand-brackets",
"expand-range",
"extend",
"extend-shallow",
"extglob",
"extsprintf",
"fast-deep-equal",
"fast-json-stable-stringify",
"filename-regex",
"fill-range",
"for-in",
"for-own",
"forever-agent",
"form-data",
"fragment-cache",
"fs.realpath",
"get-value",
"glob",
"glob-base",
"glob-parent",
"graceful-fs",
"hammerjs",
"har-schema",
"har-validator",
"has-value",
"has-values",
"http-signature",
"https-proxy-agent",
"inflight",
"inherits",
"is-accessor-descriptor",
"is-binary-path",
"is-buffer",
"is-data-descriptor",
"is-descriptor",
"is-dotfile",
"is-equal-shallow",
"is-extendable",
"is-extglob",
"is-glob",
"is-number",
"is-plain-object",
"is-posix-bracket",
"is-primitive",
"is-typedarray",
"is-windows",
"isarray",
"isobject",
"isstream",
"jasmine",
"jasmine-core",
"jasminewd2",
"json-schema",
"json-schema-traverse",
"json-stable-stringify",
"json-stringify-safe",
"jsprim",
"kind-of",
"long",
"lru-cache",
"magic-string",
"map-cache",
"map-visit",
"math-random",
"micromatch",
"mime-db",
"mime-types",
"minimatch",
"minimist",
"mixin-deep",
"mock-fs",
"nanomatch",
"normalize-path",
"oauth-sign",
"object.omit",
"object.pick",
"object-copy",
"object-visit",
"once",
"optimist",
"options",
"os-tmpdir",
"parse-glob",
"pascalcase",
"path-dirname",
"path-is-absolute",
"performance-now",
"posix-character-classes",
"preserve",
"process-nextick-args",
"protobufjs",
"protractor",
"qs",
"randomatic",
"readable-stream",
"readdirp",
"reflect-metadata",
"regex-cache",
"regex-not",
"remove-trailing-separator",
"repeat-element",
"repeat-string",
"request",
"ret",
"rimraf",
"safe-buffer",
"safe-regex",
"safer-buffer",
"sax",
"semver",
"set-immediate-shim",
"set-value",
"shelljs",
"sigmund",
"snapdragon",
"snapdragon-node",
"snapdragon-util",
"source-map",
"source-map-resolve",
"source-map-support",
"source-map-url",
"sourcemap-codec",
"split-string",
"sshpk",
"static-extend",
"stringstream",
"tmp",
"to-object-path",
"to-regex",
"to-regex-range",
"tough-cookie",
"tsickle",
"tslib",
"tsutils",
"tunnel-agent",
"typescript",
"union-value",
"unset-value",
"upath",
"uri-js",
"urix",
"use",
"util-deprecate",
"uuid",
"verror",
"webdriver-js-extender",
"webdriver-manager",
"wordwrap",
"wrappy",
"xhr2",
"xml2js",
"xmlbuilder",
"zone.js",
"@angular-devkit/core",
"@angular-devkit/schematics",
"@types",
"@webcomponents/custom-elements",
] for ext in [
"*.js",
"*.json",
"*.d.ts",
]] + [
"node_modules/protractor/**",
"node_modules/@schematics/angular/**",
]))
""")
_ng_setup_workspace()
| 23.914286 | 103 | 0.480116 |
ec37b150a2fa7d3f8e98bdbe2d38e815ba7bfc10 | 681 | py | Python | exercise19.py | ChinenyeEzeakor/exercise-1.1 | a027c2729b297240c039fd45a31e76272ae17d13 | ["MIT"] | null | null | null | exercise19.py | ChinenyeEzeakor/exercise-1.1 | a027c2729b297240c039fd45a31e76272ae17d13 | ["MIT"] | null | null | null | exercise19.py | ChinenyeEzeakor/exercise-1.1 | a027c2729b297240c039fd45a31e76272ae17d13 | ["MIT"] | null | null | null |
def cheese_and_crackers(cheese_count, boxes_of_crackers):
print "You have %d cheeses!" % cheese_count
print "You have %d boxes of crackers!" % boxes_of_crackers
print "Man that's enough for a party!"
print "Get a blanket.\n"
print "We can just give the function numbers directly:"
cheese_and_crackers(20, 30)
print "OR, we can use variables from our script:"
amount_of_cheese = 10
amount_of_crackers = 50
cheese_and_crackers(amount_of_cheese, amount_of_crackers)
print "We can even do math inside too:"
cheese_and_crackers(10 + 20, 5 + 6)
print "And we can combine the two, variables and math:"
cheese_and_crackers(amount_of_cheese + 100, amount_of_crackers + 1000)
| 28.375 | 71 | 0.769457 |
eff3600a3afd347b07faa17e9be5ce4fe65331c2 | 39,787 | py | Python | student_STARTUP/STARTUP.py | Aamer98/STARTUP_explore | ead758ef2f3b74c97a749687a9cb57a31586cc0f | ["MIT"] | null | null | null | student_STARTUP/STARTUP.py | Aamer98/STARTUP_explore | ead758ef2f3b74c97a749687a9cb57a31586cc0f | ["MIT"] | null | null | null | student_STARTUP/STARTUP.py | Aamer98/STARTUP_explore | ead758ef2f3b74c97a749687a9cb57a31586cc0f | ["MIT"] | null | null | null |
import torch
import torch.nn as nn
from torch.nn import functional as F
from torchvision import transforms, datasets
import torch.utils.data
torch.cuda.empty_cache()
from tqdm import tqdm
import argparse
import os
import numpy as np
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)),'..'))
import utils
import data
import time
import models
# import wandb
import warnings
import random
from collections import OrderedDict
from datasets import ISIC_few_shot, EuroSAT_few_shot, CropDisease_few_shot, Chest_few_shot
from datasets import miniImageNet_few_shot, tiered_ImageNet_few_shot, ImageNet_few_shot
import copy
import math
import warnings
from nx_ent import NTXentLoss
class projector_SIMCLR(nn.Module):
'''
The projector for SimCLR. This is added on top of a backbone for SimCLR Training
'''
def __init__(self, in_dim, out_dim):
super(projector_SIMCLR, self).__init__()
self.in_dim = in_dim
self.out_dim = out_dim
self.fc1 = nn.Linear(in_dim, in_dim)
self.fc2 = nn.Linear(in_dim, out_dim)
def forward(self, x):
return self.fc2(F.relu(self.fc1(x)))
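# Hedged usage sketch (not part of the original script): the projector maps
# backbone features to the lower-dimensional space used by the SimCLR loss.
# The 512/128 sizes below are illustrative, not prescribed by this code.
def _example_projector_simclr():
    proj = projector_SIMCLR(in_dim=512, out_dim=128)
    z = proj(torch.randn(8, 512))
    assert z.shape == (8, 128)
    return z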
class apply_twice:
'''
A wrapper for torchvision transform. The transform is applied twice for
SimCLR training
'''
def __init__(self, transform, transform2=None):
self.transform = transform
if transform2 is not None:
self.transform2 = transform2
else:
self.transform2 = transform
def __call__(self, img):
return self.transform(img), self.transform2(img)
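# Hedged usage sketch (not part of the original script): apply_twice turns one
# stochastic transform into a pair of independently sampled views, which is what
# the SimCLR loss expects. A toy noise "augmentation" stands in for the real
# torchvision pipeline here.
def _example_apply_twice():
    def jitter(x):
        return x + 0.01 * torch.randn_like(x)
    two_views = apply_twice(jitter)
    view_1, view_2 = two_views(torch.zeros(3, 8, 8))
    return view_1, view_2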
def pseudolabel_dataset(embedding, clf, dataset, transform, transform_test, params):
'''
pseudolabel the dataset with the teacher model (embedding, clf)
'''
# Change the transform of the target dataset to the deterministic transformation
dataset.d.transform = transform_test
dataset.d.target_transform = (lambda x: x)
embedding.eval()
clf.eval()
loader = torch.utils.data.DataLoader(dataset, batch_size=params.bsize,
shuffle=False, drop_last=False, num_workers=params.num_workers)
# do an inference on the full target dataset
probs_all = []
for X, _ in loader:
X = X.cuda()
with torch.no_grad():
feature = embedding(X)
logits = clf(feature)
probs = F.softmax(logits, dim=1)
probs += 1e-6
probs_all.append(probs)
probs_all = torch.cat(probs_all, dim=0).cpu()
# Update the target dataset with the pseudolabel
if hasattr(dataset.d, 'targets'):
dataset.d.targets = probs_all
samples = [(i[0], probs_all[ind_i])for ind_i, i in enumerate(dataset.d.samples)]
dataset.d.samples = samples
dataset.d.imgs = samples
elif hasattr(dataset.d, "labels"):
dataset.d.labels = probs_all
else:
raise ValueError("No Targets variable found!")
# Switch the dataset's augmentation back to the stochastic augmentation
dataset.d.transform = transform
return dataset
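# Hedged sketch (mirrors the call made later in main()): the teacher soft-labels
# every target example once, before student training starts, e.g.
#
#   dataset = pseudolabel_dataset(backbone, clf_teacher, dataset,
#                                 transform, transform_test, args)
#
# after which dataset.d carries class-probability targets and the stochastic
# augmentation is restored for training.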
def main(args):
torch.cuda.empty_cache()
# Set the scenes
if not os.path.isdir(args.dir):
os.makedirs(args.dir)
logger = utils.create_logger(os.path.join(
args.dir, 'checkpoint.log'), __name__)
trainlog = utils.savelog(args.dir, 'train')
vallog = utils.savelog(args.dir, 'val')
# wandb.init(project='STARTUP',
# group=__file__,
# name=f'{__file__}_{args.dir}')
# wandb.config.update(args)
for arg in vars(args):
logger.info(f"{arg}: {getattr(args, arg)}")
# seed the random number generator
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
np.random.seed(args.seed)
random.seed(args.seed)
###########################
# Create Models
###########################
if args.model == 'resnet10':
backbone = models.ResNet10()
feature_dim = backbone.final_feat_dim
elif args.model == 'resnet12':
backbone = models.Resnet12(width=1, dropout=0.1)
feature_dim = backbone.output_size
elif args.model == 'resnet18':
backbone = models.resnet18(remove_last_relu=False,
input_high_res=True)
feature_dim = 512
else:
raise ValueError('Invalid backbone model')
backbone_sd_init = copy.deepcopy(backbone.state_dict())
# load the teacher
# specified at args.teacher_path
if args.teacher_path is not None:
if args.teacher_path_version == 0:
print(os.path.dirname(os.path.abspath(__file__)))
state = torch.load(args.teacher_path)['state']
clf_state = OrderedDict()
state_keys = list(state.keys())
for _, key in enumerate(state_keys):
if "feature." in key:
# an architecture model has attribute 'feature', load architecture
# feature to backbone by casting name from 'feature.trunk.xx' to 'trunk.xx'
newkey = key.replace("feature.", "")
state[newkey] = state.pop(key)
elif "classifier." in key:
newkey = key.replace("classifier.", "")
clf_state[newkey] = state.pop(key)
else:
state.pop(key)
sd = state
clf_sd = clf_state
elif args.teacher_path_version == 1:
temp = torch.load(args.teacher_path)
sd = temp['model']
clf_sd = temp['clf']
else:
raise ValueError("Invalid load path version!")
backbone.load_state_dict(sd)
backbone = nn.DataParallel(backbone).cuda()
num_classes = clf_sd['weight'].shape[0]
clf_teacher = nn.Linear(feature_dim, num_classes).cuda()
clf_teacher.load_state_dict(clf_sd)
# the student classifier head
clf = nn.Linear(feature_dim, num_classes).cuda()
# initialize the student classifier head with the teacher classifier head
if args.use_pretrained_clf:
print("Loading Pretrained Classifier")
clf.load_state_dict(clf_sd)
# projection head for SimCLR
clf_SIMCLR = projector_SIMCLR(feature_dim, args.projection_dim).cuda()
############################
###########################
# Create DataLoader
###########################
# create the base dataset
if args.base_dataset == 'miniImageNet':
base_transform = miniImageNet_few_shot.TransformLoader(args.image_size).get_composed_transform(aug=True)
base_transform_test = miniImageNet_few_shot.TransformLoader(
args.image_size).get_composed_transform(aug=False)
base_dataset = datasets.ImageFolder(root=args.base_path, transform=base_transform)
if args.base_split is not None:
base_dataset = miniImageNet_few_shot.construct_subset(
base_dataset, args.base_split)
elif args.base_dataset == 'tiered_ImageNet':
if args.image_size != 84:
warnings.warn("Tiered ImageNet: The image size for is not 84x84")
base_transform = tiered_ImageNet_few_shot.TransformLoader(args.image_size).get_composed_transform(aug=False)
base_transform_test = tiered_ImageNet_few_shot.TransformLoader(
args.image_size).get_composed_transform(aug=False)
base_dataset = datasets.ImageFolder(
root=args.base_path, transform=base_transform)
if args.base_split is not None:
base_dataset = tiered_ImageNet_few_shot.construct_subset(
base_dataset, args.base_split)
elif args.base_dataset == 'ImageNet':
if args.base_no_color_jitter:
base_transform = transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
else:
warnings.warn("Using ImageNet with Color Jitter")
base_transform = ImageNet_few_shot.TransformLoader(
args.image_size).get_composed_transform(aug=True)
base_transform_test = ImageNet_few_shot.TransformLoader(
args.image_size).get_composed_transform(aug=False)
base_dataset = datasets.ImageFolder(
root=args.base_path, transform=base_transform)
if args.base_split is not None:
base_dataset = ImageNet_few_shot.construct_subset(base_dataset, args.base_split)
print("Size of Base dataset:", len(base_dataset))
else:
raise ValueError("Invalid base dataset!")
# create the target dataset
if args.target_dataset == 'ISIC':
transform = ISIC_few_shot.TransformLoader(args.image_size).get_composed_transform(aug=True)
transform_test = ISIC_few_shot.TransformLoader(
args.image_size).get_composed_transform(aug=False)
dataset = ISIC_few_shot.SimpleDataset(transform, split=args.target_subset_split)
elif args.target_dataset == 'EuroSAT':
transform = EuroSAT_few_shot.TransformLoader(args.image_size).get_composed_transform(aug=True)
transform_test = EuroSAT_few_shot.TransformLoader(
args.image_size).get_composed_transform(aug=False)
dataset = EuroSAT_few_shot.SimpleDataset(transform, split=args.target_subset_split)
elif args.target_dataset == 'CropDisease':
transform = CropDisease_few_shot.TransformLoader(args.image_size).get_composed_transform(aug=True)
transform_test = CropDisease_few_shot.TransformLoader(
args.image_size).get_composed_transform(aug=False)
dataset = CropDisease_few_shot.SimpleDataset(
transform, split=args.target_subset_split)
elif args.target_dataset == 'ChestX':
transform = Chest_few_shot.TransformLoader(
args.image_size).get_composed_transform(aug=True)
transform_test = Chest_few_shot.TransformLoader(
args.image_size).get_composed_transform(aug=False)
dataset = Chest_few_shot.SimpleDataset(transform, split=args.target_subset_split)
elif args.target_dataset == 'miniImageNet_test':
transform = miniImageNet_few_shot.TransformLoader(
args.image_size).get_composed_transform(aug=True)
transform_test = miniImageNet_few_shot.TransformLoader(
args.image_size).get_composed_transform(aug=False)
dataset = miniImageNet_few_shot.SimpleDataset(
transform, split=args.target_subset_split)
elif args.target_dataset == 'tiered_ImageNet_test':
if args.image_size != 84:
warnings.warn("Tiered ImageNet: The image size for is not 84x84")
transform = tiered_ImageNet_few_shot.TransformLoader(
args.image_size).get_composed_transform(aug=True)
transform_test = tiered_ImageNet_few_shot.TransformLoader(
args.image_size).get_composed_transform(aug=False)
dataset = tiered_ImageNet_few_shot.SimpleDataset(
transform, split=args.target_subset_split)
else:
raise ValueError('Invalid dataset!')
# pseudolabel target dataset
dataset = pseudolabel_dataset(backbone, clf_teacher, dataset,
transform, transform_test, args)
print("Size of target dataset", len(dataset))
dataset_test = copy.deepcopy(dataset)
transform_twice = apply_twice(transform)
transform_test_twice = apply_twice(transform_test, transform)
dataset.d.transform = transform_twice
dataset_test.d.transform = transform_test_twice
ind = torch.randperm(len(dataset))
# initialize the student's backbone with random weights
if args.backbone_random_init:
backbone.module.load_state_dict(backbone_sd_init)
# split the target dataset into train and val
# 10% of the unlabeled data is used for validation
train_ind = ind[:int(0.9*len(ind))]
val_ind = ind[int(0.9*len(ind)):]
trainset = torch.utils.data.Subset(dataset, train_ind)
valset = torch.utils.data.Subset(dataset_test, val_ind)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=args.bsize,
num_workers=args.num_workers,
shuffle=True, drop_last=True)
valloader = torch.utils.data.DataLoader(valset, batch_size=args.bsize,
num_workers=args.num_workers,
shuffle=False, drop_last=False)
# Generate trainset and valset for base dataset
base_ind = torch.randperm(len(base_dataset))
base_train_ind = base_ind[:int((1 - args.base_val_ratio)*len(base_ind))]
base_val_ind = base_ind[int((1 - args.base_val_ratio)*len(base_ind)):]
base_dataset_val = copy.deepcopy(base_dataset)
base_dataset_val.transform = base_transform_test
base_trainset = torch.utils.data.Subset(base_dataset, base_train_ind)
base_valset = torch.utils.data.Subset(base_dataset_val, base_val_ind)
print("Size of base validation set", len(base_valset))
base_trainloader = torch.utils.data.DataLoader(base_trainset, batch_size=args.bsize,
num_workers=args.num_workers,
shuffle=True, drop_last=True)
base_valloader = torch.utils.data.DataLoader(base_valset, batch_size=args.bsize * 2,
num_workers=args.num_workers,
shuffle=False, drop_last=False)
############################
############################
# Specify Loss Function
############################
criterion = NTXentLoss('cuda', args.bsize, args.temp, True)
if args.batch_validate:
criterion_val = NTXentLoss('cuda', args.bsize, args.temp, True)
else:
criterion_val = NTXentLoss('cuda', len(valset), args.temp, True)
############################
###########################
# Create Optimizer
###########################
optimizer = torch.optim.SGD([
{'params': backbone.parameters()},
{'params': clf.parameters()},
{'params': clf_SIMCLR.parameters()}
],
lr=0.1, momentum=0.9,
weight_decay=args.wd,
nesterov=False)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
mode='min', factor=0.5,
patience=10, verbose=False,
cooldown=10,
threshold_mode='rel',
threshold=1e-4, min_lr=1e-5)
#######################################
starting_epoch = 0
# whether to resume from the latest checkpoint
if args.resume_latest:
import re
        pattern = r"checkpoint_(\d+)\.pkl"
candidate = []
for i in os.listdir(args.dir):
match = re.search(pattern, i)
if match:
candidate.append(int(match.group(1)))
# if nothing found, then start from scratch
if len(candidate) == 0:
print('No latest candidate found to resume!')
logger.info('No latest candidate found to resume!')
else:
latest = np.amax(candidate)
load_path = os.path.join(args.dir, f'checkpoint_{latest}.pkl')
if latest >= args.epochs:
print('The latest checkpoint found ({}) is after the number of epochs (={}) specified! Exiting!'.format(
load_path, args.epochs))
logger.info('The latest checkpoint found ({}) is after the number of epochs (={}) specified! Exiting!'.format(
load_path, args.epochs))
import sys
sys.exit(0)
else:
best_model_path = os.path.join(args.dir, 'checkpoint_best.pkl')
# first load the previous best model
best_epoch = load_checkpoint(backbone, clf, clf_SIMCLR,
optimizer, scheduler, best_model_path)
logger.info('Latest model epoch: {}'.format(latest))
logger.info(
'Validate the best model checkpointed at epoch: {}'.format(best_epoch))
# Validate to set the right loss
performance_val = validate(backbone, clf, clf_SIMCLR,
valloader, base_valloader, criterion_val,
best_epoch, args.epochs, logger, vallog, args, postfix='Validation')
loss_val = performance_val['Loss_test/avg']
error_val = 100 - performance_val['top1_test_per_class/avg']
best_error = error_val
best_loss = loss_val
sd_best = torch.load(os.path.join(
args.dir, 'checkpoint_best.pkl'))
if latest > best_epoch:
starting_epoch = load_checkpoint(
backbone, clf, clf_SIMCLR, optimizer, scheduler, load_path)
else:
starting_epoch = best_epoch
logger.info(
'Continue Training at epoch: {}'.format(starting_epoch))
###########################################
####### Learning rate test ################
###########################################
if starting_epoch == 0:
### Start by doing a learning rate test
lr_candidates = [1e-1, 5e-2, 3e-2, 1e-2, 5e-3, 3e-3, 1e-3]
step = 50
# number of training epochs to get at least 50 updates
warm_up_epoch = math.ceil(step / len(trainloader))
# keep track of the student model initialization
# Need to keep reloading when testing different learning rates
sd_current = copy.deepcopy(backbone.state_dict())
sd_head = copy.deepcopy(clf.state_dict())
sd_head_SIMCLR = copy.deepcopy(clf_SIMCLR.state_dict())
vals = []
# Test the learning rate by training for one epoch
for current_lr in lr_candidates:
lr_log = utils.savelog(args.dir, f'lr_{current_lr}')
# reload the student model
backbone.load_state_dict(sd_current)
clf.load_state_dict(sd_head)
clf_SIMCLR.load_state_dict(sd_head_SIMCLR)
# create the optimizer
optimizer = torch.optim.SGD([
{'params': backbone.parameters()},
{'params': clf.parameters()},
{'params': clf_SIMCLR.parameters()}
],
lr=current_lr, momentum=0.9,
weight_decay=args.wd,
nesterov=False)
logger.info(f'*** Testing Learning Rate: {current_lr}')
# training for a bit
for i in range(warm_up_epoch):
perf = train(backbone, clf, clf_SIMCLR, optimizer,
trainloader, base_trainloader, criterion,
i, warm_up_epoch, logger, lr_log, args, turn_off_sync=True)
# compute the validation loss for picking learning rates
perf_val = validate(backbone, clf, clf_SIMCLR, valloader,
base_valloader, criterion_val,
1, 1, logger, vallog, args, postfix='Validation',
turn_off_sync=True)
vals.append(perf_val['Loss_test/avg'])
# pick the best learning rates
current_lr = lr_candidates[int(np.argmin(vals))]
# reload the models
backbone.load_state_dict(sd_current)
clf.load_state_dict(sd_head)
clf_SIMCLR.load_state_dict(sd_head_SIMCLR)
logger.info(f"** Learning with lr: {current_lr}")
optimizer = torch.optim.SGD([
{'params': backbone.parameters()},
{'params': clf.parameters()},
{'params': clf_SIMCLR.parameters()}
],
lr=current_lr, momentum=0.9,
weight_decay=args.wd,
nesterov=False)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
mode='min', factor=0.5,
patience=10, verbose=False,
cooldown=10,
threshold_mode='rel',
threshold=1e-4, min_lr=1e-5)
scheduler.step(math.inf)
best_loss = math.inf
best_epoch = 0
checkpoint(backbone, clf, clf_SIMCLR,
optimizer, scheduler, os.path.join(
args.dir, f'checkpoint_best.pkl'), 0)
############################
# save the initialization
checkpoint(backbone, clf, clf_SIMCLR,
optimizer, scheduler,
os.path.join(
args.dir, f'checkpoint_{starting_epoch}.pkl'), starting_epoch)
try:
for epoch in tqdm(range(starting_epoch, args.epochs)):
perf = train(backbone, clf, clf_SIMCLR, optimizer, trainloader,
base_trainloader, criterion,
epoch, args.epochs, logger, trainlog, args)
scheduler.step(perf['Loss/avg'])
# Always checkpoint after first epoch of training
if (epoch == starting_epoch) or ((epoch + 1) % args.save_freq == 0):
checkpoint(backbone, clf, clf_SIMCLR,
optimizer, scheduler,
os.path.join(
args.dir, f'checkpoint_{epoch + 1}.pkl'), epoch + 1)
if (epoch == starting_epoch) or ((epoch + 1) % args.eval_freq == 0):
performance_val = validate(backbone, clf, clf_SIMCLR, valloader,
base_valloader, criterion_val,
epoch+1, args.epochs, logger, vallog, args, postfix='Validation')
loss_val = performance_val['Loss_test/avg']
if best_loss > loss_val:
best_epoch = epoch + 1
checkpoint(backbone, clf, clf_SIMCLR,
optimizer, scheduler, os.path.join(
args.dir, f'checkpoint_best.pkl'), best_epoch)
logger.info(
f"*** Best model checkpointed at Epoch {best_epoch}")
best_loss = loss_val
if (epoch + 1) % args.save_freq != 0:
checkpoint(backbone, clf, clf_SIMCLR,
optimizer, scheduler, os.path.join(
args.dir, f'checkpoint_{epoch + 1}.pkl'), epoch + 1)
finally:
trainlog.save()
vallog.save()
return
def checkpoint(model, clf, clf_SIMCLR, optimizer, scheduler, save_path, epoch):
'''
epoch: the number of epochs of training that has been done
Should resume from epoch
'''
sd = {
'model': copy.deepcopy(model.module.state_dict()),
'clf': copy.deepcopy(clf.state_dict()),
'clf_SIMCLR': copy.deepcopy(clf_SIMCLR.state_dict()),
'opt': copy.deepcopy(optimizer.state_dict()),
'scheduler': copy.deepcopy(scheduler.state_dict()),
'epoch': epoch
}
torch.save(sd, save_path)
return sd
def load_checkpoint(model, clf, clf_SIMCLR, optimizer, scheduler, load_path):
'''
Load model and optimizer from load path
Return the epoch to continue the checkpoint
'''
sd = torch.load(load_path)
model.module.load_state_dict(sd['model'])
clf.load_state_dict(sd['clf'])
clf_SIMCLR.load_state_dict(sd['clf_SIMCLR'])
optimizer.load_state_dict(sd['opt'])
scheduler.load_state_dict(sd['scheduler'])
return sd['epoch']
def train(model, clf, clf_SIMCLR,
optimizer, trainloader, base_trainloader, criterion_SIMCLR, epoch,
num_epochs, logger, trainlog, args, turn_off_sync=False):
meters = utils.AverageMeterSet()
model.train()
clf.train()
clf_SIMCLR.train()
kl_criterion = nn.KLDivLoss(reduction='batchmean')
nll_criterion = nn.NLLLoss(reduction='mean')
base_loader_iter = iter(base_trainloader)
end = time.time()
for i, ((X1, X2), y) in enumerate(trainloader):
meters.update('Data_time', time.time() - end)
current_lr = optimizer.param_groups[0]['lr']
meters.update('lr', current_lr, 1)
X1 = X1.cuda()
X2 = X2.cuda()
y = y.cuda()
# Get the data from the base dataset
try:
            X_base, y_base = next(base_loader_iter)
        except StopIteration:
            base_loader_iter = iter(base_trainloader)
            X_base, y_base = next(base_loader_iter)
X_base = X_base.cuda()
y_base = y_base.cuda()
optimizer.zero_grad()
# cross entropy loss on the base dataset
features_base = model(X_base)
logits_base = clf(features_base)
log_probability_base = F.log_softmax(logits_base, dim=1)
loss_base = nll_criterion(log_probability_base, y_base)
f1 = model(X1)
f2 = model(X2)
# SIMCLR Loss on the target dataset
z1 = clf_SIMCLR(f1)
z2 = clf_SIMCLR(f2)
loss_SIMCLR = criterion_SIMCLR(z1, z2)
# Pseudolabel loss on the target dataset
logits_xtask_1 = clf(f1)
logits_xtask_2 = clf(f2)
log_probability_1 = F.log_softmax(logits_xtask_1, dim=1)
log_probability_2 = F.log_softmax(logits_xtask_2, dim=1)
loss_xtask = (kl_criterion(log_probability_1, y) +
kl_criterion(log_probability_2, y)) / 2
loss = loss_base + loss_SIMCLR + loss_xtask
loss.backward()
optimizer.step()
meters.update('Loss', loss.item(), 1)
meters.update('KL_Loss_target', loss_xtask.item(), 1)
meters.update('CE_Loss_source', loss_base.item(), 1)
meters.update('SIMCLR_Loss_target', loss_SIMCLR.item(), 1)
perf = utils.accuracy(logits_xtask_1.data,
y.argmax(dim=1).data, topk=(1, ))
meters.update('top1', perf['average'][0].item(), len(X1))
meters.update('top1_per_class', perf['per_class_average'][0].item(), 1)
perf_base = utils.accuracy(logits_base.data,
y_base.data, topk=(1, ))
meters.update('top1_base', perf_base['average'][0].item(), len(X_base))
meters.update('top1_base_per_class', perf_base['per_class_average'][0].item(), 1)
meters.update('Batch_time', time.time() - end)
end = time.time()
if (i + 1) % args.print_freq == 0:
values = meters.values()
averages = meters.averages()
sums = meters.sums()
logger_string = ('Training Epoch: [{epoch}/{epochs}] Step: [{step} / {steps}] '
'Batch Time: {meters[Batch_time]:.4f} '
'Data Time: {meters[Data_time]:.4f} Average Loss: {meters[Loss]:.4f} '
'Average KL Loss (Target): {meters[KL_Loss_target]:.4f} '
'Average SimCLR Loss (Target): {meters[SIMCLR_Loss_target]:.4f} '
'Average CE Loss (Source): {meters[CE_Loss_source]: .4f} '
'Learning Rate: {meters[lr]:.4f} '
'Top1: {meters[top1]:.4f} '
'Top1_per_class: {meters[top1_per_class]:.4f} '
'Top1_base: {meters[top1_base]:.4f} '
'Top1_base_per_class: {meters[top1_base_per_class]:.4f} '
).format(
epoch=epoch, epochs=num_epochs, step=i+1, steps=len(trainloader), meters=meters)
logger.info(logger_string)
if (args.iteration_bp is not None) and (i+1) == args.iteration_bp:
break
logger_string = ('Training Epoch: [{epoch}/{epochs}] Step: [{step}] Batch Time: {meters[Batch_time]:.4f} '
'Data Time: {meters[Data_time]:.4f} Average Loss: {meters[Loss]:.4f} '
'Average KL Loss (Target): {meters[KL_Loss_target]:.4f} '
'Average SimCLR Loss (Target): {meters[SIMCLR_Loss_target]:.4f} '
'Average CE Loss (Source): {meters[CE_Loss_source]: .4f} '
'Learning Rate: {meters[lr]:.4f} '
'Top1: {meters[top1]:.4f} '
'Top1_per_class: {meters[top1_per_class]:.4f} '
'Top1_base: {meters[top1_base]:.4f} '
'Top1_base_per_class: {meters[top1_base_per_class]:.4f} '
).format(
epoch=epoch+1, epochs=num_epochs, step=0, meters=meters)
logger.info(logger_string)
values = meters.values()
averages = meters.averages()
sums = meters.sums()
trainlog.record(epoch+1, {
**values,
**averages,
**sums
})
# if not turn_off_sync:
# # wandb.log({'loss': averages['Loss/avg']}, step=epoch+1)
# # wandb.log(
# # {'ce_loss_source': averages['CE_Loss_source/avg']}, step=epoch+1)
# # wandb.log(
# # {'kl_loss_target': averages['KL_Loss_target/avg']}, step=epoch+1)
# # wandb.log(
# # {'SimCLR_loss_target': averages['SIMCLR_Loss_target/avg']}, step=epoch+1)
# # wandb.log({'top1': averages['top1/avg'],
# # 'top1_per_class': averages['top1_per_class/avg'],
# # }, step=epoch+1)
# # wandb.log({'top1_base': averages['top1_base/avg'],
# # 'top1_base_per_class': averages['top1_base_per_class/avg'],
# # }, step=epoch+1)
return averages
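# The per-step objective assembled above is the sum of three terms:
#   loss = loss_base   (cross-entropy on the labeled source/base batch)
#        + loss_SIMCLR (NT-Xent contrastive loss between the two augmented target views)
#        + loss_xtask  (KL divergence between the student's log-probabilities and the
#                       soft pseudolabel distribution y provided by the target loader)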
def validate(model, clf, clf_simclr,
testloader, base_loader, criterion_SIMCLR, epoch, num_epochs, logger,
testlog, args, postfix='Validation', turn_off_sync=False):
meters = utils.AverageMeterSet()
model.eval()
clf.eval()
clf_simclr.eval()
criterion_xtask = nn.KLDivLoss(reduction='batchmean')
nll_criterion = nn.NLLLoss(reduction='mean')
logits_xtask_test_all = []
if args.batch_validate:
losses_SIMCLR = []
else:
z1s = []
z2s = []
ys_all = []
end = time.time()
# Compute the loss for the target dataset
with torch.no_grad():
for _, ((Xtest, Xrand), y) in enumerate(testloader):
Xtest = Xtest.cuda()
Xrand = Xrand.cuda()
y = y.cuda()
ftest = model(Xtest)
frand = model(Xrand)
ztest = clf_simclr(ftest)
zrand = clf_simclr(frand)
# get the logits for xtask
logits_xtask_test = clf(ftest)
logits_xtask_test_all.append(logits_xtask_test)
ys_all.append(y)
if args.batch_validate:
if len(Xtest) != args.bsize:
criterion_small_set = NTXentLoss(
'cuda', len(Xtest), args.temp, True)
losses_SIMCLR.append(criterion_small_set(ztest, zrand))
else:
losses_SIMCLR.append(criterion_SIMCLR(ztest, zrand))
else:
z1s.append(ztest)
z2s.append(zrand)
ys_all = torch.cat(ys_all, dim=0)
logits_xtask_test_all = torch.cat(logits_xtask_test_all, dim=0)
log_probability = F.log_softmax(logits_xtask_test_all, dim=1)
loss_xtask = criterion_xtask(log_probability, ys_all)
if args.batch_validate:
loss_SIMCLR = torch.stack(losses_SIMCLR).mean()
else:
z1s = torch.cat(z1s, dim=0)
z2s = torch.cat(z2s, dim=0)
loss_SIMCLR = criterion_SIMCLR(z1s, z2s)
logits_base_all = []
ys_base_all = []
with torch.no_grad():
# Compute the loss on the source base dataset
for X_base, y_base in base_loader:
X_base = X_base.cuda()
y_base = y_base.cuda()
features = model(X_base)
logits_base = clf(features)
logits_base_all.append(logits_base)
ys_base_all.append(y_base)
ys_base_all = torch.cat(ys_base_all, dim=0)
logits_base_all = torch.cat(logits_base_all, dim=0)
log_probability_base = F.log_softmax(logits_base_all, dim=1)
loss_base = nll_criterion(log_probability_base, ys_base_all)
loss = loss_xtask + loss_SIMCLR + loss_base
meters.update('CE_Loss_source_test', loss_base.item(), 1)
meters.update('KL_Loss_target_test', loss_xtask.item(), 1)
meters.update('SIMCLR_Loss_target_test', loss_SIMCLR.item(), 1)
meters.update('Loss_test', loss.item(), 1)
perf = utils.accuracy(logits_xtask_test_all.data,
ys_all.argmax(dim=1).data, topk=(1, ))
meters.update('top1_test', perf['average'][0].item(), 1)
meters.update('top1_test_per_class',
perf['per_class_average'][0].item(), 1)
perf_base = utils.accuracy(logits_base_all.data,
ys_base_all.data, topk=(1, ))
meters.update('top1_base_test', perf_base['average'][0].item(), 1)
meters.update('top1_base_test_per_class',
perf_base['per_class_average'][0].item(), 1)
meters.update('Batch_time', time.time() - end)
logger_string = ('{postfix} Epoch: [{epoch}/{epochs}] Batch Time: {meters[Batch_time]:.4f} '
'Average Test Loss: {meters[Loss_test]:.4f} '
'Average Test KL Loss (Target): {meters[KL_Loss_target_test]: .4f} '
'Average Test SimCLR Loss (Target): {meters[SIMCLR_Loss_target_test]: .4f} '
'Average CE Loss (Source): {meters[CE_Loss_source_test]: .4f} '
'Top1_test: {meters[top1_test]:.4f} '
'Top1_test_per_class: {meters[top1_test_per_class]:.4f} '
'Top1_base_test: {meters[top1_base_test]:.4f} '
'Top1_base_test_per_class: {meters[top1_base_test_per_class]:.4f} ').format(
postfix=postfix, epoch=epoch, epochs=num_epochs, meters=meters)
logger.info(logger_string)
values = meters.values()
averages = meters.averages()
sums = meters.sums()
testlog.record(epoch, {
**values,
**averages,
**sums
})
if postfix != '':
postfix = '_' + postfix
# if not turn_off_sync:
# wandb.log({'loss' + postfix: averages['Loss_test/avg']}, step=epoch)
# wandb.log(
# {'kl_loss_target' + postfix: averages['KL_Loss_target_test/avg']}, step=epoch)
# wandb.log(
# {'ce_loss_source' + postfix: averages['CE_Loss_source_test/avg']}, step=epoch)
# wandb.log(
# {'SimCLR_loss_target': averages['SIMCLR_Loss_target_test/avg']}, step=epoch)
# wandb.log({'top1' + postfix: averages['top1_test/avg'],
# 'top1_per_class' + postfix: averages['top1_test_per_class/avg'],
# }, step=epoch)
# wandb.log({'top1_base' + postfix: averages['top1_base_test/avg'],
# 'top1_base_per_class' + postfix: averages['top1_base_test_per_class/avg'],
# }, step=epoch)
return averages
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='STARTUP')
parser.add_argument('--dir', type=str, default='.',
help='directory to save the checkpoints')
parser.add_argument('--bsize', type=int, default=32,
help='batch_size for STARTUP')
parser.add_argument('--epochs', type=int, default=50,
help='Number of training epochs')
parser.add_argument('--save_freq', type=int, default=5,
help='Frequency (in epoch) to save')
parser.add_argument('--eval_freq', type=int, default=1,
help='Frequency (in epoch) to evaluate on the val set')
parser.add_argument('--print_freq', type=int, default=5,
help='Frequency (in step per epoch) to print training stats')
parser.add_argument('--load_path', type=str, default=None,
help='Path to the checkpoint to be loaded')
parser.add_argument('--seed', type=int, default=1,
help='Seed for randomness')
parser.add_argument('--wd', type=float, default=1e-4,
help='Weight decay for the model')
parser.add_argument('--resume_latest', action='store_true',
help='resume from the latest model in args.dir')
parser.add_argument('--num_workers', type=int, default=4,
help='Number of workers for dataloader')
parser.add_argument('--iteration_bp', type=int, help='which step to break in the training loop')
parser.add_argument('--model', type=str, default='resnet10',
help='Backbone model')
parser.add_argument('--teacher_path', type=str, required=True,
help='path to the teacher model')
parser.add_argument('--teacher_path_version', type=int, default=1,
help='how to load the teacher')
parser.add_argument('--use_pretrained_clf', action='store_true',
help="whether to initialize student's classifier (this is the teacher classifier)")
parser.add_argument('--backbone_random_init', action='store_true',
help="Use random initialized backbone ")
parser.add_argument('--projection_dim', type=int, default=128,
help='Projection Dimension for SimCLR')
parser.add_argument('--temp', type=float, default=1,
help='Temperature of SIMCLR')
parser.add_argument('--base_dataset', type=str, required=True, help='base_dataset to use')
parser.add_argument('--base_path', type=str, required=True, help='path to base dataset')
parser.add_argument('--base_split', type=str, help='split for the base dataset')
parser.add_argument('--base_no_color_jitter', action='store_true', help='remove color jitter for ImageNet')
parser.add_argument('--base_val_ratio', type=float, default=0.05, help='amount of base dataset set aside for validation')
parser.add_argument('--batch_validate', action='store_true',
help='to do batch validate rather than validate on the full dataset (ideally, for SimCLR,'+
' the validation should be on the full dataset, but that might not be feasible due to hardware constraints)')
parser.add_argument('--target_dataset', type=str, required=True,
help='the target domain dataset')
parser.add_argument('--target_subset_split', type=str,
help='path to the csv files that specifies the unlabeled split for the target dataset')
parser.add_argument('--image_size', type=int, default=224,
help='Resolution of the input image')
args = parser.parse_args()
main(args)
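# Example invocation (the script name, paths and dataset names below are placeholders,
# not values taken from this repository; only the flags match the parser above):
#
#   python STARTUP.py \
#       --dir ./ckpt/startup_run \
#       --teacher_path ./teacher/checkpoint.pkl \
#       --base_dataset miniImageNet --base_path /data/miniImageNet \
#       --target_dataset EuroSAT --target_subset_split splits/EuroSAT_unlabeled.csv \
#       --bsize 32 --epochs 50 --model resnet10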
| 39.549702
| 126
| 0.588886
|
36b2b7643c2fe314dd79d15742f1a7f58c9d7b25
| 753
|
py
|
Python
|
setup.py
|
mgrubisic/PySeismoSoil
|
f8778be18a0d141918ff3f05c2e5279f5d02cdda
|
[
"FSFAP"
] | 1
|
2020-10-05T05:47:18.000Z
|
2020-10-05T05:47:18.000Z
|
setup.py
|
mgrubisic/PySeismoSoil
|
f8778be18a0d141918ff3f05c2e5279f5d02cdda
|
[
"FSFAP"
] | null | null | null |
setup.py
|
mgrubisic/PySeismoSoil
|
f8778be18a0d141918ff3f05c2e5279f5d02cdda
|
[
"FSFAP"
] | null | null | null |
from setuptools import setup
setup(
name='PySeismoSoil',
version='v0.3.6',
description='PySeismoSoil',
author='Jian Shi',
license='BSD 3',
url='https://github.com/jsh9/PySeismoSoil',
packages=['PySeismoSoil'],
classifiers=['Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
install_requires=['numpy>=1.11.0',
'matplotlib>=2.0.0',
'scipy>=1.1.0',
'numba>=0.38.0'
],
python_requires='>=3.6',
include_package_data=True,
)
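# Standard ways to install the package from the repository root (generic
# setuptools/pip usage, not specific to this project):
#   pip install .        # regular install
#   pip install -e .     # editable/development install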
| 30.12
| 57
| 0.533865
|
d22e790f560b51447016ed3ce2c5663688b5fd74
| 6,131
|
py
|
Python
|
tests/unit/test_types.py
|
OvalMoney/momapper
|
9bcf1909a80677cab831132444be27fa4adaa2a5
|
[
"MIT"
] | null | null | null |
tests/unit/test_types.py
|
OvalMoney/momapper
|
9bcf1909a80677cab831132444be27fa4adaa2a5
|
[
"MIT"
] | null | null | null |
tests/unit/test_types.py
|
OvalMoney/momapper
|
9bcf1909a80677cab831132444be27fa4adaa2a5
|
[
"MIT"
] | null | null | null |
from decimal import Decimal
import pytest
from bson import Decimal128
from momapper import MappedClass, Field
from momapper.mongodb.collection import MappedCollection
from momapper.types import (
DecimalType,
ValidationError,
IntType,
FloatType,
StringType,
ByteType,
BoolType,
ListType,
DictType,
)
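# The tests below depend on a `mongo_client` pytest fixture that is defined elsewhere
# (typically a conftest.py that is not part of this file); it is only assumed to expose
# `.db` and `.collection` attributes compatible with MappedCollection. A minimal,
# hypothetical sketch of such a fixture (backend and names are assumptions):
#
#   import pytest
#   import mongomock
#   from collections import namedtuple
#
#   @pytest.fixture
#   def mongo_client():
#       client = mongomock.MongoClient()
#       MongoCtx = namedtuple("MongoCtx", ["db", "collection"])
#       return MongoCtx(db=client["testdb"], collection="documents")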
@pytest.mark.parametrize("value, exception", [(0, None), (object(), ValidationError)])
def test_int_type(mongo_client, value, exception):
class DocWithInt(MappedClass):
value = Field("value", type_=IntType)
if exception:
with pytest.raises(exception):
DocWithInt(value=value)
else:
doc = DocWithInt(value=value)
assert doc.value == value
assert doc._document["value"] == value
collection = MappedCollection(
mongo_client.db, mongo_client.collection, impl=DocWithInt
)
collection.insert_one(doc)
@pytest.mark.parametrize("value, exception", [(0.0, None), (object(), ValidationError)])
def test_float_type(mongo_client, value, exception):
class DocWithFloat(MappedClass):
value = Field("value", type_=FloatType)
if exception:
with pytest.raises(exception):
DocWithFloat(value=value)
else:
doc = DocWithFloat(value=value)
assert doc.value == value
assert doc._document["value"] == value
collection = MappedCollection(
mongo_client.db, mongo_client.collection, impl=DocWithFloat
)
collection.insert_one(doc)
@pytest.mark.parametrize("amount", [0, 0.0, Decimal("10")])
def test_decimal_type(mongo_client, amount):
class DocWithDecimal(MappedClass):
amount = Field("amount", type_=DecimalType)
doc = DocWithDecimal(amount=amount)
assert isinstance(doc.amount, Decimal)
assert isinstance(doc._document["amount"], Decimal128)
collection = MappedCollection(
mongo_client.db, mongo_client.collection, impl=DocWithDecimal
)
doc_id = collection.insert_one(doc).inserted_id
fetched_doc = collection.find_one({"_id": doc_id})
assert isinstance(fetched_doc.amount, Decimal)
assert isinstance(fetched_doc._document["amount"], Decimal128)
assert doc.amount == fetched_doc.amount
def test_decimal_type_if_missing(mongo_client):
class DocWithDecimalRequired(MappedClass):
amount = Field(
"amount", type_=DecimalType, required=True, if_missing=Decimal(5)
)
doc = DocWithDecimalRequired()
assert isinstance(doc.amount, Decimal)
assert isinstance(doc._document["amount"], Decimal128)
collection = MappedCollection(
mongo_client.db, mongo_client.collection, impl=DocWithDecimalRequired
)
doc_id = collection.insert_one(doc).inserted_id
fetched_doc = collection.find_one({"_id": doc_id})
assert isinstance(fetched_doc.amount, Decimal)
assert isinstance(fetched_doc._document["amount"], Decimal128)
assert doc.amount == fetched_doc.amount
@pytest.mark.parametrize(
"value, exception", [("value", None), (object(), ValidationError)]
)
def test_string_type(mongo_client, value, exception):
class DocWithString(MappedClass):
value = Field("value", type_=StringType)
if exception:
with pytest.raises(exception):
DocWithString(value=value)
else:
doc = DocWithString(value=value)
assert doc.value == value
assert doc._document["value"] == value
collection = MappedCollection(
mongo_client.db, mongo_client.collection, impl=DocWithString
)
collection.insert_one(doc)
@pytest.mark.parametrize(
"value, exception", [(b"value", None), (object(), ValidationError)]
)
def test_bytes_type(mongo_client, value, exception):
class DocWithBytes(MappedClass):
value = Field("value", type_=ByteType)
if exception:
with pytest.raises(exception):
DocWithBytes(value=value)
else:
doc = DocWithBytes(value=value)
assert doc.value == value
assert doc._document["value"] == value
collection = MappedCollection(
mongo_client.db, mongo_client.collection, impl=DocWithBytes
)
collection.insert_one(doc)
@pytest.mark.parametrize(
"value, exception", [(False, None), (True, None), (object(), ValidationError)]
)
def test_bool_type(mongo_client, value, exception):
class DocWithBool(MappedClass):
value = Field("value", type_=BoolType)
if exception:
with pytest.raises(exception):
DocWithBool(value=value)
else:
doc = DocWithBool(value=value)
assert doc.value == value
assert doc._document["value"] == value
collection = MappedCollection(
mongo_client.db, mongo_client.collection, impl=DocWithBool
)
collection.insert_one(doc)
@pytest.mark.parametrize(
"value, exception", [(["value"], None), (object(), ValidationError)]
)
def test_list_type(mongo_client, value, exception):
class DocWithList(MappedClass):
value = Field("value", type_=ListType)
if exception:
with pytest.raises(exception):
DocWithList(value=value)
else:
doc = DocWithList(value=value)
assert doc.value == value
assert doc._document["value"] == value
collection = MappedCollection(
mongo_client.db, mongo_client.collection, impl=DocWithList
)
collection.insert_one(doc)
@pytest.mark.parametrize(
"value, exception", [({"value": "value"}, None), (object(), ValidationError)]
)
def test_dict_type(mongo_client, value, exception):
class DocWithDict(MappedClass):
value = Field("value", type_=DictType)
if exception:
with pytest.raises(exception):
DocWithDict(value=value)
else:
doc = DocWithDict(value=value)
assert doc.value == value
assert doc._document["value"] == value
collection = MappedCollection(
mongo_client.db, mongo_client.collection, impl=DocWithDict
)
collection.insert_one(doc)
| 31.280612
| 88
| 0.669222
|
45451b97d4f7c0d42c86fc227cf2de163e89d397
| 18,751
|
py
|
Python
|
src/tts_preparation/app/prepare.py
|
stefantaubert/tts-preparation
|
dad7bb4c0f5093ff63736ae9d6553c7cfe916734
|
[
"MIT"
] | null | null | null |
src/tts_preparation/app/prepare.py
|
stefantaubert/tts-preparation
|
dad7bb4c0f5093ff63736ae9d6553c7cfe916734
|
[
"MIT"
] | null | null | null |
src/tts_preparation/app/prepare.py
|
stefantaubert/tts-preparation
|
dad7bb4c0f5093ff63736ae9d6553c7cfe916734
|
[
"MIT"
] | null | null | null |
import shutil
from logging import getLogger
from pathlib import Path
from statistics import mean
from typing import Callable, Optional, Set, Tuple
import pandas as pd
from general_utils import load_obj, save_obj
from text_selection import get_common_durations
from text_utils import SymbolIdDict
from text_utils.types import Symbol
from tts_preparation.app.merge_ds import (get_merged_dir, load_merged_data,
load_merged_speakers_json,
load_merged_symbol_converter)
from tts_preparation.core.data import DatasetType, PreparedDataList
from tts_preparation.core.helper import prep_data_list_to_dict_with_durations_s
from tts_preparation.core.prepare import (add_greedy_kld_ngram_seconds,
add_greedy_ngram_epochs,
add_greedy_ngram_seconds,
add_n_divergent_random_seconds,
add_ngram_cover,
add_random_ngram_cover_seconds,
add_random_percent,
add_random_seconds, add_rest,
add_symbols, core_process_stats,
get_random_seconds_divergent_seeds)
from tts_preparation.core.stats_speaker import (get_speaker_stats,
log_general_stats)
from tts_preparation.core.stats_symbols import get_ngram_stats_df
from tts_preparation.globals import DEFAULT_CSV_SEPERATOR
def __get_prep_root_dir(merged_dir: Path) -> Path:
return merged_dir / 'training'
def get_prep_dir(merged_dir: Path, prep_name: str) -> Path:
return __get_prep_root_dir(merged_dir) / prep_name
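# Resulting on-disk layout for one preparation (file names taken from the helpers below):
#   <merged_dir>/training/<prep_name>/
#       training.pkl  test.pkl  validation.pkl  rest.pkl  total.pkl
#       stats_speaker.csv  stats_onegram.csv  stats_twogram.csv  stats_threegram.csv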
def save_trainset(prep_dir: Path, dataset: PreparedDataList) -> None:
path = get_trainset_path(prep_dir)
dataset.sort_after_entry_id()
save_obj(dataset, path)
def get_trainset_path(prep_dir: Path) -> Path:
path = prep_dir / "training.pkl"
return path
def load_trainset(prep_dir: Path) -> PreparedDataList:
path = get_trainset_path(prep_dir)
return load_obj(path)
def get_testset_path(prep_dir: Path) -> Path:
path = prep_dir / "test.pkl"
return path
def save_testset(prep_dir: Path, dataset: PreparedDataList) -> None:
path = get_testset_path(prep_dir)
dataset.sort_after_entry_id()
save_obj(dataset, path)
def load_testset(prep_dir: Path) -> PreparedDataList:
path = get_testset_path(prep_dir)
return load_obj(path)
def get_valset_path(prep_dir: Path) -> Path:
path = prep_dir / "validation.pkl"
return path
def save_valset(prep_dir: Path, dataset: PreparedDataList) -> None:
path = get_valset_path(prep_dir)
dataset.sort_after_entry_id()
save_obj(dataset, path)
def load_valset(prep_dir: Path) -> PreparedDataList:
path = get_valset_path(prep_dir)
return load_obj(path)
def get_restset_path(prep_dir: Path) -> Path:
path = prep_dir / "rest.pkl"
return path
def save_restset(prep_dir: Path, dataset: PreparedDataList) -> None:
path = get_restset_path(prep_dir)
dataset.sort_after_entry_id()
save_obj(dataset, path)
def load_restset(prep_dir: Path) -> PreparedDataList:
path = get_restset_path(prep_dir)
assert path.is_file()
return load_obj(path)
def get_totalset_path(prep_dir: Path) -> Path:
path = prep_dir / "total.pkl"
return path
def save_totalset(prep_dir: Path, dataset: PreparedDataList) -> None:
path = get_totalset_path(prep_dir)
dataset.sort_after_entry_id()
save_obj(dataset, path)
def load_totalset(prep_dir: Path) -> PreparedDataList:
path = get_totalset_path(prep_dir)
assert path.is_file()
return load_obj(path)
def _save_speaker_stats(prep_dir: Path, stats: pd.DataFrame) -> None:
path = prep_dir / "stats_speaker.csv"
stats.to_csv(path, sep=DEFAULT_CSV_SEPERATOR)
def _save_onegram_stats(prep_dir: Path, stats: pd.DataFrame) -> None:
path = prep_dir / "stats_onegram.csv"
stats.to_csv(path, sep=DEFAULT_CSV_SEPERATOR)
def _load_onegram_stats(prep_dir: Path) -> pd.DataFrame:
path = prep_dir / "stats_onegram.csv"
data = pd.read_csv(path, sep=DEFAULT_CSV_SEPERATOR)
return data
def _save_twogram_stats(prep_dir: Path, stats: pd.DataFrame) -> None:
path = prep_dir / "stats_twogram.csv"
stats.to_csv(path, sep=DEFAULT_CSV_SEPERATOR)
def _load_twogram_stats(prep_dir: Path) -> pd.DataFrame:
path = prep_dir / "stats_twogram.csv"
data = pd.read_csv(path, sep=DEFAULT_CSV_SEPERATOR)
return data
def _save_threegram_stats(prep_dir: Path, stats: pd.DataFrame) -> None:
path = prep_dir / "stats_threegram.csv"
stats.to_csv(path, sep=DEFAULT_CSV_SEPERATOR)
def _load_threegram_stats(prep_dir: Path) -> pd.DataFrame:
path = prep_dir / "stats_threegram.csv"
data = pd.read_csv(path, sep=DEFAULT_CSV_SEPERATOR)
return data
def print_and_save_stats(base_dir: Path, merge_name: str, prep_name: str) -> None:
logger = getLogger(__name__)
_print_quick_stats(base_dir, merge_name, prep_name)
merge_dir = get_merged_dir(base_dir, merge_name)
merge_data = load_merged_data(merge_dir)
prep_dir = get_prep_dir(merge_dir, prep_name)
trainset = load_trainset(prep_dir) if get_trainset_path(
prep_dir).is_file() else PreparedDataList()
testset = load_testset(prep_dir) if get_testset_path(prep_dir).is_file() else PreparedDataList()
valset = load_valset(prep_dir) if get_valset_path(prep_dir).is_file() else PreparedDataList()
restset = load_restset(prep_dir) if get_restset_path(prep_dir).is_file() else PreparedDataList()
symbols = load_merged_symbol_converter(merge_dir)
speakers = load_merged_speakers_json(merge_dir)
log_general_stats(
trainset=trainset,
valset=valset,
testset=testset,
restset=restset,
data=merge_data,
)
logger.info("Calculating speaker stats...")
speaker_stats = get_speaker_stats(
speakers=speakers,
symbols=symbols,
trainset=trainset,
valset=valset,
testset=testset,
restset=restset,
)
_save_speaker_stats(prep_dir, speaker_stats)
logger.info("Calculating onegram stats...")
onegram_stats = get_ngram_stats_df(
trainset=trainset,
valset=valset,
testset=testset,
restset=restset,
n=1,
)
_save_onegram_stats(prep_dir, onegram_stats)
logger.info("Calculating twogram stats...")
twogram_stats = get_ngram_stats_df(
trainset=trainset,
valset=valset,
testset=testset,
restset=restset,
n=2,
)
_save_twogram_stats(prep_dir, twogram_stats)
logger.info("Calculating threegram stats...")
threegram_stats = get_ngram_stats_df(
trainset=trainset,
valset=valset,
testset=testset,
restset=restset,
n=3,
)
_save_threegram_stats(prep_dir, threegram_stats)
logger.info("Done.")
def process_stats(base_dir: Path, merge_name: str, prep_name: str, ds: DatasetType) -> None:
merge_dir = get_merged_dir(base_dir, merge_name)
#merge_data = load_merged_data(merge_dir)
prep_dir = get_prep_dir(merge_dir, prep_name)
onegram_stats = _load_onegram_stats(prep_dir)
twogram_stats = _load_twogram_stats(prep_dir)
threegram_stats = _load_threegram_stats(prep_dir)
core_process_stats(
onegram_stats=onegram_stats,
twogram_stats=twogram_stats,
threegram_stats=threegram_stats,
speaker_stats=None,
ds=ds,
)
def _print_quick_stats(base_dir: Path, merge_name: str, prep_name: str) -> None:
merge_dir = get_merged_dir(base_dir, merge_name)
merge_data = load_merged_data(merge_dir)
prep_dir = get_prep_dir(merge_dir, prep_name)
trainset = load_trainset(prep_dir) if get_trainset_path(
prep_dir).is_file() else PreparedDataList()
testset = load_testset(prep_dir) if get_testset_path(prep_dir).is_file() else PreparedDataList()
valset = load_valset(prep_dir) if get_valset_path(prep_dir).is_file() else PreparedDataList()
restset = load_restset(prep_dir) if get_restset_path(prep_dir).is_file() else PreparedDataList()
log_general_stats(
trainset=trainset,
valset=valset,
testset=testset,
restset=restset,
data=merge_data,
)
def app_prepare(base_dir: Path, merge_name: str, prep_name: str, overwrite: bool = True) -> None:
logger = getLogger(__name__)
merge_dir = get_merged_dir(base_dir, merge_name)
prep_dir = get_prep_dir(merge_dir, prep_name)
if prep_dir.is_dir():
if overwrite:
logger.info("Removing existing...")
shutil.rmtree(prep_dir)
else:
logger.info("Already created.")
return
merge_data = load_merged_data(merge_dir)
prep_dir.mkdir(parents=True, exist_ok=False)
save_valset(prep_dir, PreparedDataList())
save_testset(prep_dir, PreparedDataList())
save_trainset(prep_dir, PreparedDataList())
save_restset(prep_dir, merge_data)
save_totalset(prep_dir, merge_data)
logger.info("Done.")
_print_quick_stats(base_dir, merge_name, prep_name)
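# Sketch of a typical preparation workflow built from the helpers in this module
# (directory, merge and prep names are placeholders):
#
#   base_dir = Path("/data/tts")
#   app_prepare(base_dir, merge_name="ljs", prep_name="initial", overwrite=True)
#   app_add_random_minutes(base_dir, "ljs", orig_prep_name="initial",
#                          dest_prep_name="train_60min", minutes=60, seed=1234,
#                          dataset=DatasetType.TRAINING)
#   print_and_save_stats(base_dir, "ljs", "train_60min")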
def load_set(prep_dir: Path, dataset: DatasetType) -> PreparedDataList:
if dataset == DatasetType.TRAINING:
return load_trainset(prep_dir)
if dataset == DatasetType.VALIDATION:
return load_valset(prep_dir)
if dataset == DatasetType.TEST:
return load_testset(prep_dir)
raise Exception()
def _save_results(dest_prep_dir: Path, new_set: PreparedDataList, new_restset: PreparedDataList, dataset: DatasetType) -> None:
save_restset(dest_prep_dir, new_restset)
if dataset == DatasetType.TRAINING:
save_trainset(dest_prep_dir, new_set)
elif dataset == DatasetType.VALIDATION:
save_valset(dest_prep_dir, new_set)
elif dataset == DatasetType.TEST:
save_testset(dest_prep_dir, new_set)
def copy_orig_to_dest_dir(orig_prep_dir: Path, dest_prep_dir: Path) -> None:
dest_prep_dir.mkdir(parents=True, exist_ok=True)
save_trainset(dest_prep_dir, load_trainset(orig_prep_dir))
save_valset(dest_prep_dir, load_valset(orig_prep_dir))
save_testset(dest_prep_dir, load_testset(orig_prep_dir))
save_restset(dest_prep_dir, load_restset(orig_prep_dir))
save_totalset(dest_prep_dir, load_totalset(orig_prep_dir))
def app_add_rest(base_dir: Path, merge_name: str, orig_prep_name: str, dest_prep_name: str, dataset: DatasetType, overwrite: bool = True) -> None:
__add(
base_dir=base_dir,
merge_name=merge_name,
orig_prep_name=orig_prep_name,
dest_prep_name=dest_prep_name,
dataset=dataset,
overwrite=overwrite,
func=add_rest,
)
def app_add_ngram_cover(base_dir: Path, merge_name: str, orig_prep_name: str, dest_prep_name: str, dataset: DatasetType, n_gram: int, ignore_symbols: Optional[Set[Symbol]] = None, top_percent: Optional[float] = None, overwrite: bool = True) -> None:
__add(
base_dir=base_dir,
merge_name=merge_name,
orig_prep_name=orig_prep_name,
dest_prep_name=dest_prep_name,
dataset=dataset,
overwrite=overwrite,
func=add_ngram_cover,
n_gram=n_gram,
ignore_symbols=ignore_symbols,
top_percent=top_percent,
)
def app_add_random_minutes(base_dir: Path, merge_name: str, orig_prep_name: str, dest_prep_name: str, minutes: float, seed: int, dataset: DatasetType, respect_existing: bool = False, overwrite: bool = True) -> None:
__add(
base_dir=base_dir,
merge_name=merge_name,
orig_prep_name=orig_prep_name,
dest_prep_name=dest_prep_name,
dataset=dataset,
overwrite=overwrite,
func=add_random_seconds,
seconds=minutes * 60,
seed=seed,
respect_existing=respect_existing,
)
def app_add_random_percent(base_dir: Path, merge_name: str, orig_prep_name: str, dest_prep_name: str, percent: float, seed: int, dataset: DatasetType, overwrite: bool = True) -> None:
__add(
base_dir=base_dir,
merge_name=merge_name,
orig_prep_name=orig_prep_name,
dest_prep_name=dest_prep_name,
dataset=dataset,
overwrite=overwrite,
func=add_random_percent,
percent=percent,
seed=seed,
)
def app_add_symbols(base_dir: Path, merge_name: str, orig_prep_name: str, dest_prep_name: str, dataset: DatasetType, cover_symbols: Set[Symbol], overwrite: bool = True) -> None:
__add(
base_dir=base_dir,
merge_name=merge_name,
orig_prep_name=orig_prep_name,
dest_prep_name=dest_prep_name,
dataset=dataset,
overwrite=overwrite,
func=add_symbols,
cover_symbols=cover_symbols,
)
def app_add_random_ngram_cover_minutes(base_dir: Path, merge_name: str, orig_prep_name: str, dest_prep_name: str, dataset: DatasetType, n_gram: int, seed: int, minutes: float, ignore_symbols: Optional[Set[Symbol]] = None, overwrite: bool = True) -> None:
__add(
base_dir=base_dir,
merge_name=merge_name,
orig_prep_name=orig_prep_name,
dest_prep_name=dest_prep_name,
dataset=dataset,
overwrite=overwrite,
func=add_random_ngram_cover_seconds,
ignore_symbols=ignore_symbols,
seconds=minutes * 60,
n_gram=n_gram,
seed=seed,
)
def app_add_greedy_ngram_minutes(base_dir: Path, merge_name: str, orig_prep_name: str, dest_prep_name: str, dataset: DatasetType, n_gram: int, minutes: float, ignore_symbols: Optional[Set[Symbol]] = None, overwrite: bool = True) -> None:
__add(
base_dir=base_dir,
merge_name=merge_name,
orig_prep_name=orig_prep_name,
dest_prep_name=dest_prep_name,
dataset=dataset,
overwrite=overwrite,
func=add_greedy_ngram_seconds,
ignore_symbols=ignore_symbols,
seconds=minutes * 60,
n_gram=n_gram,
)
def app_add_n_diverse_random_minutes(base_dir: Path, merge_name: str, orig_prep_name: str, dest_prep_name: str, dataset: DatasetType, overwrite: bool, seed: int, minutes: float, n: int) -> None:
logger = getLogger(__name__)
logger.info(f"Adding utterances speaker-wise to {str(dataset)}...")
merge_dir = get_merged_dir(base_dir, merge_name)
orig_prep_dir = get_prep_dir(merge_dir, orig_prep_name)
new_datasets = add_n_divergent_random_seconds(
existing_set=load_set(orig_prep_dir, dataset),
restset=load_restset(orig_prep_dir),
seed=seed,
n=n,
seconds=minutes * 60,
)
dest_names = []
for i, (new_set, new_restset) in enumerate(new_datasets):
logger.info(f"Saving {i+1}/{len(new_datasets)}...")
dest_name = f"{dest_prep_name}_{i+1}"
dest_prep_dir = get_prep_dir(merge_dir, dest_name)
if not overwrite and dest_prep_dir.is_dir():
logger.info(f"{dest_name} already exists. Skipping...")
continue
if dest_prep_dir != orig_prep_dir:
copy_orig_to_dest_dir(orig_prep_dir, dest_prep_dir)
_save_results(dest_prep_dir, new_set, new_restset, dataset)
logger.info(f"Saved to: {dest_name}")
_print_quick_stats(base_dir, merge_name, dest_name)
dest_names.append(dest_name)
new_sets = [
{x.entry_id for x in load_trainset(
get_prep_dir(merge_dir, dest_name)).items()}
for dest_name in dest_names
]
total_set = load_totalset(orig_prep_dir)
durations_s = prep_data_list_to_dict_with_durations_s(total_set)
common_durations = get_common_durations(new_sets, durations_s)
logger.info("Overlapping")
for set_combi, common_duration_s in common_durations.items():
logger.info(
f"{set_combi}: {common_duration_s:.2f}s / {common_duration_s / 60:.2f}min / {common_duration_s / 60 / 60:.2f}h")
mean_s = mean(common_durations.values())
logger.info(
f"Average duration: {mean_s:.2f}s / {mean_s / 60:.2f}min / {mean_s / 60 / 60:.2f}h")
mean_dur = mean(durations_s.values())
#common_elements = get_total_number_of_common_elements(new_sets)
#logger.info(f"Entries: {common_elements}")
logger.info(f"Avg Entry Dur: {mean_dur:.2f}s")
#avg_entries = common_elements / len(common_durations)
#logger.info(f"Avg Entries: {avg_entries:.0f} = {avg_entries * mean_dur:.2f}s")
logger.info("Done.")
def app_add_greedy_ngram_epochs(base_dir: Path, merge_name: str, orig_prep_name: str, dest_prep_name: str, dataset: DatasetType, n_gram: int, epochs: int, ignore_symbols: Optional[Set[Symbol]] = None, overwrite: bool = True) -> None:
__add(
base_dir=base_dir,
merge_name=merge_name,
orig_prep_name=orig_prep_name,
dest_prep_name=dest_prep_name,
dataset=dataset,
overwrite=overwrite,
func=add_greedy_ngram_epochs,
ignore_symbols=ignore_symbols,
n_gram=n_gram,
epochs=epochs,
)
def app_add_greedy_kld_ngram_minutes(base_dir: Path, merge_name: str, orig_prep_name: str, dest_prep_name: str, dataset: DatasetType, n_gram: int, minutes: float, ignore_symbols: Optional[Set[Symbol]] = None, overwrite: bool = True) -> None:
__add(
base_dir=base_dir,
merge_name=merge_name,
orig_prep_name=orig_prep_name,
dest_prep_name=dest_prep_name,
dataset=dataset,
overwrite=overwrite,
func=add_greedy_kld_ngram_seconds,
ignore_symbols=ignore_symbols,
seconds=minutes * 60,
n_gram=n_gram,
)
def __add(base_dir: Path, merge_name: str, orig_prep_name: str, dest_prep_name: str, dataset: DatasetType, overwrite: bool, func: Callable[[PreparedDataList, PreparedDataList, SymbolIdDict], Tuple[PreparedDataList, PreparedDataList]], **kwargs) -> None:
logger = getLogger(__name__)
logger.info(f"Adding utterances speaker-wise to {str(dataset)}...")
merge_dir = get_merged_dir(base_dir, merge_name)
dest_prep_dir = get_prep_dir(merge_dir, dest_prep_name)
if not overwrite and dest_prep_dir.is_dir():
logger.info("Already exists.")
return
orig_prep_dir = get_prep_dir(merge_dir, orig_prep_name)
new_set, new_restset = func(
existing_set=load_set(orig_prep_dir, dataset),
restset=load_restset(orig_prep_dir),
**kwargs,
)
if dest_prep_dir != orig_prep_dir:
copy_orig_to_dest_dir(orig_prep_dir, dest_prep_dir)
_save_results(dest_prep_dir, new_set, new_restset, dataset)
logger.info("Done.")
_print_quick_stats(base_dir, merge_name, dest_prep_name)
def app_get_random_seconds_divergent_seeds(base_dir: Path, merge_name: str, prep_name: str, minutes: float, seed: int, n: int) -> None:
logger = getLogger(__name__)
merge_dir = get_merged_dir(base_dir, merge_name)
orig_prep_dir = get_prep_dir(merge_dir, prep_name)
rest_set = load_restset(orig_prep_dir)
selected_seeds = get_random_seconds_divergent_seeds(
restset=rest_set,
seed=seed,
seconds=minutes * 60,
n=n,
)
logger.info("The most divergent seeds are:")
logger.info(selected_seeds)
# show_n = 10
# for selected_seed, selected_set in zip(selected_seeds, selected_sets):
# selected_entry_ids = list(sorted(selected_set))
# first_entries = list(map(str, selected_entry_ids[:show_n]))
# last_entries = list(map(str, selected_entry_ids[-show_n:]))
# logger.info(
# f"{selected_seed}: {', '.join(first_entries)}, ..., {', '.join(last_entries)} ({len(selected_entry_ids)})")
| 34.279707
| 254
| 0.736867
|
9e4f6e2eccfbb83086d67ad2ee2e81bd641df243
| 27,306
|
py
|
Python
|
python/federatedml/ensemble/basic_algorithms/decision_tree/hetero/hetero_decision_tree_host.py
|
eliaskousk/FATE
|
242e47d6ae439a3b69ecb1610cb370b29b024413
|
[
"Apache-2.0"
] | 1
|
2022-02-07T06:23:15.000Z
|
2022-02-07T06:23:15.000Z
|
python/federatedml/ensemble/basic_algorithms/decision_tree/hetero/hetero_decision_tree_host.py
|
eliaskousk/FATE
|
242e47d6ae439a3b69ecb1610cb370b29b024413
|
[
"Apache-2.0"
] | null | null | null |
python/federatedml/ensemble/basic_algorithms/decision_tree/hetero/hetero_decision_tree_host.py
|
eliaskousk/FATE
|
242e47d6ae439a3b69ecb1610cb370b29b024413
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
from federatedml.ensemble.basic_algorithms.decision_tree.tree_core.node import Node
from federatedml.util import LOGGER
from federatedml.protobuf.generated.boosting_tree_model_meta_pb2 import DecisionTreeModelMeta
from federatedml.protobuf.generated.boosting_tree_model_param_pb2 import DecisionTreeModelParam
from federatedml.ensemble.basic_algorithms.decision_tree.tree_core.decision_tree import DecisionTree
from federatedml.ensemble.basic_algorithms.decision_tree.tree_core.splitter import SplitInfo
from federatedml.transfer_variable.transfer_class.hetero_decision_tree_transfer_variable import \
HeteroDecisionTreeTransferVariable
from federatedml.util import consts
from federatedml.ensemble.basic_algorithms.decision_tree.tree_core.g_h_optim import PackedGHCompressor
import functools
class HeteroDecisionTreeHost(DecisionTree):
def __init__(self, tree_param):
super(HeteroDecisionTreeHost, self).__init__(tree_param)
self.encrypted_grad_and_hess = None
self.runtime_idx = 0
self.sitename = consts.HOST # will be modified in self.set_runtime_idx()
self.complete_secure_tree = False
self.host_party_idlist = []
# feature shuffling / missing_dir masking
self.feature_num = -1
self.missing_dir_mask_left = {} # mask for left direction
self.missing_dir_mask_right = {} # mask for right direction
self.split_maskdict = {} # mask for split value
self.missing_dir_maskdict = {}
self.fid_bid_random_mapping = {}
self.inverse_fid_bid_random_mapping = {}
self.bin_num = None
# goss subsample
self.run_goss = False
# transfer variable
self.transfer_inst = HeteroDecisionTreeTransferVariable()
# cipher compressing
self.cipher_compressor = None
self.run_cipher_compressing = True
# code version control
self.new_ver = True
"""
Setting
"""
def report_init_status(self):
LOGGER.info('reporting initialization status')
LOGGER.info('using new version code {}'.format(self.new_ver))
if self.complete_secure_tree:
LOGGER.info('running complete secure')
if self.run_goss:
LOGGER.info('running goss')
if self.run_cipher_compressing:
LOGGER.info('running cipher compressing')
LOGGER.debug('bin num and feature num: {}/{}'.format(self.bin_num, self.feature_num))
def init(self, flowid, runtime_idx, data_bin, bin_split_points, bin_sparse_points, bin_num,
valid_features,
complete_secure=False,
goss_subsample=False,
cipher_compressing=False,
new_ver=True):
super(HeteroDecisionTreeHost, self).init_data_and_variable(flowid, runtime_idx, data_bin, bin_split_points,
bin_sparse_points, valid_features, None)
self.check_max_split_nodes()
self.complete_secure_tree = complete_secure
self.run_goss = goss_subsample
self.bin_num = bin_num
self.run_cipher_compressing = cipher_compressing
self.feature_num = self.bin_split_points.shape[0]
self.new_ver = new_ver
self.report_init_status()
def set_host_party_idlist(self, l):
self.host_party_idlist = l
"""
Node encode/decode
"""
def generate_missing_dir(self, dep, left_num=3, right_num=3):
"""
randomly generate missing dir mask
"""
rn = np.random.choice(range(left_num+right_num), left_num + right_num, replace=False)
left_dir = rn[0:left_num]
right_dir = rn[left_num:]
self.missing_dir_mask_left[dep] = left_dir
self.missing_dir_mask_right[dep] = right_dir
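# Example: with left_num=3 and right_num=3 a random permutation of 0..5 is split into two
# disjoint id sets, e.g. left={4, 0, 2} and right={5, 1, 3}. The guest only ever sees these
# masked ids; unmask_split_info() later maps them back to -1 (left) or +1 (right).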
@staticmethod
def generate_fid_bid_random_mapping(feature_num, bin_num):
total_id_num = feature_num * bin_num
mapping = {}
idx = 0
id_list = np.random.choice(range(total_id_num), total_id_num, replace=False)
for fid in range(feature_num):
for bid in range(bin_num):
mapping[(fid, bid)] = int(id_list[idx])
idx += 1
return mapping
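# Example: for feature_num=2 and bin_num=3 the keys are the six (fid, bid) pairs
# (0, 0) .. (1, 2) and the values are a random permutation of 0..5, so every
# (feature, bin) split candidate is exposed to the guest only as an anonymous mask id.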
def encode(self, etype="feature_idx", val=None, nid=None):
if etype == "feature_idx":
return val
if etype == "feature_val":
self.split_maskdict[nid] = val
return None
if etype == "missing_dir":
self.missing_dir_maskdict[nid] = val
return None
raise TypeError("encode type %s is not supported!" % (str(etype)))
@staticmethod
def decode(dtype="feature_idx", val=None, nid=None, split_maskdict=None, missing_dir_maskdict=None):
if dtype == "feature_idx":
return val
if dtype == "feature_val":
if nid in split_maskdict:
return split_maskdict[nid]
else:
raise ValueError("decode val %s caused an error, can't recognize it!" % (str(val)))
if dtype == "missing_dir":
if nid in missing_dir_maskdict:
return missing_dir_maskdict[nid]
else:
raise ValueError("decode val %s caused an error, can't recognize it!" % (str(val)))
raise TypeError("decode type %s is not supported!" % (str(dtype)))
def generate_split_point_masking_variable(self, dep):
# for split point masking
self.generate_missing_dir(dep, 5, 5)
self.fid_bid_random_mapping = self.generate_fid_bid_random_mapping(self.feature_num, self.bin_num)
self.inverse_fid_bid_random_mapping = {v: k for k, v in self.fid_bid_random_mapping.items()}
def unmask_split_info(self, split_info_list, inverse_mask_id_mapping, left_missing_dir, right_missing_dir):
for split_info in split_info_list:
if split_info.mask_id is not None:
fid, bid = inverse_mask_id_mapping[split_info.mask_id]
split_info.best_fid, split_info.best_bid = fid, bid
masked_missing_dir = split_info.missing_dir
if masked_missing_dir in left_missing_dir:
split_info.missing_dir = -1
elif masked_missing_dir in right_missing_dir:
split_info.missing_dir = 1
return split_info_list
def encode_split_info(self, split_info_list):
final_split_info = []
for i, split_info in enumerate(split_info_list):
if split_info.best_fid != -1:
LOGGER.debug('sitename is {}, self.sitename is {}'
.format(split_info.sitename, self.sitename))
assert split_info.sitename == self.sitename
split_info.best_fid = self.encode("feature_idx", split_info.best_fid)
assert split_info.best_fid is not None
split_info.best_bid = self.encode("feature_val", split_info.best_bid, self.cur_to_split_nodes[i].id)
split_info.missing_dir = self.encode("missing_dir", split_info.missing_dir, self.cur_to_split_nodes[i].id)
split_info.mask_id = None
else:
LOGGER.debug('this node can not be further split by host feature: {}'.format(split_info))
final_split_info.append(split_info)
return final_split_info
"""
Federation Functions
"""
def init_compressor_and_sync_gh(self):
LOGGER.info("get encrypted grad and hess")
if self.run_cipher_compressing:
self.cipher_compressor = PackedGHCompressor()
self.grad_and_hess = self.transfer_inst.encrypted_grad_and_hess.get(idx=0)
def sync_node_positions(self, dep=-1):
LOGGER.info("get node positions of depth {}".format(dep))
node_positions = self.transfer_inst.node_positions.get(idx=0,
suffix=(dep,))
return node_positions
def sync_tree_node_queue(self, dep=-1):
LOGGER.info("get tree node queue of depth {}".format(dep))
self.cur_layer_nodes = self.transfer_inst.tree_node_queue.get(idx=0,
suffix=(dep,))
def sync_encrypted_splitinfo_host(self, encrypted_splitinfo_host, dep=-1, batch=-1):
LOGGER.info("send encrypted splitinfo of depth {}, batch {}".format(dep, batch))
self.transfer_inst.encrypted_splitinfo_host.remote(encrypted_splitinfo_host,
role=consts.GUEST,
idx=-1,
suffix=(dep, batch,))
def sync_federated_best_splitinfo_host(self, dep=-1, batch=-1):
LOGGER.info("get federated best splitinfo of depth {}, batch {}".format(dep, batch))
federated_best_splitinfo_host = self.transfer_inst.federated_best_splitinfo_host.get(idx=0,
suffix=(dep, batch,))
return federated_best_splitinfo_host
def sync_final_splitinfo_host(self, splitinfo_host, federated_best_splitinfo_host, dep=-1, batch=-1):
LOGGER.info("send host final splitinfo of depth {}, batch {}".format(dep, batch))
final_splitinfos = []
for i in range(len(splitinfo_host)):
best_idx, best_gain = federated_best_splitinfo_host[i]
if best_idx != -1:
LOGGER.debug('sitename is {}, self.sitename is {}'
.format(splitinfo_host[i][best_idx].sitename, self.sitename))
assert splitinfo_host[i][best_idx].sitename == self.sitename
splitinfo = splitinfo_host[i][best_idx]
splitinfo.best_fid = self.encode("feature_idx", splitinfo.best_fid)
assert splitinfo.best_fid is not None
splitinfo.best_bid = self.encode("feature_val", splitinfo.best_bid, self.cur_to_split_nodes[i].id)
splitinfo.missing_dir = self.encode("missing_dir", splitinfo.missing_dir, self.cur_to_split_nodes[i].id)
splitinfo.gain = best_gain
else:
splitinfo = SplitInfo(sitename=self.sitename, best_fid=-1, best_bid=-1, gain=best_gain)
final_splitinfos.append(splitinfo)
self.transfer_inst.final_splitinfo_host.remote(final_splitinfos,
role=consts.GUEST,
idx=-1,
suffix=(dep, batch,))
def sync_dispatch_node_host(self, dep):
LOGGER.info("get node from host to dispatch, depth is {}".format(dep))
dispatch_node_host = self.transfer_inst.dispatch_node_host.get(idx=0,
suffix=(dep,))
return dispatch_node_host
def sync_dispatch_node_host_result(self, dispatch_node_host_result, dep=-1):
LOGGER.info("send host dispatch result, depth is {}".format(dep))
self.transfer_inst.dispatch_node_host_result.remote(dispatch_node_host_result,
role=consts.GUEST,
idx=-1,
suffix=(dep,))
def sync_tree(self,):
LOGGER.info("sync tree from guest")
self.tree_node = self.transfer_inst.tree.get(idx=0)
def sync_predict_finish_tag(self, recv_times):
LOGGER.info("get the {}-th predict finish tag from guest".format(recv_times))
finish_tag = self.transfer_inst.predict_finish_tag.get(idx=0,
suffix=(recv_times,))
return finish_tag
def sync_predict_data(self, recv_times):
LOGGER.info("receive predict data at host, recv times is {}".format(recv_times))
predict_data = self.transfer_inst.predict_data.get(idx=0,
suffix=(recv_times,))
return predict_data
def sync_data_predicted_by_host(self, predict_data, send_times):
LOGGER.info("send predicted data by host, send times is {}".format(send_times))
self.transfer_inst.predict_data_by_host.remote(predict_data,
role=consts.GUEST,
idx=0,
suffix=(send_times,))
"""
Tree Updating
"""
@staticmethod
def assign_an_instance(value1, value2, sitename=None, decoder=None,
bin_sparse_points=None,
use_missing=False, zero_as_missing=False,
split_maskdict=None,
missing_dir_maskdict=None):
unleaf_state, fid, bid, node_sitename, nodeid, left_nodeid, right_nodeid = value1
if node_sitename != sitename:
return value1
fid = decoder("feature_idx", fid, nodeid, split_maskdict=split_maskdict)
bid = decoder("feature_val", bid, nodeid, split_maskdict=split_maskdict)
missing_dir = decoder("missing_dir", 1, nodeid, missing_dir_maskdict=missing_dir_maskdict)
direction = HeteroDecisionTreeHost.make_decision(value2, fid, bid, missing_dir, use_missing, zero_as_missing,
bin_sparse_points[fid])
return (unleaf_state, left_nodeid) if direction else (unleaf_state, right_nodeid)
def assign_instances_to_new_node(self, dispatch_node_host, dep=-1):
LOGGER.info("start to find host dispatch of depth {}".format(dep))
dispatch_node_method = functools.partial(self.assign_an_instance,
sitename=self.sitename,
decoder=self.decode,
split_maskdict=self.split_maskdict,
bin_sparse_points=self.bin_sparse_points,
use_missing=self.use_missing,
zero_as_missing=self.zero_as_missing,
missing_dir_maskdict=self.missing_dir_maskdict)
dispatch_node_host_result = dispatch_node_host.join(self.data_bin, dispatch_node_method)
self.sync_dispatch_node_host_result(dispatch_node_host_result, dep)
def update_instances_node_positions(self):
# join data and inst2node_idx to update current node positions of samples
self.data_with_node_assignments = self.data_bin.join(self.inst2node_idx, lambda v1, v2: (v1, v2))
"""
Pre-Process / Post-Process
"""
def remove_duplicated_split_nodes(self, split_nid_used):
LOGGER.info("remove duplicated nodes from split mask dict")
duplicated_nodes = set(self.split_maskdict.keys()) - set(split_nid_used)
for nid in duplicated_nodes:
del self.split_maskdict[nid]
def convert_bin_to_real(self, decode_func, split_maskdict):
LOGGER.info("convert tree node bins to real value")
split_nid_used = []
for i in range(len(self.tree_node)):
if self.tree_node[i].is_leaf is True:
continue
if self.tree_node[i].sitename == self.sitename:
fid = decode_func("feature_idx", self.tree_node[i].fid, self.tree_node[i].id, split_maskdict)
bid = decode_func("feature_val", self.tree_node[i].bid, self.tree_node[i].id, split_maskdict)
LOGGER.debug("shape of bin_split_points is {}".format(len(self.bin_split_points[fid])))
real_splitval = self.encode("feature_val", self.bin_split_points[fid][bid], self.tree_node[i].id)
self.tree_node[i].bid = real_splitval
self.tree_node[i].fid = fid
split_nid_used.append(self.tree_node[i].id)
self.remove_duplicated_split_nodes(split_nid_used)
"""
Split finding
"""
def get_computing_inst2node_idx(self):
if self.run_goss:
inst2node_idx = self.inst2node_idx.join(self.grad_and_hess, lambda x1, x2: x1)
else:
inst2node_idx = self.inst2node_idx
return inst2node_idx
def compute_best_splits2(self, cur_to_split_nodes: list, node_map, dep, batch):
LOGGER.info('solving node batch {}, node num is {}'.format(batch, len(cur_to_split_nodes)))
if not self.complete_secure_tree:
data = self.data_with_node_assignments
inst2node_idx = self.get_computing_inst2node_idx()
node_sample_count = self.count_node_sample_num(inst2node_idx, node_map)
LOGGER.debug('sample count is {}'.format(node_sample_count))
acc_histograms = self.get_local_histograms(dep, data, self.grad_and_hess, node_sample_count,
cur_to_split_nodes, node_map, ret='tb',
hist_sub=True)
split_info_table = self.splitter.host_prepare_split_points(histograms=acc_histograms,
use_missing=self.use_missing,
valid_features=self.valid_features,
sitename=self.sitename,
left_missing_dir=self.missing_dir_mask_left[dep],
right_missing_dir=self.missing_dir_mask_right[dep],
mask_id_mapping=self.fid_bid_random_mapping,
batch_size=self.bin_num,
cipher_compressor=self.cipher_compressor,
shuffle_random_seed=np.abs(hash((dep, batch)))
)
# test split info encryption
self.transfer_inst.encrypted_splitinfo_host.remote(split_info_table,
role=consts.GUEST,
idx=-1,
suffix=(dep, batch))
best_split_info = self.transfer_inst.federated_best_splitinfo_host.get(suffix=(dep, batch), idx=0)
unmasked_split_info = self.unmask_split_info(best_split_info, self.inverse_fid_bid_random_mapping,
self.missing_dir_mask_left[dep], self.missing_dir_mask_right[dep])
return_split_info = self.encode_split_info(unmasked_split_info)
self.transfer_inst.final_splitinfo_host.remote(return_split_info,
role=consts.GUEST,
idx=-1,
suffix=(dep, batch,))
else:
LOGGER.debug('skip splits computation')
def compute_best_splits(self, cur_to_split_nodes: list, node_map: dict, dep: int, batch: int):
if not self.complete_secure_tree:
data = self.data_with_node_assignments
acc_histograms = self.get_local_histograms(dep, data, self.grad_and_hess,
None, cur_to_split_nodes, node_map, ret='tb',
hist_sub=False)
splitinfo_host, encrypted_splitinfo_host = self.splitter.find_split_host(histograms=acc_histograms,
node_map=node_map,
use_missing=self.use_missing,
zero_as_missing=self.zero_as_missing,
valid_features=self.valid_features,
sitename=self.sitename,)
self.sync_encrypted_splitinfo_host(encrypted_splitinfo_host, dep, batch)
federated_best_splitinfo_host = self.sync_federated_best_splitinfo_host(dep, batch)
self.sync_final_splitinfo_host(splitinfo_host, federated_best_splitinfo_host, dep, batch)
LOGGER.debug('computing host splits done')
else:
LOGGER.debug('skip splits computation')
"""
Fit & Predict
"""
def fit(self):
LOGGER.info("begin to fit host decision tree")
self.init_compressor_and_sync_gh()
LOGGER.debug('grad and hess count {}'.format(self.grad_and_hess.count()))
for dep in range(self.max_depth):
LOGGER.debug('At dep {}'.format(dep))
self.sync_tree_node_queue(dep)
self.generate_split_point_masking_variable(dep)
if len(self.cur_layer_nodes) == 0:
break
self.inst2node_idx = self.sync_node_positions(dep)
self.update_instances_node_positions()
batch = 0
for i in range(0, len(self.cur_layer_nodes), self.max_split_nodes):
self.cur_to_split_nodes = self.cur_layer_nodes[i: i + self.max_split_nodes]
if self.new_ver:
self.compute_best_splits2(self.cur_to_split_nodes,
node_map=self.get_node_map(self.cur_to_split_nodes),
dep=dep, batch=batch)
else:
self.compute_best_splits(self.cur_to_split_nodes,
node_map=self.get_node_map(self.cur_to_split_nodes), dep=dep, batch=batch)
batch += 1
dispatch_node_host = self.sync_dispatch_node_host(dep)
self.assign_instances_to_new_node(dispatch_node_host, dep=dep)
self.sync_tree()
self.convert_bin_to_real(decode_func=self.decode, split_maskdict=self.split_maskdict)
LOGGER.info("fitting host decision tree done")
@staticmethod
def traverse_tree(predict_state, data_inst, tree_=None,
decoder=None, split_maskdict=None, sitename=consts.HOST,
use_missing=False, zero_as_missing=False,
missing_dir_maskdict=None):
nid, _ = predict_state
if tree_[nid].sitename != sitename:
return predict_state
while tree_[nid].sitename == sitename:
nid = HeteroDecisionTreeHost.go_next_layer(tree_[nid], data_inst, use_missing, zero_as_missing,
None, split_maskdict, missing_dir_maskdict, decoder)
return nid, 0
def predict(self, data_inst):
LOGGER.info("start to predict!")
site_guest_send_times = 0
while True:
finish_tag = self.sync_predict_finish_tag(site_guest_send_times)
if finish_tag is True:
break
predict_data = self.sync_predict_data(site_guest_send_times)
traverse_tree = functools.partial(self.traverse_tree,
tree_=self.tree_node,
decoder=self.decode,
split_maskdict=self.split_maskdict,
sitename=self.sitename,
use_missing=self.use_missing,
zero_as_missing=self.zero_as_missing,
missing_dir_maskdict=self.missing_dir_maskdict)
predict_data = predict_data.join(data_inst, traverse_tree)
self.sync_data_predicted_by_host(predict_data, site_guest_send_times)
site_guest_send_times += 1
LOGGER.info("predict finish!")
"""
Tree Output
"""
def get_model_meta(self):
model_meta = DecisionTreeModelMeta()
model_meta.max_depth = self.max_depth
model_meta.min_sample_split = self.min_sample_split
model_meta.min_impurity_split = self.min_impurity_split
model_meta.min_leaf_node = self.min_leaf_node
model_meta.use_missing = self.use_missing
model_meta.zero_as_missing = self.zero_as_missing
return model_meta
def set_model_meta(self, model_meta):
self.max_depth = model_meta.max_depth
self.min_sample_split = model_meta.min_sample_split
self.min_impurity_split = model_meta.min_impurity_split
self.min_leaf_node = model_meta.min_leaf_node
self.use_missing = model_meta.use_missing
self.zero_as_missing = model_meta.zero_as_missing
def get_model_param(self):
model_param = DecisionTreeModelParam()
for node in self.tree_node:
model_param.tree_.add(id=node.id,
sitename=node.sitename,
fid=node.fid,
bid=node.bid,
weight=node.weight,
is_leaf=node.is_leaf,
left_nodeid=node.left_nodeid,
right_nodeid=node.right_nodeid,
missing_dir=node.missing_dir)
model_param.split_maskdict.update(self.split_maskdict)
model_param.missing_dir_maskdict.update(self.missing_dir_maskdict)
return model_param
def set_model_param(self, model_param):
self.tree_node = []
for node_param in model_param.tree_:
_node = Node(id=node_param.id,
sitename=node_param.sitename,
fid=node_param.fid,
bid=node_param.bid,
weight=node_param.weight,
is_leaf=node_param.is_leaf,
left_nodeid=node_param.left_nodeid,
right_nodeid=node_param.right_nodeid,
missing_dir=node_param.missing_dir)
self.tree_node.append(_node)
self.split_maskdict = dict(model_param.split_maskdict)
self.missing_dir_maskdict = dict(model_param.missing_dir_maskdict)
"""
don't have to be implemented
"""
def initialize_root_node(self, *args):
pass
def update_tree(self, *args):
pass
| 45.585977
| 123
| 0.577089
|
9faa227874d1347d1eb48b066eed1b960f461c56
| 12,899
|
py
|
Python
|
final_project/train.py
|
dbirman/cs375
|
7aeac1ed57eff74cbecb3e1091b01f00d34629a8
|
[
"MIT"
] | null | null | null |
final_project/train.py
|
dbirman/cs375
|
7aeac1ed57eff74cbecb3e1091b01f00d34629a8
|
[
"MIT"
] | null | null | null |
final_project/train.py
|
dbirman/cs375
|
7aeac1ed57eff74cbecb3e1091b01f00d34629a8
|
[
"MIT"
] | 2
|
2017-12-02T01:46:28.000Z
|
2018-01-08T21:36:58.000Z
|
"""
Final project
"""
import os
import numpy as np
import tensorflow as tf
from tfutils import base, data, model, optimizer, utils
from data_provider import Combine_world
from yolo_tiny_net import YoloTinyNet
class ImageNetYOLO():
"""
Defines the ImageNet training experiment
"""
class Config():
"""
Holds model hyperparams and data information.
The config class is used to store various hyperparameters and dataset
information parameters.
Please set the seed to your group number. You can also change the batch
size and n_epochs if you want but please do not change the rest.
"""
batch_size = 1 # 256
data_path = '/datasets/TFRecord_Imagenet_standard'
seed = 0
crop_size = 224
thres_loss = 1E20  # 1000
n_epochs = 90
datasets = {'imagenet': 1, 'coco': 1}
common_params = {
'image_size': crop_size,
'num_classes': 80,
'batch_size': batch_size
}
net_params = {
'boxes_per_cell': 2,
'weight_decay': 0.0005,
'cell_size': 4,
'object_scale':1,
'noobject_scale':1,
'class_scale':1,
'coord_scale':1
}
ytn = YoloTinyNet(common_params, net_params, test=False)
train_steps = 160000  # 1500
val_steps = 100
def setup_params(self):
"""
This function illustrates how to setup up the parameters for
train_from_params.
"""
params = {}
"""
train_params defines the training parameters consisting of
- the data provider that reads the data, preprocesses it and enqueues it into
the data queue
- the data queue that batches and if specified shuffles the data and provides
the input to the model
- other configuration parameters like the number of training steps
It's arguments are
data_params: defines how the data is read in.
queue_params: defines how the data is presented to the model, i.e.
if it is shuffled or not and how big of a batch size is used.
targets: the targets to be extracted and evaluated in the tensorflow session
num_steps: number of training steps
thres_loss: if the loss exceeds thres_loss the training will be stopped
validate_first: run validation before starting the training
"""
params['inter_op_parallelism_threads'] = 500
params['train_params'] = {
'data_params': {
# ImageNet data provider arguments
'func': Combine_world,
'cfg_dataset': self.Config.datasets,
'group': 'train',
'crop_size': self.Config.crop_size,
# TFRecords (super class) data provider arguments
'file_pattern': 'train*.tfrecords',
'batch_size': 1, #self.Config.batch_size,
'shuffle': False,
'shuffle_seed': self.Config.seed,
'file_grab_func': self.subselect_tfrecords,
'n_threads': 1,#sum(self.Config.datasets.values()),
},
'queue_params': {
'queue_type': 'random',
'batch_size': self.Config.batch_size,
'seed': self.Config.seed,
'capacity': self.Config.batch_size * 10,
'min_after_dequeue': self.Config.batch_size * 5,
},
'targets': {
'func': self.return_outputs,
'targets': [],
},
'num_steps': self.Config.train_steps,
'thres_loss': self.Config.thres_loss,
'validate_first': False,
}
"""
validation_params similar to train_params defines the validation parameters.
It has the same arguments as train_params and additionally
agg_func: function that aggregates the validation results across batches,
e.g. to calculate the mean of across batch losses
online_agg_func: function that aggregates the validation results across
batches in an online manner, e.g. to calculate the RUNNING mean across
batch losses
"""
"""
params['validation_params'] = {
'topn_val': {
'data_params': {
# ImageNet data provider arguments
'func': ImageNetDataProvider,
'data_path': self.Config.data_path,
'group': 'val',
'crop_size': self.Config.crop_size,
# TFRecords (super class) data provider arguments
'file_pattern': 'validation*.tfrecords',
'batch_size': self.Config.batch_size,
'shuffle': False,
'shuffle_seed': self.Config.seed,
'file_grab_func': self.subselect_tfrecords,
'n_threads': 4,
},
'queue_params': {
'queue_type': 'fifo',
'batch_size': self.Config.batch_size,
'seed': self.Config.seed,
'capacity': self.Config.batch_size * 10,
'min_after_dequeue': self.Config.batch_size * 5,
},
'targets': {
'func': self.in_top_k,
},
'num_steps': self.Config.val_steps,
'agg_func': self.agg_mean,
'online_agg_func': self.online_agg_mean,
}
}
"""
params['validation_params'] = {}
"""
model_params defines the model i.e. the architecture that
takes the output of the data provider as input and outputs
the prediction of the model.
You will need to EDIT alexnet_model in models.py. alexnet_model
is supposed to define a standard AlexNet model in tensorflow.
Please open models.py and fill out the missing parts in the alexnet_model
function. Once you start working with different models you will need to
switch out alexnet_model with your model function.
"""
params['model_params'] = {
'func': self.Config.ytn.inference,
}
"""
loss_params defines your training loss.
You will need to EDIT 'loss_per_case_func'.
Implement a softmax cross-entropy loss. You can use tensorflow's
tf.nn.sparse_softmax_cross_entropy_with_logits function.
Note:
1.) loss_per_case_func is called with
loss_per_case_func(inputs, outputs)
by tfutils.
2.) labels = outputs['labels']
logits = outputs['pred']
"""
def loss_wrapper(inputs, outputs):
# coco
predicts = outputs['bboxes']
gt_boxes = tf.reshape(tf.cast(outputs['boxes'], tf.int32), [self.Config.batch_size, -1, 5])
num_objects = outputs['num_objects']
coco_loss, _, _ = self.Config.ytn.loss(predicts, gt_boxes, num_objects)
# imagenet
labels = outputs['labels']
logits = outputs['logits']
imagenet_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits)
print(imagenet_loss, coco_loss)
return imagenet_loss + coco_loss
params['loss_params'] = {
'targets': ['labels'],
'agg_func': tf.reduce_mean,
'loss_per_case_func': loss_wrapper,
'loss_per_case_func_params' : {'_outputs': 'outputs',
'_targets_$all': 'inputs'},
'loss_func_kwargs' : {},
}
"""
learning_rate_params defines the learning rate, decay and learning function.
You will need to EDIT this part. Replace the exponential decay
learning rate policy with a piecewise constant learning policy.
ATTENTION:
1.) 'learning_rate', 'decay_steps', 'decay_rate' and 'staircase' are not
arguments of tf.train.piecewise_constant! You will need to replace
them with the appropriate keys.
2.) 'func' passes global_step as input to your learning rate policy
function. Set the 'x' argument of tf.train.piecewise_constant to
global_step.
3.) set 'values' to [0.01, 0.005, 0.001, 0.0005] and
'boundaries' to [150000, 300000, 450000] for a batch size of 256
4.) You will need to delete all keys except for 'func' and replace them
            with the input arguments to tf.train.piecewise_constant.
"""
params['learning_rate_params'] = {
'func': tf.train.exponential_decay,
'learning_rate': 0.001,
'decay_steps': 5000, # FIX LATER,
'decay_rate': 0.95,
'staircase': True,
}
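        # Hedged sketch (not part of the original template): per the notes above,
        # a piecewise-constant schedule could replace the block just defined.
        # tfutils hands global_step to 'func' as its 'x' argument, so only the
        # remaining keyword arguments need to be listed:
        #
        # params['learning_rate_params'] = {
        #     'func': tf.train.piecewise_constant,
        #     'boundaries': [150000, 300000, 450000],
        #     'values': [0.01, 0.005, 0.001, 0.0005],
        # }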
"""
optimizer_params defines the optimizer.
You will need to EDIT the optimizer class. Replace the Adam optimizer
with a momentum optimizer after switching the learning rate policy to
piecewise constant.
"""
params['optimizer_params'] = {
'func': optimizer.ClipOptimizer,
'optimizer_class': tf.train.AdamOptimizer,
'clip': False,
}
"""
save_params defines how, where and when your training results are saved
in the database.
You will need to EDIT this part. Set your 'host' (set it to 'localhost',
or to IP if using remote mongodb), 'port' (set it to 24444, unless you
have changed mongodb.conf), 'dbname', 'collname', and 'exp_id'.
"""
params['save_params'] = {
            'host': '35.199.154.71',
'port': 24444,
'dbname': 'final',
'collname': 'yolo',
'exp_id': 'combined_fix',
'save_valid_freq': 10000,
'save_filters_freq': 5000,
'cache_filters_freq': 5000,
'save_metrics_freq': 200,
'save_initial_filters' : False,
'save_to_gfs': [],
}
"""
load_params defines how and if a model should be restored from the database.
You will need to EDIT this part. Set your 'host' (set it to 'localhost',
or to IP if using remote mongodb), 'port' (set it to 24444, unless you
have changed mongodb.conf), 'dbname', 'collname', and 'exp_id'.
If you want to restore your training these parameters should be the same
as in 'save_params'.
"""
params['load_params'] = {
            'host': '35.199.154.71',
'port': 24444,
'dbname': 'final',
'collname': 'yolo',
'exp_id': 'imagenet',
'do_restore': True,
'load_query': None,
}
return params
def agg_mean(self, x):
return {k: np.mean(v) for k, v in x.items()}
def in_top_k(self, inputs, outputs):
"""
Implements top_k loss for validation
You will need to EDIT this part. Implement the top1 and top5 functions
in the respective dictionary entry.
"""
def k_wrapper(inputs, outputs, k):
return tf.nn.in_top_k(outputs['logits'], inputs['labels'], k)
return {'top1': k_wrapper(inputs, outputs, 1),
'top5': k_wrapper(inputs, outputs, 5)}
def subselect_tfrecords(self, path):
"""
Illustrates how to subselect files for training or validation
"""
all_filenames = os.listdir(path)
rng = np.random.RandomState(seed=SEED)
rng.shuffle(all_filenames)
return [os.path.join(path, fn) for fn in all_filenames
if fn.endswith('.tfrecords')]
def return_outputs(self, inputs, outputs, targets, **kwargs):
"""
Illustrates how to extract desired targets from the model
"""
retval = {}
for target in targets:
retval[target] = outputs[target]
return retval
def online_agg_mean(self, agg_res, res, step):
"""
Appends the mean value for each key
"""
if agg_res is None:
agg_res = {k: [] for k in res}
for k, v in res.items():
agg_res[k].append(np.mean(v))
return agg_res
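    # Illustrative example: online_agg_mean(None, {'top1': [1, 0]}, step=0)
    # returns {'top1': [0.5]}; later calls keep appending the per-batch mean.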
if __name__ == '__main__':
"""
Illustrates how to run the configured model using tfutils
"""
base.get_params()
m = ImageNetYOLO()
params = m.setup_params()
    while True:
        try:
            base.train_from_params(**params)
        except Exception:
            # Swallow the failure and retry; with 'do_restore': True in
            # load_params, training resumes from the database checkpoint.
            pass
        params['train_params']['num_steps'] += 10000
| 36.959885
| 104
| 0.562137
|
1bb38437031f5e6b01ff219be2016a4a6b04381b
| 4,988
|
py
|
Python
|
gaiasdk/sdk.py
|
Shivakishore14/pythonsdk
|
ffc11cb642138c9b0a2eb69944afc2340cf46c24
|
[
"Apache-2.0"
] | null | null | null |
gaiasdk/sdk.py
|
Shivakishore14/pythonsdk
|
ffc11cb642138c9b0a2eb69944afc2340cf46c24
|
[
"Apache-2.0"
] | null | null | null |
gaiasdk/sdk.py
|
Shivakishore14/pythonsdk
|
ffc11cb642138c9b0a2eb69944afc2340cf46c24
|
[
"Apache-2.0"
] | null | null | null |
import sys
import time
import os
import grpc
import plugin_pb2
import plugin_pb2_grpc
from grpc_health.v1.health import HealthServicer
from grpc_health.v1 import health_pb2, health_pb2_grpc
from concurrent import futures
from fnvhash import fnv1a_32
from job import Job, Argument, ManualInteraction, GetJob, JobWrapper, InputType
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
cachedJobs = []
class ExitPipeline(Exception):
pass
class GRPCServer(plugin_pb2_grpc.PluginServicer):
"""Implementation of Plugin service."""
def GetJobs(self, request, context):
for job in cachedJobs:
yield job.job
def ExecuteJob(self, request, context):
job = GetJob(request.unique_id, cachedJobs)
        if job is None:
return "job not found"
# transform args
args = []
if hasattr(request, "args"):
for arg in request.args:
a = Argument("", InputType.TextFieldInp, arg.key, arg.value)
args.append(a)
# Execute job
result = plugin_pb2.JobResult()
try:
job.handler(args)
        except ExitPipeline as e:
result.exit_pipeline = True
result.unique_id = job.job.unique_id
result.message = str(e)
        except Exception as e:
result.exit_pipeline = True
result.failed = True
result.unique_id = job.job.unique_id
result.message = str(e)
return result
def serve(jobs):
# Cache the jobs list for later processing.
# We first have to translate given jobs to different structure.
for job in jobs:
# Create proto object
p = plugin_pb2.Job()
# Manual interaction
        if job.interaction is not None:
p.interaction.description = job.interaction.description
p.interaction.type = job.interaction.inputType
p.interaction.value = job.interaction.value
# Arguments
args = []
if job.args:
for arg in job.args:
a = plugin_pb2.Argument()
a.description = arg.description
a.type = arg.inputType.value
a.key = arg.key
a.value = arg.value
args.append(a)
# Set the rest of the fields
p.unique_id = fnv1a_32(bytes(job.title))
p.title = job.title
p.description = job.description
p.args.extend(args)
# Resolve dependencies
if job.dependsOn:
            for depJob in job.dependsOn:
                foundDep = False
                for currJob in jobs:
                    if depJob.lower() == currJob.title.lower():
                        p.dependson.append(fnv1a_32(bytes(currJob.title)))
                        foundDep = True
                        break
                if not foundDep:
                    raise Exception("job '" + job.title + "' has dependency '" + depJob + "' which is not declared")
# job wrapper object for this job
w = JobWrapper(job.handler, p)
cachedJobs.append(w)
# Check if two jobs have the same title which is restricted
for x, job in enumerate(cachedJobs):
for y, innerJob in enumerate(cachedJobs):
if x != y and job.job.unique_id == innerJob.job.unique_id:
raise Exception("duplicate job found (two jobs with same title)")
# get certificate path from environment variables
certPath = os.environ['GAIA_PLUGIN_CERT']
keyPath = os.environ['GAIA_PLUGIN_KEY']
caCertPath = os.environ['GAIA_PLUGIN_CA_CERT']
# check if all certs are available
if not os.path.isfile(certPath):
raise Exception("cannot find path to certificate")
if not os.path.isfile(keyPath):
raise Exception("cannot find path to key")
if not os.path.isfile(caCertPath):
raise Exception("cannot find path to root certificate")
# Open files
private_key = open(keyPath).read()
certificate_chain = open(certPath).read()
root_cert = open(caCertPath).read()
# We need to build a health service to work with go-plugin
health = HealthServicer()
health.set("plugin", health_pb2.HealthCheckResponse.ServingStatus.Value('SERVING'))
# Start the server.
server = grpc.server(futures.ThreadPoolExecutor(max_workers=20))
private_key_certificate_chain_pairs = ( (private_key, certificate_chain), )
server_credentials = grpc.ssl_server_credentials(private_key_certificate_chain_pairs, root_cert, True)
plugin_pb2_grpc.add_PluginServicer_to_server(GRPCServer(), server)
health_pb2_grpc.add_HealthServicer_to_server(health, server)
port = server.add_secure_port('127.0.0.1:0', server_credentials)
server.start()
# Output information
print("1|2|tcp|127.0.0.1:" + str(port) + "|grpc")
sys.stdout.flush()
try:
while True:
time.sleep(_ONE_DAY_IN_SECONDS)
except KeyboardInterrupt:
server.stop(0)
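# Illustrative usage sketch (comments only; the exact Job constructor lives in the
# job module and its signature is assumed here from the attributes used above):
#
#     def build_handler(args):
#         print("building ...")
#
#     def test_handler(args):
#         print("testing ...")
#
#     if __name__ == "__main__":
#         serve([
#             Job(handler=build_handler, title="Build", description="compile"),
#             Job(handler=test_handler, title="Test", description="run tests",
#                 dependsOn=["Build"]),
#         ])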
| 33.253333
| 116
| 0.623095
|
7c7be10b2be82b9084917bec34b49c8c483a4b91
| 1,748
|
py
|
Python
|
faas_test/test_cases/10-requests-lambda.py
|
jaimedantas/pacs_locust
|
df61bcad24776f6f6e395a281f99f5d764099669
|
[
"MIT"
] | null | null | null |
faas_test/test_cases/10-requests-lambda.py
|
jaimedantas/pacs_locust
|
df61bcad24776f6f6e395a281f99f5d764099669
|
[
"MIT"
] | null | null | null |
faas_test/test_cases/10-requests-lambda.py
|
jaimedantas/pacs_locust
|
df61bcad24776f6f6e395a281f99f5d764099669
|
[
"MIT"
] | null | null | null |
from locust import HttpUser, TaskSet, task, constant
from locust import LoadTestShape
from skimage.io import imread_collection
import base64
import random
import glob
class UserTasks(TaskSet):
@task
def get_root(self):
json = {"fileContents": str(getImage())[2:]}
self.client.post("/", data=None, json=json)
class WebsiteUser(HttpUser):
wait_time = constant(600)
tasks = [UserTasks]
class StagesShape(LoadTestShape):
"""
    A simple load test shape class that has different user counts and spawn rates at
different stages.
Keyword arguments:
stages -- A list of dicts, each representing a stage with the following keys:
duration -- When this many seconds pass the test is advanced to the next stage
users -- Total user count
spawn_rate -- Number of users to start/stop per second
        stop -- A boolean that can stop the test at a specific stage
stop_at_end -- Can be set to stop once all stages have run.
"""
threshold = 14400000
before = 1
userIdle = 1
totalTestTime = 1
def tick(self):
run_time = self.get_run_time()
if run_time < self.threshold:
tick_data = (self.userIdle, self.userIdle)
self.before = self.userIdle
return tick_data
return None
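# Hedged sketch (illustrative only; StagesShape above does not use it): a tick()
# driven by the list-of-stages layout described in the docstring. `stages` is a
# hypothetical list of dicts with the keys named there, with each duration taken
# as absolute elapsed seconds.
def staged_tick(stages, run_time):
    """Return (user_count, spawn_rate) for the current stage, or None to stop."""
    for stage in stages:
        if run_time < stage["duration"]:
            if stage.get("stop"):
                return None
            return stage["users"], stage["spawn_rate"]
    return None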
def getImage():
data = []
col_dir = 'images_set/sample/*.jpg'
col = imread_collection(col_dir)
for filename in glob.glob(col_dir):
with open(filename, "rb") as img_file:
            b64_string = base64.b64encode(img_file.read()).decode("ascii")
            data.append(b64_string)
index = random.randint(0, len(data)-1)
return data[index]
if __name__ == "__main__":
print(getImage())
| 28.193548
| 90
| 0.649886
|
9825e5980e5baff22e5e7191e1def2f561b1db7f
| 10,129
|
py
|
Python
|
src/Network/test/test1/wild_VS_wild_mutant_1_neighbor80_test02.py
|
ruiyangsong/mCNN
|
889f182245f919fb9c7a8d97965b11576b01a96c
|
[
"MIT"
] | null | null | null |
src/Network/test/test1/wild_VS_wild_mutant_1_neighbor80_test02.py
|
ruiyangsong/mCNN
|
889f182245f919fb9c7a8d97965b11576b01a96c
|
[
"MIT"
] | null | null | null |
src/Network/test/test1/wild_VS_wild_mutant_1_neighbor80_test02.py
|
ruiyangsong/mCNN
|
889f182245f919fb9c7a8d97965b11576b01a96c
|
[
"MIT"
] | null | null | null |
from hyperopt import Trials, STATUS_OK, tpe
from hyperas import optim
from hyperas.distributions import choice, uniform
import os, sys
import numpy as np
from sklearn.utils import class_weight
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
from keras.utils import to_categorical
from keras import Input, models, layers, regularizers, callbacks, optimizers
def data():
random_seed = 10
# data = np.load('/public/home/sry/mCNN/dataset/S2648/feature/mCNN/wild/npz/center_CA_PCA_False_neighbor_30.npz')
data = np.load('/dl/sry/mCNN/dataset/S2648/feature/mCNN/wild/npz/center_CA_PCA_False_neighbor_80.npz')
# data = np.load('E:/projects/mCNN/yanglab/mCNN-master/dataset/S2648/mCNN/wild/center_CA_PCA_False_neighbor_30.npz')
x = data['x']
y = data['y']
ddg = data['ddg'].reshape(-1)
train_num = x.shape[0]
indices = [i for i in range(train_num)]
np.random.seed(random_seed)
np.random.shuffle(indices)
x = x[indices]
y = y[indices]
positive_indices, negative_indices = ddg >= 0, ddg < 0
x_positive, x_negative = x[positive_indices], x[negative_indices]
y_positive, y_negative = y[positive_indices], y[negative_indices]
left_positive, left_negative = round(0.8 * x_positive.shape[0]), round(0.8 * x_negative.shape[0])
x_train, x_test = np.vstack((x_positive[:left_positive], x_negative[:left_negative])), np.vstack(
(x_positive[left_positive:], x_negative[left_negative:]))
y_train, y_test = np.vstack((y_positive[:left_positive], y_negative[:left_negative])), np.vstack(
(y_positive[left_positive:], y_negative[left_negative:]))
# sort row default is chain
# reshape and one-hot
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
# normalization
train_shape = x_train.shape
test_shape = x_test.shape
col_train = train_shape[-1]
col_test = test_shape[-1]
x_train = x_train.reshape((-1, col_train))
x_test = x_test.reshape((-1, col_test))
mean = x_train.mean(axis=0)
std = x_train.std(axis=0)
std[np.argwhere(std == 0)] = 0.01
x_train -= mean
x_train /= std
x_test -= mean
x_test /= std
x_train = x_train.reshape(train_shape)
x_test = x_test.reshape(test_shape)
# reshape
x_train = x_train.reshape(x_train.shape + (1,))
x_test = x_test.reshape(x_test.shape + (1,))
return x_train, y_train, x_test, y_test
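# Note on the {{choice(...)}} and {{uniform(...)}} markers used below: hyperas
# rewrites them into a hyperopt search space before calling the model function,
# so {{choice([512, 256, 128, 64, 32])}} behaves roughly like
# hp.choice('batch_size', [512, 256, 128, 64, 32]) and {{uniform(0.1, 1)}} like
# hp.uniform('drop1_num', 0.1, 1); the label names here are illustrative, since
# hyperas derives its own labels when it compiles the template.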
def Conv2DClassifierIn1(x_train,y_train,x_test,y_test):
summary = True
verbose = 1
# setHyperParams------------------------------------------------------------------------------------------------
batch_size = {{choice([512,256,128,64,32])}}
epoch = {{choice([300,275,250,225,200,175,150,125,100,75,50,25])}}
conv_block={{choice(['four', 'three', 'two'])}}
conv1_num={{choice([64,32,16,8])}}
conv2_num={{choice([128,64,32,16])}}
conv3_num={{choice([128,64,32])}}
conv4_num={{choice([256,128,64,32])}}
dense1_num={{choice([512, 256, 128])}}
dense2_num={{choice([256, 128, 64])}}
l1_regular_rate = {{uniform(0.00001, 1)}}
l2_regular_rate = {{uniform(0.000001, 1)}}
drop1_num={{uniform(0.1, 1)}}
drop2_num={{uniform(0.0001, 1)}}
activator={{choice(['tanh','relu','elu'])}}
optimizer={{choice(['SGD','rmsprop','adam'])}}
#---------------------------------------------------------------------------------------------------------------
kernel_size = (3, 3)
pool_size = (2, 2)
initializer = 'random_uniform'
padding_style = 'same'
loss_type='binary_crossentropy'
metrics=['accuracy']
my_callback = None
# early_stopping = EarlyStopping(monitor='val_loss', patience=4)
# checkpointer = ModelCheckpoint(filepath='keras_weights.hdf5',
# verbose=1,
# save_best_only=True)
# my_callback = callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.2,
# patience=5, min_lr=0.0001)
# build --------------------------------------------------------------------------------------------------------
input_layer = Input(shape=x_train.shape[1:])
conv = layers.Conv2D(conv1_num,kernel_size,padding=padding_style,kernel_initializer=initializer,activation=activator)(input_layer)
conv = layers.Conv2D(conv1_num,kernel_size,padding=padding_style,kernel_initializer=initializer,activation=activator)(conv)
pool = layers.MaxPooling2D(pool_size,padding=padding_style)(conv)
if conv_block == 'two':
conv = layers.Conv2D(conv2_num,kernel_size,padding=padding_style,kernel_initializer=initializer,activation=activator)(pool)
conv = layers.Conv2D(conv2_num,kernel_size,padding=padding_style,kernel_initializer=initializer,activation=activator)(conv)
BatchNorm = layers.BatchNormalization(axis=-1)(conv)
pool = layers.MaxPooling2D(pool_size,padding=padding_style)(BatchNorm)
elif conv_block == 'three':
conv = layers.Conv2D(conv2_num,kernel_size,padding=padding_style,kernel_initializer=initializer,activation=activator)(pool)
conv = layers.Conv2D(conv2_num,kernel_size,padding=padding_style,kernel_initializer=initializer,activation=activator)(conv)
BatchNorm = layers.BatchNormalization(axis=-1)(conv)
pool = layers.MaxPooling2D(pool_size,padding=padding_style)(BatchNorm)
conv = layers.Conv2D(conv3_num,kernel_size,padding=padding_style,kernel_initializer=initializer,activation=activator)(pool)
conv = layers.Conv2D(conv3_num,kernel_size,padding=padding_style,kernel_initializer=initializer,activation=activator)(conv)
BatchNorm = layers.BatchNormalization(axis=-1)(conv)
pool = layers.MaxPooling2D(pool_size,padding=padding_style)(BatchNorm)
elif conv_block == 'four':
conv = layers.Conv2D(conv2_num,kernel_size,padding=padding_style,kernel_initializer=initializer,activation=activator)(pool)
conv = layers.Conv2D(conv2_num,kernel_size,padding=padding_style,kernel_initializer=initializer,activation=activator)(conv)
BatchNorm = layers.BatchNormalization(axis=-1)(conv)
pool = layers.MaxPooling2D(pool_size,padding=padding_style)(BatchNorm)
conv = layers.Conv2D(conv3_num,kernel_size,padding=padding_style,kernel_initializer=initializer,activation=activator)(pool)
conv = layers.Conv2D(conv3_num,kernel_size,padding=padding_style,kernel_initializer=initializer,activation=activator)(conv)
BatchNorm = layers.BatchNormalization(axis=-1)(conv)
pool = layers.MaxPooling2D(pool_size,padding=padding_style)(BatchNorm)
conv = layers.Conv2D(conv4_num,kernel_size,padding=padding_style,kernel_initializer=initializer,activation=activator)(pool)
conv = layers.Conv2D(conv4_num,kernel_size,padding=padding_style,kernel_initializer=initializer,activation=activator)(conv)
BatchNorm = layers.BatchNormalization(axis=-1)(conv)
pool = layers.MaxPooling2D(pool_size,padding=padding_style)(BatchNorm)
flat = layers.Flatten()(pool)
drop = layers.Dropout(drop1_num)(flat)
dense = layers.Dense(dense1_num, activation=activator, kernel_regularizer=regularizers.l1_l2(l1=l1_regular_rate,l2=l2_regular_rate))(drop)
BatchNorm = layers.BatchNormalization(axis=-1)(dense)
drop = layers.Dropout(drop2_num)(BatchNorm)
dense = layers.Dense(dense2_num, activation=activator, kernel_regularizer=regularizers.l1_l2(l1=l1_regular_rate,l2=l2_regular_rate))(drop)
output_layer = layers.Dense(len(np.unique(y_train)),activation='softmax')(dense)
model = models.Model(inputs=input_layer, outputs=output_layer)
if summary:
model.summary()
# train(self):
class_weights = class_weight.compute_class_weight('balanced', np.unique(y_train), y_train.reshape(-1))
class_weights_dict = dict(enumerate(class_weights))
model.compile(optimizer=optimizer,
loss=loss_type,
metrics=metrics # accuracy
)
result = model.fit(x=x_train,
y=y_train,
batch_size=batch_size,
epochs=epoch,
verbose=verbose,
callbacks=my_callback,
validation_data=(x_test, y_test),
shuffle=True,
class_weight=class_weights_dict
)
validation_acc = np.amax(result.history['val_acc'])
print('Best validation acc of epoch:', validation_acc)
return {'loss': -validation_acc, 'status': STATUS_OK, 'model': model}
if __name__ == '__main__':
# config TF-----------------------------------------------------------------------------------------------------
CUDA, max_eval = sys.argv[1:]
os.environ['CUDA_VISIBLE_DEVICES'] = CUDA
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
set_session(tf.Session(config=config))
# x_train, y_train, x_test, y_test = data()
# Conv2DClassifierIn1(x_train, y_train, x_test, y_test)
best_run, best_model = optim.minimize(model=Conv2DClassifierIn1,
data=data,
algo=tpe.suggest,
max_evals=int(max_eval),
keep_temp=False,
trials=Trials())
for trial in Trials():
print(trial)
X_train, Y_train, X_test, Y_test = data()
print("Evalutation of best performing model:")
print(best_model.evaluate(X_test, Y_test))
print("Best performing model chosen hyper-parameters:")
print(best_run)
| 48.004739
| 146
| 0.63412
|
f670a125ee2c5e7be2e17dfc270fccd15230b08e
| 3,702
|
py
|
Python
|
openmc/deplete/integrator/predictor.py
|
wbinventor/openmc
|
02a281889bffcaa2573e1a090f006442d350d2f6
|
[
"MIT"
] | 1
|
2019-04-10T12:41:16.000Z
|
2019-04-10T12:41:16.000Z
|
openmc/deplete/integrator/predictor.py
|
wbinventor/openmc
|
02a281889bffcaa2573e1a090f006442d350d2f6
|
[
"MIT"
] | 5
|
2015-03-11T02:28:25.000Z
|
2018-11-07T14:10:28.000Z
|
openmc/deplete/integrator/predictor.py
|
dryuri92/openmc
|
e28e42e8c250cd1ad586d1d9fd1d20847ad92edd
|
[
"MIT"
] | null | null | null |
"""First-order predictor algorithm."""
import copy
from collections.abc import Iterable
from .cram import deplete
from ..results import Results
def predictor(operator, timesteps, power=None, power_density=None,
print_out=True):
r"""Deplete using a first-order predictor algorithm.
Implements the first-order predictor algorithm. This algorithm is
mathematically defined as:
.. math::
y' &= A(y, t) y(t)
A_p &= A(y_n, t_n)
y_{n+1} &= \text{expm}(A_p h) y_n
Parameters
----------
operator : openmc.deplete.TransportOperator
The operator object to simulate on.
timesteps : iterable of float
Array of timesteps in units of [s]. Note that values are not cumulative.
power : float or iterable of float, optional
Power of the reactor in [W]. A single value indicates that the power is
constant over all timesteps. An iterable indicates potentially different
power levels for each timestep. For a 2D problem, the power can be given
in [W/cm] as long as the "volume" assigned to a depletion material is
actually an area in [cm^2]. Either `power` or `power_density` must be
specified.
power_density : float or iterable of float, optional
Power density of the reactor in [W/gHM]. It is multiplied by initial
        heavy metal inventory to get total power if `power` is not specified.
print_out : bool, optional
Whether or not to print out time.
"""
if power is None:
if power_density is None:
raise ValueError(
"Neither power nor power density was specified.")
if not isinstance(power_density, Iterable):
power = power_density*operator.heavy_metal
else:
power = [i*operator.heavy_metal for i in power_density]
if not isinstance(power, Iterable):
power = [power]*len(timesteps)
# Generate initial conditions
with operator as vec:
# Initialize time and starting index
if operator.prev_res is None:
t = 0.0
i_res = 0
else:
t = operator.prev_res[-1].time[-1]
i_res = len(operator.prev_res) - 1
chain = operator.chain
for i, (dt, p) in enumerate(zip(timesteps, power)):
# Get beginning-of-timestep concentrations and reaction rates
# Avoid doing first transport run if already done in previous
# calculation
if i > 0 or operator.prev_res is None:
x = [copy.deepcopy(vec)]
op_results = [operator(x[0], p)]
# Create results, write to disk
Results.save(operator, x, op_results, [t, t + dt], p, i_res + i)
else:
# Get initial concentration
x = [operator.prev_res[-1].data[0]]
# Get rates
op_results = [operator.prev_res[-1]]
op_results[0].rates = op_results[0].rates[0]
# Scale reaction rates by ratio of powers
power_res = operator.prev_res[-1].power
ratio_power = p / power_res
op_results[0].rates *= ratio_power[0]
# Deplete for full timestep
x_end = deplete(chain, x[0], op_results[0].rates, dt, print_out)
# Advance time, update vector
t += dt
vec = copy.deepcopy(x_end)
# Perform one last simulation
x = [copy.deepcopy(vec)]
op_results = [operator(x[0], power[-1])]
# Create results, write to disk
Results.save(operator, x, op_results, [t, t], p, i_res + len(timesteps))
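# Hedged illustration (not part of the OpenMC API; requires numpy and scipy):
# the update documented above, y_{n+1} = expm(A_p * h) y_n, applied with a dense
# matrix exponential. The real code path above uses deplete(), which relies on
# CRAM rather than a dense expm, so this is only a conceptual sketch.
def _toy_predictor_step(A, y_n, h):
    """Advance the nuclide vector y_n over a timestep h using a dense expm."""
    import numpy as np
    from scipy.linalg import expm
    return expm(np.asarray(A, dtype=float) * h).dot(np.asarray(y_n, dtype=float))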
| 35.596154
| 80
| 0.598865
|
6b907588c356b4c37d4e200707d701b0ffe65864
| 302
|
py
|
Python
|
Implementation/10170_NFC West vs North/10170_NFC West vs North.py
|
7dudtj/BOJ_myCode
|
37d105590a7963e2232102b3098fea3c3504b96f
|
[
"MIT"
] | 1
|
2022-03-30T15:50:47.000Z
|
2022-03-30T15:50:47.000Z
|
Implementation/10170_NFC West vs North/10170_NFC West vs North.py
|
7dudtj/BOJ_myCode
|
37d105590a7963e2232102b3098fea3c3504b96f
|
[
"MIT"
] | null | null | null |
Implementation/10170_NFC West vs North/10170_NFC West vs North.py
|
7dudtj/BOJ_myCode
|
37d105590a7963e2232102b3098fea3c3504b96f
|
[
"MIT"
] | 1
|
2021-07-20T07:11:06.000Z
|
2021-07-20T07:11:06.000Z
|
print("""NFC West W L T
-----------------------
Seattle 13 3 0
San Francisco 12 4 0
Arizona 10 6 0
St. Louis 7 9 0
NFC North W L T
-----------------------
Green Bay 8 7 1
Chicago 8 8 0
Detroit 7 9 0
Minnesota 5 10 1""")
| 21.571429
| 32
| 0.390728
|
986efd8e332e074282ca3978bd5a60da4ba3f5c2
| 938
|
py
|
Python
|
AnotherPrimeProblem.py
|
mayanksahu33/HackerEarth-1
|
680336d8e27de525b2dfeaf3fda6c7d2640bf93f
|
[
"MIT"
] | null | null | null |
AnotherPrimeProblem.py
|
mayanksahu33/HackerEarth-1
|
680336d8e27de525b2dfeaf3fda6c7d2640bf93f
|
[
"MIT"
] | null | null | null |
AnotherPrimeProblem.py
|
mayanksahu33/HackerEarth-1
|
680336d8e27de525b2dfeaf3fda6c7d2640bf93f
|
[
"MIT"
] | 1
|
2020-10-01T06:54:42.000Z
|
2020-10-01T06:54:42.000Z
|
"""
Given a number N find the smallest number K such that it satisfies all the 3 rules -
i) K >= N .
ii) K is a prime number
iii) K ≡ 1 (mod 11 )
"""
def SpecialSieveOfEratosthenes(low,up):
if low == 1:
low = 2
prime = [True for i in range(up + 1)]
p = 2
while (p * p <= up):
if (prime[p] == True):
for i in range(p * 2, up + 1, p):
prime[i] = False
p += 1
prime[0]= False
prime[1]= False
res = []
for p in range(low,up +1):
if prime[p] and p % 11 == 1:
res.append(p)
return res
primes = SpecialSieveOfEratosthenes(2,pow(10,6)+100)
from bisect import bisect
from sys import stdin, stdout
N = int(input())
for _ in range(N):
n = int(stdin.readline()) #for fast input
pos = bisect(primes,n)
if (primes[pos-1] == n):
stdout.write(str(n)+'\n')
else:
stdout.write(str(primes[pos]) +'\n')
| 18.76
| 84
| 0.536247
|
148b6ea6d9f027d9e69eac44e63b9f9be9371bd9
| 4,860
|
py
|
Python
|
venv/lib/python3.8/site-packages/jupyter_server/tests/extension/test_manager.py
|
johncollinsai/post-high-frequency-data
|
88533b0e0afc7e7f82fee1d3ca4b68abc30aaeb4
|
[
"MIT"
] | 1
|
2022-03-17T12:56:14.000Z
|
2022-03-17T12:56:14.000Z
|
venv/lib/python3.8/site-packages/jupyter_server/tests/extension/test_manager.py
|
johncollinsai/post-high-frequency-data
|
88533b0e0afc7e7f82fee1d3ca4b68abc30aaeb4
|
[
"MIT"
] | null | null | null |
venv/lib/python3.8/site-packages/jupyter_server/tests/extension/test_manager.py
|
johncollinsai/post-high-frequency-data
|
88533b0e0afc7e7f82fee1d3ca4b68abc30aaeb4
|
[
"MIT"
] | 1
|
2022-03-28T09:19:34.000Z
|
2022-03-28T09:19:34.000Z
|
import os
import unittest.mock as mock
import pytest
from jupyter_core.paths import jupyter_config_path
from jupyter_server.extension.manager import ExtensionManager
from jupyter_server.extension.manager import ExtensionMetadataError
from jupyter_server.extension.manager import ExtensionModuleNotFound
from jupyter_server.extension.manager import ExtensionPackage
from jupyter_server.extension.manager import ExtensionPoint
# Use ServerApps environment because it monkeypatches
# jupyter_core.paths and provides a config directory
# that's not cross contaminating the user config directory.
pytestmark = pytest.mark.usefixtures("jp_environ")
def test_extension_point_api():
# Import mock extension metadata
from .mockextensions import _jupyter_server_extension_points
# Testing the first path (which is an extension app).
metadata_list = _jupyter_server_extension_points()
point = metadata_list[0]
module = point["module"]
app = point["app"]
e = ExtensionPoint(metadata=point)
assert e.module_name == module
assert e.name == app.name
assert app is not None
assert callable(e.load)
assert callable(e.link)
assert e.validate()
def test_extension_point_metadata_error():
# Missing the "module" key.
bad_metadata = {"name": "nonexistent"}
with pytest.raises(ExtensionMetadataError):
ExtensionPoint(metadata=bad_metadata)
def test_extension_point_notfound_error():
bad_metadata = {"module": "nonexistent"}
with pytest.raises(ExtensionModuleNotFound):
ExtensionPoint(metadata=bad_metadata)
def test_extension_package_api():
# Import mock extension metadata
from .mockextensions import _jupyter_server_extension_points
# Testing the first path (which is an extension app).
metadata_list = _jupyter_server_extension_points()
path1 = metadata_list[0]
app = path1["app"]
e = ExtensionPackage(name="jupyter_server.tests.extension.mockextensions")
e.extension_points
assert hasattr(e, "extension_points")
assert len(e.extension_points) == len(metadata_list)
assert app.name in e.extension_points
assert e.validate()
def test_extension_package_notfound_error():
with pytest.raises(ExtensionModuleNotFound):
ExtensionPackage(name="nonexistent")
def _normalize_path(path_list):
return [p.rstrip(os.path.sep) for p in path_list]
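# e.g. _normalize_path(["/tmp/a/", "/tmp/b"]) -> ["/tmp/a", "/tmp/b"] on POSIX,
# so trailing separators cannot break the path comparisons in the tests below.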
def test_extension_manager_api(jp_serverapp):
jpserver_extensions = {"jupyter_server.tests.extension.mockextensions": True}
manager = ExtensionManager(serverapp=jp_serverapp)
assert manager.config_manager
expected = _normalize_path(os.path.join(jupyter_config_path()[0], "serverconfig"))
assert _normalize_path(manager.config_manager.read_config_path[0]) == expected
manager.from_jpserver_extensions(jpserver_extensions)
assert len(manager.extensions) == 1
assert "jupyter_server.tests.extension.mockextensions" in manager.extensions
def test_extension_manager_linked_extensions(jp_serverapp):
name = "jupyter_server.tests.extension.mockextensions"
manager = ExtensionManager(serverapp=jp_serverapp)
manager.add_extension(name, enabled=True)
manager.link_extension(name)
assert name in manager.linked_extensions
def test_extension_manager_fail_add(jp_serverapp):
name = "jupyter_server.tests.extension.notanextension"
manager = ExtensionManager(serverapp=jp_serverapp)
manager.add_extension(name, enabled=True) # should only warn
jp_serverapp.reraise_server_extension_failures = True
with pytest.raises(ExtensionModuleNotFound):
manager.add_extension(name, enabled=True)
def test_extension_manager_fail_link(jp_serverapp):
name = "jupyter_server.tests.extension.mockextensions.app"
with mock.patch(
"jupyter_server.tests.extension.mockextensions.app.MockExtensionApp.parse_command_line",
side_effect=RuntimeError,
):
manager = ExtensionManager(serverapp=jp_serverapp)
manager.add_extension(name, enabled=True)
manager.link_extension(name) # should only warn
jp_serverapp.reraise_server_extension_failures = True
with pytest.raises(RuntimeError):
manager.link_extension(name)
def test_extension_manager_fail_load(jp_serverapp):
name = "jupyter_server.tests.extension.mockextensions.app"
with mock.patch(
"jupyter_server.tests.extension.mockextensions.app.MockExtensionApp.initialize_handlers",
side_effect=RuntimeError,
):
manager = ExtensionManager(serverapp=jp_serverapp)
manager.add_extension(name, enabled=True)
manager.link_extension(name)
manager.load_extension(name) # should only warn
jp_serverapp.reraise_server_extension_failures = True
with pytest.raises(RuntimeError):
manager.load_extension(name)
| 36.541353
| 97
| 0.767284
|
08266a115b2e4569363ad793c9a7e2eaa6331846
| 64,247
|
py
|
Python
|
tensorflow/python/eager/function.py
|
cameronaaron/tensorflow
|
dae1e7c69336d28897abf7853bde10110d48b15f
|
[
"Apache-2.0"
] | 1
|
2019-04-30T00:55:14.000Z
|
2019-04-30T00:55:14.000Z
|
tensorflow/python/eager/function.py
|
h7lost/tensorflow
|
8ae7343f3d24569b4bb142ddc7b58037267a2d3c
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/python/eager/function.py
|
h7lost/tensorflow
|
8ae7343f3d24569b4bb142ddc7b58037267a2d3c
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=unidiomatic-typecheck
"""Defun decorator for defining graph-mode functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import re
import sys
import threading
import types as types_lib
import weakref
import numpy as np
import six
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import function_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.eager import context
from tensorflow.python.eager import execute
from tensorflow.python.eager import tape
from tensorflow.python.eager.graph_only_ops import graph_placeholder
from tensorflow.python.framework import c_api_util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes as dtypes_module
from tensorflow.python.framework import func_graph as func_graph_module
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import custom_gradient
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.util import compat
from tensorflow.python.util import nest
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
# This is to avoid a circular dependency with gradients_impl
gradients_impl._function = sys.modules[__name__] # pylint: disable=protected-access
FORWARD_FUNCTION_ATTRIBUTE_NAME = "forward_function_name"
BACKWARD_FUNCTION_ATTRIBUTE_NAME = "backward_function_name"
# TODO(scottzhu): Update this to allow arbitrary attribute names in future.
WHITELIST_FUNCTION_ATTRIBUTE_REGEX = [
"experimental_.*",
FORWARD_FUNCTION_ATTRIBUTE_NAME,
BACKWARD_FUNCTION_ATTRIBUTE_NAME
]
def _parse_func_attrs(attributes):
"""Convert the keyword arguments into function_def attributes.
Currently only support primitive types: bool, int, float and string.
Args:
attributes: the dictionary of attributes.
Returns:
A dict of attributes where the key is the name of attribute and the value
is the AttrValue proto.
Raises:
ValueError: If the kwargs contains unwhitelisted name or unsupported value
types.
"""
attrs = {}
for key, value in attributes.items():
if not any([re.match(reg, key)
for reg in WHITELIST_FUNCTION_ATTRIBUTE_REGEX]):
raise ValueError("Attribute name is not whitelisted. "
"Whitelisted: prefix %s, got: %s" %
(WHITELIST_FUNCTION_ATTRIBUTE_REGEX, key))
if isinstance(value, attr_value_pb2.AttrValue):
attrs[key] = value
# bool type check has to happen before int since bool is a subclass of int.
elif isinstance(value, bool):
attrs[key] = attr_value_pb2.AttrValue(b=value)
elif isinstance(value, int):
attrs[key] = attr_value_pb2.AttrValue(i=value)
elif isinstance(value, float):
attrs[key] = attr_value_pb2.AttrValue(f=value)
elif isinstance(value, (str, bytes)):
attrs[key] = attr_value_pb2.AttrValue(s=compat.as_bytes(value))
else:
raise ValueError("Unsupported attribute type for %s with type %s" %
(key, type(value)))
return attrs
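# Illustrative example (not part of the public API): with the whitelist above,
#   _parse_func_attrs({"experimental_tag": True, "forward_function_name": "f"})
# yields {"experimental_tag": AttrValue(b=True),
#         "forward_function_name": AttrValue(s=b"f")},
# while a key that matches no whitelisted pattern raises ValueError.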
def _forward_name(n):
"""The name of a generated forward defun named n."""
return "__forward_%s_%s" % (n, ops.uid())
def _backward_name(n):
"""The name of a generated backward defun named n."""
return "__backward_%s_%s" % (n, ops.uid())
def _inference_name(n):
"""The name of a forward-but-no-gradient defun named n."""
return "__inference_%s_%s" % (n, ops.uid())
# TODO(apassos) get rid of this by splitting framework.function._DefinedFunction
# so it doesn't have the definition-generating logic and is just a container for
# an already-defined function.
class _EagerDefinedFunction(object):
"""Callable with the interface of `framework.function._DefinedFunction.`
`_EagerDefinedFunction` encapsulates a function definition and its properties,
and it provides a method for calling the encapsulated function. Some Ops
take functions as attributes, which have type `func`; an instance of this
class may be provided as the value of these `func` attributes.
"""
def __init__(self, name, graph, inputs, outputs, attrs):
"""Initializes an eager defined function.
Args:
name: str, the name for the created function.
graph: Graph, the graph containing the operations in the function
inputs: the tensors in the graph to be used as inputs to the function
outputs: the tensors in the graph which will be outputs to the function
attrs: dict mapping names of attributes to their AttrValue values
"""
operations = [
op for op in graph.get_operations()
if op not in set(arg.op for arg in inputs)
]
fn = pywrap_tensorflow.TF_GraphToFunction_wrapper(
graph._c_graph, # pylint: disable=protected-access
compat.as_str(name),
False,
[o._c_op for o in operations], # pylint: disable=protected-access
[t._as_tf_output() for t in inputs], # pylint: disable=protected-access
[t._as_tf_output() for t in outputs], # pylint: disable=protected-access
[],
None,
compat.as_str(""))
for name, attr_value in attrs.items():
serialized = attr_value.SerializeToString()
# TODO(iga): this creates and deletes a new TF_Status for every attr.
# It might be worth creating a convenient way to re-use status.
pywrap_tensorflow.TF_FunctionSetAttrValueProto(
fn, compat.as_str(name), serialized)
    # TODO(apassos) avoid creating a FunctionDef (especially to grab the
    # signature), but also in general it's nice not to depend on it.
with c_api_util.tf_buffer() as buffer_:
pywrap_tensorflow.TF_FunctionToFunctionDef(fn, buffer_)
proto_data = pywrap_tensorflow.TF_GetBuffer(buffer_)
function_def = function_pb2.FunctionDef()
function_def.ParseFromString(compat.as_bytes(proto_data))
with ops.init_scope():
if context.executing_eagerly():
context.add_function(fn)
self.definition = function_def
self.name = compat.as_bytes(function_def.signature.name)
self.signature = function_def.signature
self._num_outputs = len(self.signature.output_arg)
self._output_types = [o.type for o in self.signature.output_arg]
self._output_shapes = [o.shape for o in outputs]
self._func_graph_outputs = outputs
self.grad_func_name = None
self.python_grad_func = None
self._c_func = c_api_util.ScopedTFFunction(fn)
self._grad_func = None
self._graph = graph
self._stateful_ops = tuple(op for op in operations if op.op_def.is_stateful)
def add_to_graph(self, g):
# pylint: disable=protected-access
if self.name not in g._functions:
g._add_function(self)
for f in self._graph._functions.values():
if f.name not in g._functions:
g._add_function(f)
# pylint: enable=protected-access
@property
def stateful_ops(self):
return self._stateful_ops
def call(self, ctx, args):
"""Calls this function with `args` as inputs.
Function execution respects device annotations only if the function won't
be compiled with xla.
Args:
ctx: a Context object
args: a list of arguments to supply this function with.
Returns:
The outputs of the function call.
Raises:
ValueError: if the number of arguments is incorrect.
"""
executing_eagerly = ctx.executing_eagerly()
if self._graph._xla_compile: # pylint: disable=protected-access
# XLA compilation relies upon a custom kernel creator to run functions.
signature = self.signature
if executing_eagerly:
outputs = execute.execute(
str(signature.name),
num_outputs=self._num_outputs,
inputs=args,
attrs=None,
ctx=ctx)
else:
g = ops.get_default_graph()
self.add_to_graph(g)
op = g.create_op(
signature.name,
[ops.internal_convert_to_tensor(x, ctx=ctx) for x in args],
tuple(dtypes_module.DType(x.type) for x in signature.output_arg),
op_def=signature,
name="FunctionCall",
compute_shapes=False)
outputs = op.outputs
if not outputs:
return op
outputs = [outputs] if isinstance(
outputs, (ops.Tensor, type(None))) else list(outputs)
else:
# TODO(akshayka): Either remove this if the FunctionLibraryRuntime
# creates `PartitionedCallOp` kernels by default, or remove the previous
# branch if a TPU kernel is registered for `PartitionedCall`.
if len(args) != len(self.signature.input_arg):
raise ValueError(
"Arguments and signature arguments do not match: %s %s " %
(len(args), len(list(self.signature.input_arg))))
function_call_options = ctx.get_function_call_options()
outputs = functional_ops.partitioned_call(
args=args,
f=self,
tout=self._output_types,
executing_eagerly=executing_eagerly,
config=function_call_options.rewriter_config_serialized,
executor_type=function_call_options.executor_type)
if executing_eagerly:
return outputs
else:
for i, shape in enumerate(self._output_shapes):
outputs[i].set_shape(shape)
for i, func_graph_output in enumerate(self._func_graph_outputs):
custom_gradient.copy_handle_data(func_graph_output, outputs[i])
return outputs
class Function(object):
"""Callable object encapsulating a function definition and its gradient.
`Function` is a callable that encapsulates a function definition and
is differentiable under `tf.GradientTape` objects.
"""
def __init__(self, func_graph, attrs=None, signature=None):
"""Initialize a Function.
Args:
func_graph: An instance of FuncGraph: the function body to wrap.
attrs: (optional) dict mapping names of attributes to their AttrValue
values. Attributes in `attrs` will be included in this function's
definition.
signature: a nested sequence of `TensorSpec` objects specifying the input
signature of this function.
Raises:
ValueError: If number of input_placeholders is not equal to the number
of function inputs.
"""
self._arg_keywords = None
self._num_positional_args = None
self._func_graph = func_graph
self._captured_inputs = list(self._func_graph.captures.keys())
self._num_outputs = len(self._func_graph.outputs)
self._output_shapes = tuple(
output.shape for output in self._func_graph.outputs)
self._attrs = _parse_func_attrs(attrs or {})
self._inference_function = _EagerDefinedFunction(
_inference_name(self._func_graph.name), self._func_graph,
self._func_graph.inputs, self._func_graph.outputs, self._attrs)
self._backward_graph_function = None
self._signature = signature
self._gradient_name = None
def __call__(self, *args, **kwargs):
"""Executes the wrapped function.
Args:
*args: Tensors or Variables. Positional arguments are only accepted when
they correspond one-to-one with arguments of the traced Python function.
**kwargs: Tensors or Variables specified by name. When
`get_concrete_function` was called to create this `Function`, each
Tensor input was given a name, defaulting to the name of the Python
function's argument but possibly overridden by the `name=` argument to
`tf.TensorSpec`. These names become the argument names for the concrete
function.
Returns:
The result of applying the TF function on the given Tensors.
Raises:
AssertionError: If this `Function` was not created through
`get_concrete_function`.
ValueError: If arguments contains anything other than Tensors or
Variables.
TypeError: For invalid positional/keyword argument combinations.
"""
if self._arg_keywords is None or self._num_positional_args is None:
if self._signature:
if kwargs:
raise NotImplementedError(
"Keyword arguments not supported when calling a "
"wrap_function-decorated function.")
return self._call_flat(args)
raise AssertionError(
"Tried to call a concrete function obtained from an interal API "
"through the public interface. Use get_concrete_function instead.")
if len(args) > self._num_positional_args:
raise TypeError(
("Expected at most {} positional arguments ({}), got {}. When "
"calling a concrete function, positional arguments may not be bound "
"to Tensors within nested structures.").format(
self._num_positional_args,
self._arg_keywords[:self._num_positional_args],
args))
args = list(args)
for keyword in self._arg_keywords[len(args):]:
args.append(kwargs.pop(compat.as_str(keyword)))
if kwargs:
positional_arg_keywords = set(self._arg_keywords[:len(args)])
for unused_key in kwargs:
if unused_key in positional_arg_keywords:
raise TypeError("Got two values for keyword '{}'.".format(unused_key))
raise TypeError("Keyword arguments {} unknown.".format(kwargs.keys()))
return self._call_flat(args)
def _filtered_call(self, args, kwargs):
"""Executes the function, filtering arguments from the Python function.
Objects aside from Tensors and Variables are ignored.
Args:
args: Canonicalized positional arguments of the Python function.
kwargs: Canonicalized keyword arguments of the Python function.
Returns:
The result of applying the function on the Tensors/Variables contained in
`args` and `kwargs`.
"""
return self._call_flat(
(t for t in nest.flatten((args, kwargs))
if isinstance(
t, (ops.Tensor, resource_variable_ops.ResourceVariable))))
def _call_flat(self, args):
"""Executes the wrapped function.
Args:
args: a list of Tensors or Variables.
Returns:
The result of applying the TF function to `args`.
Raises:
ValueError: If `args` contains anything other than Tensors or Variables.
"""
ctx = context.context()
for v in self._func_graph.variables:
if v.trainable:
tape.variable_accessed(v)
tensor_inputs = []
for i, arg in enumerate(args):
if isinstance(arg, resource_variable_ops.ResourceVariable):
if arg.trainable:
tape.variable_accessed(arg)
tensor_inputs.append(arg.handle)
elif isinstance(arg, ops.Tensor):
tensor_inputs.append(arg)
elif (self._signature is not None and
isinstance(self._signature[i], tensor_spec.TensorSpec)):
tensor_inputs.append(
ops.convert_to_tensor(arg, self._signature[i].dtype))
else:
raise ValueError("All inputs to `Function`s must be Tensors; "
"on invocation of %s, the %d-th input (%s) was not a "
"Tensor." % (self._func_graph.name, i, str(arg)))
args = tensor_inputs + self._captured_inputs
if (tape.should_record(tensor_inputs) or
tape.should_record(self._captured_inputs)):
return self._backprop_call(args)
# Only need to override the gradient in graph mode and when we have outputs.
if context.executing_eagerly() or not self.outputs:
outputs = self._inference_function.call(ctx, args)
else:
if not self._gradient_name:
self._gradient_name = "PartitionedCall-%s" % ops.uid()
self._register_gradient(self._gradient_name)
with ops.get_default_graph().gradient_override_map(
{"PartitionedCall": self._gradient_name,
"StatefulPartitionedCall": self._gradient_name}):
outputs = self._inference_function.call(ctx, args)
return self._build_call_outputs(outputs)
def _register_gradient(self, name):
"""Registers the gradient for the current Function under the given name.
The gradient rewrites an inference call op to a forward call op, but does
not modify a pre-existing forward call op. It then computes the gradient
from the output's gradients and the side outputs of the forward op.
Args:
name: The name to register the gradient as.
"""
@ops.RegisterGradient(name)
def grad_fn(op, *doutputs): # pylint: disable=unused-variable
"""Gradients of this function."""
if self._backward_graph_function is None:
self._construct_backprop_function()
# pylint: disable=protected-access
self._forward_function.add_to_graph(op.graph)
num_inference_outputs = self._inference_function._num_outputs
# Rewrite an inference call op to be a forward call op
if op.get_attr("f").name.encode() == self._inference_function.name:
func = attr_value_pb2.AttrValue(
func=attr_value_pb2.NameAttrList(
name=self._forward_function.name))
op._set_attr("f", func)
types = attr_value_pb2.AttrValue.ListValue(
type=self._forward_function._output_types)
op._set_attr("Tout", attr_value_pb2.AttrValue(list=types))
for i in range(
num_inference_outputs, len(self._forward_function._output_types)):
t = ops.Tensor(op, i, self._forward_function._output_types[i])
t.set_shape(self._forward_function._output_shapes[i])
func_graph_output = self._forward_function._func_graph_outputs[i]
custom_gradient.copy_handle_data(func_graph_output, t)
op._outputs.append(t)
# pylint: enable=protected-access
# Compute the gradients using the side outputs
side_outputs = op.outputs[num_inference_outputs:]
args = list(doutputs[:num_inference_outputs]) + list(side_outputs)
return self._backward_graph_function._call_flat( # pylint: disable=protected-access
(a for a in args if a is not None))
@property
def name(self):
"""Function name."""
return self._inference_function.name
@property
def graph(self):
"""Returns the graph from which this function was constructed."""
return self._func_graph
@property
def inputs(self):
"""Returns tensors in `self.graph` corresponding to arguments."""
return self._func_graph.inputs
@property
def outputs(self):
"""Returns tensors in `self.graph` corresponding to return values."""
return self._func_graph.outputs
@property
def captured_inputs(self):
"""Returns external Tensors captured by this function.
self.__call__(*args) passes `args + self.captured_inputs` to the function.
"""
return self._captured_inputs
@property
def function_def(self):
"""Returns a `FunctionDef` object representing this function."""
return self._inference_function.definition
@property
def output_shapes(self):
"""The function's output shapes."""
# TODO(ebrevdo): Should we only keep the output shapes associated
# with len(self._python_returns) outputs?
# TODO(akshayka): Consider removing this.
outputs_list = nest.flatten(self._func_graph.structured_outputs)
j = 0
for i, o in enumerate(outputs_list):
if o is not None:
if isinstance(o, ops.IndexedSlices):
# Extract the shape of the `IndexedSlices` object's `values` field.
outputs_list[i] = self._output_shapes[j] # the `values` shape
if o.dense_shape is not None:
j += 3 # skip over shapes for `values`, `indices`, `dense_shape`
else:
j += 2 # skip over shapes for `values`, `indices`
else:
outputs_list[i] = self._output_shapes[j]
j += 1
return nest.pack_sequence_as(self._func_graph.structured_outputs,
outputs_list)
@property
def output_dtypes(self):
# TODO(akshayka): Consider removing this.
return nest.map_structure(lambda x: x.dtype if x is not None else None,
self._func_graph.structured_outputs)
def add_to_graph(self, g=None, register_gradient_functions=False):
"""Registers the function, adds it to the graph g or default graph."""
# If we are not executing eagerly, adds the function to default graph if no
# graph is specified.
# In case of eager execution, function definition gets added to context
# during construction itself.
# TODO(allel/shivaniagrawal): rename this to register to reflect the
# method's functionality better. Remove register_gradient_functions argument
# and figure out if these needs to be registered.
if not context.executing_eagerly() or g:
if not g:
g = ops.get_default_graph()
self._inference_function.add_to_graph(g) # pylint: disable=protected-access
# pylint: disable=protected-access
if register_gradient_functions:
# There are two situations for the actual call of a defun:
# 1. If none of the input args are resource variables or watch by any
# tape, and it will run the _inference_function of concrete_func for
# forward pass, the gradient will be generated by standard mechanism.
# 2. Otherwise, defun will create two functions, one for forward pass,
# and the backward pass will be created via tape.
# When registering the function, we register both cases.
if self._backward_graph_function is None:
self._construct_backprop_function()
forward_function = self._forward_function
backward_function = self._backward_graph_function._inference_function
# pylint: enable=protected-access
forward_function.add_to_graph(g)
backward_function.add_to_graph(g)
def _construct_backprop_function(self):
"""Constructs the backprop function object for this function."""
backwards_graph = func_graph_module.FuncGraph(
_backward_name(self._func_graph.name))
forward_function_name = _forward_name(self._func_graph.name)
outputs = [x for x in self._func_graph.outputs
if gradients_impl.IsTrainable(x)]
with backwards_graph.as_default():
gradients_wrt_outputs = [
graph_placeholder(x.dtype, x.shape) for x in outputs
]
gradients_wrt_inputs = gradients_impl._GradientsHelper( # pylint: disable=protected-access
outputs,
self._func_graph.inputs,
grad_ys=gradients_wrt_outputs,
src_graph=self._func_graph)
backwards_graph_captures = list(backwards_graph.captures.keys())
backward_function_attr = _parse_func_attrs(
{FORWARD_FUNCTION_ATTRIBUTE_NAME: forward_function_name})
backward_function_attr.update(self._attrs)
# The ordering of `backwards_graph.inputs` is important: inputs of
# `self._backward_graph_function` correspond to outputs of
# `self._forward_function`.
backwards_graph.inputs = gradients_wrt_outputs + list(
backwards_graph.captures.values())
# Clear captures, since we pass them in as inputs.
backwards_graph.captures = {}
backwards_graph.outputs.extend(
grad for grad in func_graph_module.flatten(gradients_wrt_inputs)
if grad is not None)
backwards_graph.structured_outputs = gradients_wrt_inputs
self._backward_graph_function = Function(
backwards_graph, attrs=backward_function_attr)
forward_function_attr = _parse_func_attrs({
BACKWARD_FUNCTION_ATTRIBUTE_NAME:
self._backward_graph_function._inference_function.name}) # pylint: disable=protected-access
forward_function_attr.update(self._attrs)
self._forward_function = _EagerDefinedFunction(
forward_function_name, self._func_graph, self._func_graph.inputs,
self._func_graph.outputs + backwards_graph_captures,
forward_function_attr)
def _backprop_call(self, args):
"""Calls the forward function and records the result on a tape.
(Only records results on a tape if the function has outputs)
Args:
args: All inputs to the function, including resolved captured inputs
Returns:
The call output.
"""
if self._backward_graph_function is None:
self._construct_backprop_function()
ctx = context.context()
if not self._gradient_name:
self._gradient_name = "PartitionedCall-%s" % ops.uid()
self._register_gradient(self._gradient_name)
with ops.get_default_graph().gradient_override_map(
{"PartitionedCall": self._gradient_name,
"StatefulPartitionedCall": self._gradient_name}):
outputs = self._forward_function.call(ctx, args)
if isinstance(outputs, ops.Operation) or outputs is None:
return outputs
# `real_outputs` are the actual outputs of the inference graph function;
# `side_outputs` are the intermediate Tensors that were added as outputs to
# the forward graph function so that we can compute its gradient.
real_outputs = outputs[:self._num_outputs]
skip_positions = [i for i, t in enumerate(real_outputs)
if not gradients_impl.IsTrainable(t)]
side_outputs = outputs[self._num_outputs:]
def backward_function(*args):
args = [a for i, a in enumerate(args)
if a is not None and i not in skip_positions]
return self._backward_graph_function._call_flat( # pylint: disable=protected-access
list(args) + side_outputs)
tape.record_operation(self._forward_function.signature.name, real_outputs,
args, backward_function)
return self._build_call_outputs(real_outputs)
def _build_call_outputs(self, result):
"""Maps the fdef output list to actual output structure.
Args:
result: Output lists defined by FunctionDef.
Returns:
The actual call output.
"""
if self._func_graph.structured_outputs is None:
return result
# Use `nest.flatten` instead of `func_graph_module.flatten` in order to
# preserve any IndexedSlices in `self._func_graph.structured_outputs`.
outputs_list = nest.flatten(self._func_graph.structured_outputs)
j = 0
for i, o in enumerate(outputs_list):
if o is not None:
if isinstance(o, ops.IndexedSlices):
# Repack Tensors for IndexedSlices.
if o.dense_shape is not None:
outputs_list[i] = ops.IndexedSlices(
values=result[j],
indices=result[j + 1],
dense_shape=result[j + 2])
j += 3
else:
outputs_list[i] = ops.IndexedSlices(
values=result[j], indices=result[j + 1])
j += 2
else:
outputs_list[i] = result[j]
j += 1
ret = nest.pack_sequence_as(self._func_graph.structured_outputs,
outputs_list)
return ret
pywrap_tensorflow.RegisterType("Tensor", ops.Tensor)
pywrap_tensorflow.RegisterType("IndexedSlices", ops.IndexedSlices)
def _deterministic_dict_values(dictionary):
return tuple(dictionary[key] for key in sorted(dictionary))
class PolymorphicFunction(object):
"""Wrapper class for the graph functions defined for a Python function.
See the documentation for `defun` for more information on the semantics of
defined functions.
  The PolymorphicFunction class is thread-compatible, meaning that minimal
  usage of defuns (defining and calling) is thread-safe, but if users call other
  methods or invoke the base `python_function` themselves, external
  synchronization is necessary.
"""
def __init__(self,
python_function,
name,
input_signature=None,
attributes=None,
experimental_autograph=False):
"""Initializes a polymorphic function.
Args:
python_function: the function to be wrapped.
name: the name given to it.
input_signature: a possibly nested sequence of `TensorSpec` objects
specifying the input signature of this function. If `None`, a separate
function is instantiated for each inferred input signature.
attributes: dict, extra keyword arguments that will be added as attribute
of the function.
experimental_autograph: whether to use autograph to compile
`python_function`. See https://www.tensorflow.org/guide/autograph for
more information.
Raises:
ValueError: if `input_signature` is not None and the `python_function`'s
argspec has keyword arguments.
"""
if isinstance(python_function, functools.partial):
self._python_function = python_function.func
self._args_to_prepend = python_function.args or tuple()
self._kwargs_to_include = python_function.keywords or {}
else:
self._python_function = python_function
self._args_to_prepend = tuple()
self._kwargs_to_include = {}
self._name = name
self._experimental_autograph = experimental_autograph
self._function_cache = collections.OrderedDict()
self._function_attributes = attributes or {}
self._lock = threading.Lock()
    # _descriptor_cache is a cache mapping an instance of a class to an
    # instance-specific PolymorphicFunction, used to make sure defun-decorated
    # methods create different functions for each instance.
self._descriptor_cache = weakref.WeakKeyDictionary()
fullargspec = tf_inspect.getfullargspec(self._python_function)
if tf_inspect.ismethod(self._python_function):
# Remove `self`: default arguments shouldn't be matched to it.
args = fullargspec.args[1:]
else:
args = fullargspec.args
# A cache mapping from argument name to index, for canonicalizing
# arguments that are called in a keyword-like fashion.
self._args_to_indices = {arg: i for i, arg in enumerate(args)}
self._arg_names = args
self._vararg_name = fullargspec.varargs
# A cache mapping from arg index to default value, for canonicalization.
offset = len(args) - len(fullargspec.defaults or [])
self._arg_indices_to_default_values = {
offset + index: default
for index, default in enumerate(fullargspec.defaults or [])
}
self._default_values = fullargspec.defaults
self._default_values_start_index = offset
if input_signature is None:
self._input_signature = None
else:
if fullargspec.varkw is not None or fullargspec.kwonlyargs:
raise ValueError("Cannot define a TensorFlow function from a Python "
"function with keyword arguments when "
"input_signature is provided.")
if not isinstance(input_signature, (tuple, list)):
raise TypeError("input_signature must be either a tuple or a "
"list, received " + str(type(input_signature)))
self._input_signature = tuple(input_signature)
self._flat_input_signature = tuple(nest.flatten(input_signature))
def __call__(self, *args, **kwargs):
"""Calls a graph function specialized to the inputs."""
graph_function, args, kwargs = self._maybe_define_function(args, kwargs)
return graph_function._filtered_call(args, kwargs) # pylint: disable=protected-access
@property
def python_function(self):
"""Returns the wrapped Python function."""
return self._python_function
def _get_concrete_function_internal(self, *args, **kwargs):
"""Bypasses error checking when getting a graph function."""
if self._input_signature:
args, kwargs = None, None
graph_function, _, _ = self._maybe_define_function(args, kwargs)
return graph_function
def get_concrete_function(self, *args, **kwargs):
"""Returns a `Function` object specialized to inputs and execution context.
Args:
*args: inputs to specialize on.
**kwargs: inputs to specialize on.
"""
if self._input_signature:
if kwargs:
raise ValueError("Cannot define a TensorFlow function from a Python "
"function with keyword arguments when "
"input_signature is provided.")
if args:
# If args are provided, they must match the input signature.
try:
nest.assert_same_structure(self._input_signature, args)
except (ValueError, TypeError):
raise ValueError("Structure of Python function inputs does not match "
"input_signature.")
flat_inputs = nest.flatten(args)
if any(not isinstance(arg, (ops.Tensor, tensor_spec.TensorSpec))
for arg in flat_inputs):
raise ValueError("When input_signature is provided, all inputs to "
"the Python function must be Tensors or "
"tf.TensorSpec objects.")
if any(not spec.is_compatible_with(other)
for spec, other in zip(self._flat_input_signature, flat_inputs)):
raise ValueError("Python inputs incompatible with input_signature: "
"inputs (%s), input_signature (%s)" %
(str(args), str(self._input_signature)))
args, kwargs = None, None
graph_function, args, kwargs = self._maybe_define_function(args, kwargs)
if self._input_signature:
args = self._input_signature
kwargs = {}
seen_names = set()
captured = frozenset(graph_function.graph.internal_captures)
allowed_positional = 0
if args:
for outer_arg in args:
# TODO(allenl): Consider allowing arguments with defaults in the Python
# function's signature to be passed as positional arguments to the
# concrete function.
if not isinstance(
outer_arg,
(ops.Tensor, resource_variable_ops.ResourceVariable,
tensor_spec.TensorSpec)):
break
allowed_positional += 1
# pylint: disable=protected-access
graph_function._num_positional_args = allowed_positional
graph_function._arg_keywords = []
# pylint: enable=protected-access
for arg in graph_function.graph.inputs:
if arg in captured:
break
user_arg_name = arg.op.get_attr("_user_specified_name")
if user_arg_name in seen_names:
raise ValueError(
("Unable to construct a concrete function for {} since some "
"arguments do not have unique names. Got two arguments named "
"'{}'. When constructing a concrete TensorFlow function from a "
"Python function which takes nested structures or variadic "
"positional arguments, pass unique names to tf.TensorSpec objects "
"used to identify these Tensor inputs. These names may then be "
"used as keyword arguments to the concrete function.")
.format(
self._python_function,
compat.as_str(arg.op.get_attr("_user_specified_name"))))
seen_names.add(user_arg_name)
graph_function._arg_keywords.append(user_arg_name) # pylint: disable=protected-access
return graph_function
def __get__(self, instance, owner):
"""Makes it possible to defun instance methods."""
del owner
# `instance` here is the instance that this `PolymorphicFunction` was
# accessed through; e.g., for
#
# class Foo(object):
#
# @function.defun
# def bar(self):
# ...
#
# foo = Foo()
# foo.bar() # `foo.bar` is a `PolymorphicFunction` instance
#
# then `instance` will be `foo` (and `owner` will be `Foo`). We create a
# new instance of PolymorphicFunction here to allow different instances each
# to create variables once, thereby allowing methods to be decorated with
# defun. Keeps a cache to avoid retracing the function every time the
# descriptor is accessed.
if instance not in self._descriptor_cache:
if instance is None:
return self
# If there is no instance-specific polymorphic func in the cache,
# we construct an instance-specific polymorphic function
# that uses a weak reference to the instance (so that the instance will
# be correctly gc'd).
      # And finally, add the wrapped function to the descriptor cache.
self._descriptor_cache[instance] = class_method_to_instance_method(
self, instance)
# Return the cached polymorphic function for the instance
return self._descriptor_cache[instance]
def _cache_key(self, args, kwargs):
"""Computes the cache key given inputs and execution context."""
if self._input_signature is None:
inputs = (args, kwargs) if kwargs else args
cache_key = pywrap_tensorflow.TFE_Py_EncodeArg(inputs)
else:
del args, kwargs
cache_key = self._flat_input_signature
ctx = context.context()
with ops.init_scope():
# The graph, or whether we're executing eagerly, should be a part of the
# cache key so we don't improperly capture tensors such as variables.
executing_eagerly = ctx.executing_eagerly()
execution_context = executing_eagerly or ops.get_default_graph()
# pylint: disable=protected-access
default_graph = ops.get_default_graph()
# TODO(b/117617952): The current distribution strategy will affect graph
# building (e.g. accessing different variables from different devices) and
# so requires retracing for each device.
uses_distribution_strategy = bool(
default_graph._distribution_strategy_stack)
if executing_eagerly:
colocation_stack = ()
uses_xla = ctx.device_spec.device_type == "TPU"
if uses_distribution_strategy or uses_xla:
device_functions = (pydev.merge_device(ctx.device_name),)
else:
device_functions = ()
else:
colocation_stack = tuple(default_graph._colocation_stack.peek_objs())
uses_xla = getattr(default_graph, "_xla_compile", False)
if (uses_distribution_strategy
or uses_xla
or func_graph_module.device_stack_has_callable(
default_graph._device_function_stack)):
# Putting the device in the cache key ensures that call-site device
# annotations are respected.
device_functions = tuple(default_graph._device_functions_outer_to_inner)
else:
device_functions = ()
# pylint: enable=protected-access
return (cache_key, execution_context, device_functions, colocation_stack,
uses_xla)
def _canonicalize_function_inputs(self, *args, **kwargs):
"""Canonicalizes `args` and `kwargs`.
Canonicalize the inputs to the Python function using its fullargspec. In
    particular, we parse the varargs and kwargs that this
`PolymorphicFunction` was called with into a tuple corresponding to the
Python function's positional (named) arguments and a dictionary
corresponding to its kwargs.
Args:
*args: The varargs this object was called with.
**kwargs: The keyword args this function was called with.
Returns:
A canonicalized ordering of the inputs.
Raises:
ValueError: If a keyword in `kwargs` cannot be matched with a positional
argument when an input signature is specified, or when the inputs
do not conform to the input signature.
"""
args = self._args_to_prepend + args
kwargs = dict(kwargs, **self._kwargs_to_include)
if not kwargs:
if self._default_values:
inputs = args + self._default_values[len(args) -
self._default_values_start_index:]
else:
inputs = args
else:
# Maps from index of arg to its corresponding value, according to `args`
# and `kwargs`; seeded with the default values for the named args that
# aren't in `args`.
arg_indices_to_values = {
index: default for index, default in six.iteritems(
self._arg_indices_to_default_values) if index >= len(args)
}
consumed_args = []
for arg, value in six.iteritems(kwargs):
index = self._args_to_indices.get(arg, None)
if index is not None:
arg_indices_to_values[index] = value
consumed_args.append(arg)
elif self._input_signature is not None:
raise ValueError("Cannot define a TensorFlow function from a Python "
"function with keyword arguments when "
"input_signature is provided.")
for arg in consumed_args:
# After this loop, `kwargs` will only contain true keyword arguments, as
# opposed to named arguments called in a keyword-like fashion.
kwargs.pop(arg)
inputs = args + _deterministic_dict_values(arg_indices_to_values)
flat_inputs = nest.flatten(inputs)
# Check for NumPy arrays in arguments and convert them to Tensors.
# TODO(nareshmodi): Skip ndarray conversion to tensor altogether, perhaps
# finding a way to store them directly in the cache key (currently not
# possible since ndarrays are not hashable).
need_packing = False
for index, value in enumerate(flat_inputs):
if type(value) == np.ndarray:
flat_inputs[index] = constant_op.constant(value)
need_packing = True
if need_packing:
inputs = nest.pack_sequence_as(structure=inputs,
flat_sequence=flat_inputs)
if self._input_signature is None:
return inputs, kwargs
else:
assert not kwargs
try:
nest.assert_same_structure(self._input_signature, inputs)
except (ValueError, TypeError):
raise ValueError("Structure of Python function inputs does not match "
"input_signature.")
if any(not pywrap_tensorflow.IsTensor(arg) for arg in flat_inputs):
raise ValueError("When input_signature is provided, all inputs to "
"the Python function must be Tensors.")
if any(not spec.is_compatible_with(other)
for spec, other in zip(self._flat_input_signature, flat_inputs)):
raise ValueError("Python inputs incompatible with input_signature: "
"inputs (%s), input_signature (%s)" %
(str(inputs), str(self._input_signature)))
return inputs, {}
def _maybe_define_function(self, args, kwargs):
"""Gets a function for these inputs, defining it if necessary.
`args` and `kwargs` can be None if this `PolymorphicFunction` was created
with an `input_signature`.
Args:
args: The varargs for the Python function.
kwargs: The keyword args for the Python function.
Returns:
A graph function corresponding to the input signature implied by args and
kwargs, as well as the inputs that the object should be called with.
Raises:
ValueError: If inputs are incompatible with the input signature.
TypeError: If the function inputs include non-hashable objects
"""
if self._input_signature is None or args is not None or kwargs is not None:
args, kwargs = self._canonicalize_function_inputs(*args, **kwargs)
cache_key = self._cache_key(args, kwargs)
with self._lock:
try:
graph_function = self._function_cache.get(cache_key, None)
except TypeError:
raise TypeError("Arguments supplied to `defun`-generated functions "
"must be hashable.")
if graph_function is None:
if self._input_signature is None:
arglen = len(args)
else:
arglen = len(self._input_signature)
arg_names = (
self._arg_names[:arglen]
+ [self._vararg_name] * (arglen - len(self._arg_names)))
graph_function = Function(
func_graph_module.func_graph_from_py_func(
self._name,
self._python_function,
args,
kwargs,
self._input_signature,
experimental_autograph=self._experimental_autograph,
arg_names=arg_names),
self._function_attributes)
self._function_cache[cache_key] = graph_function
return graph_function, args, kwargs
def register(func, *args, **kwargs):
"""Register a specialization of a PolymorphicFunction into the graph.
  This won't actually call the function with the inputs; it only puts the
  function definition into the graph. Registering the function with different
  input parameters will result in multiple versions of the function being
  registered in the graph.
Args:
    func: the PolymorphicFunction instance generated by a @defun decorator.
*args: input arguments for the Python function.
**kwargs: input keyword arguments for the Python function.
Returns:
a `Function` object specialized to inputs and execution context.
Raises:
ValueError: When the input function is not a defun wrapped python function.
"""
if not isinstance(func, PolymorphicFunction):
raise ValueError("Only defun function is allowed to be registered. "
"Got type: %s" % type(func))
concrete_func = func.get_concrete_function(*args, **kwargs)
concrete_func.add_to_graph(register_gradient_functions=True)
return concrete_func
def validate_signature(signature):
if any(not isinstance(arg, tensor_spec.TensorSpec)
for arg in nest.flatten(signature)):
raise TypeError("Invalid input_signature %s; input_signature must be "
"a possibly nested sequence of TensorSpec objects.")
def defun(func=None, input_signature=None, experimental_autograph=False):
"""Compiles a Python function into a callable TensorFlow graph.
`defun` (short for "define function") trace-compiles a Python function
composed of TensorFlow operations into a callable that executes a `tf.Graph`
containing those operations. The callable produced by `defun` contains only
the subgraph of TensorFlow operations that were executed when the Python
function was called with a particular input signature, defined as a list
of the shapes and dtypes of the Python function's Tensor-valued arguments and
the values of its non-Tensor Python objects. In particular, `defun` is _not_ a
compiler for arbitrary Python code.
When eager execution is enabled, the ability to create graphs from Python
  functions makes it possible to incrementally trade off debuggability and
interactivity for performance. Functions compiled with `defun` cannot be
inspected with `pdb` and `print` statements; however, executing a graph
generated by `defun` sometimes takes less time and memory than eagerly
executing the corresponding Python function, since specifying computations as
graphs allows for optimizations like automatic buffer reuse and
parallelization among ops. Note that executing a `defun`-compiled function
incurs a small constant overhead, so eagerly executing sufficiently small
Python functions might take less time than executing their corresponding
`defun`-generated graphs.
For a Python function to be compatible with `defun`, all of its arguments must
be hashable Python objects or lists thereof. The function itself may not
modify the list/map structure of its arguments. Additionally, it must return
zero or more `tf.Tensor` objects. If the Python function returns
a `tf.Variable`, its compiled version will return the value of that variable
as a `tf.Tensor`.
Executing a graph generated by `defun` respects device annotations (i.e.,
all `with tf.device` directives present in a Python function will also be
present in its corresponding graph), but it is not yet possible to execute the
generated graphs across multiple machines.
_Example Usage_
```python
import tensorflow as tf
tf.enable_eager_execution()
# A simple example.
def f(x, y):
return tf.reduce_mean(tf.multiply(x ** 2, 3) + y)
g = tf.contrib.eager.defun(f)
x = tf.constant([[2.0, 3.0]])
y = tf.constant([[3.0, -2.0]])
# `f` and `g` will return the same value, but `g` will be executed as a
# TensorFlow graph.
assert f(x, y).numpy() == g(x, y).numpy()
# `defun` is capable of compiling Python functions that close over Python
# objects, including Tensors and Variables.
@tf.contrib.eager.defun
def h():
return f(x, y)
assert (h().numpy() == f(x, y).numpy()).all()
# `defun` automatically lifts variables out of the graphs it creates,
# allowing you to compile the `call` methods of `tf.keras.layers.Layer` and
# `tf.keras.Model` objects.
class MyModel(tf.keras.Model):
def __init__(self, keep_probability=0.2):
super(MyModel, self).__init__()
self.dense1 = tf.keras.layers.Dense(4, activation=tf.nn.relu)
self.dense2 = tf.keras.layers.Dense(5, activation=tf.nn.softmax)
self.keep_probability = keep_probability
@tf.contrib.eager.defun
def call(self, inputs, training=True):
x = self.dense2(self.dense1(inputs))
if training:
return tf.nn.dropout(x, self.keep_probability)
else:
return x
model = MyModel()
model(x, training=True) # executes a graph, with dropout
model(x, training=False) # executes a graph, without dropout
# `defun`-compiled functions are differentiable.
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
with tf.GradientTape() as tape:
outputs = model(x)
gradient = tape.gradient(outputs, model.trainable_variables)
optimizer.apply_gradients((grad, var) for grad, var in zip(gradient,
model.trainable_variables))
```
When using `defun`, there are subtleties regarding inputs, Python control
flow, and variable creation that one should be aware of. For concreteness, let
`f` be a Python function that returns zero or more `tf.Tensor` objects and
let `F = defun(f)`. `F` builds a graph for each unique input signature it
sees, Python control flow is baked into graphs, and operations related to
variable initialization are automatically lifted out of the graphs that `F`
generates and placed in the eager context if executing eagerly or into an
outer graph otherwise.
_Input Signatures_
By default, `F = tf.contrib.eager.defun(f)` instantiates a separate graph
for every unique sequence of the shapes and dtypes of Tensor arguments and
the values of Python objects it is invoked with. For example, calling
  `F(tf.random_uniform([2]))` will execute a different graph than
  `F(tf.random_uniform([3]))` because the two inputs have different shapes.
The first time that `F(*args, **kwargs)` is called with a particular sequence
of Tensor shapes and dtypes and Python values, it constructs a graph by
tracing the execution of `f(*args, **kwargs)`; this graph is bound to an
input signature inferred from `(*args, **kwargs)` and cached for future reuse.
NumPy arrays passed as inputs to `F` are converted to `tf.Tensor` objects
before being passed to `f`, and are treated as Tensors for caching. This
allows a function to be called multiple times with NumPy arrays having
different values but the same shape and dtype without re-tracing each time.
`tf.contrib.eager.defun` caches graphs for your convenience, letting you
define TensorFlow functions without explicitly specifying their signatures.
However, this policy is conservative and potentially expensive; for example,
when different invocations of your function have differently-shaped Tensor
inputs, this policy might generate more graph functions than necessary. To
eliminate such costs, `tf.contrib.eager.defun` allows you to supply an
optional `input_signature` argument specifying the shapes and dtypes of the
inputs. In particular, the shapes may be partially unspecified, with `None`s
in the unknown dimensions. When an input signature is provided,
`tf.contrib.eager.defun` will only instantiate a single graph for the
decorated Python function. The following is an example:
```python
import tensorflow as tf
# The first `TensorSpec` below describes the shape and dtype of `words`,
# and the second describes the shape and dtype of `another_tensor`. Note that
# the last dimension of the `words` `TensorSpec` is left unspecified.
@tf.contrib.eager.defun(input_signature=[
tf.contrib.eager.TensorSpec(shape=[50, 300, None], dtype=tf.float32),
tf.contrib.eager.TensorSpec(shape=[300, 100], dtype=tf.float32)
])
def my_sequence_model(words, another_tensor):
...
# Note how the third dimension of the first input can vary freely.
  words = tf.random_uniform([50, 300, 10])
  second_input = tf.random_uniform([300, 100])
  my_sequence_model(words, second_input)
  words = tf.random_uniform([50, 300, 20])
  my_sequence_model(words, second_input)
  # Passing an input with an incompatible shape will raise an error.
  words = tf.random_uniform([50, 100, 20])
  my_sequence_model(words, second_input)  # <---- This will raise an error.
```
Python functions that are compiled with an `input_signature` must only accept
Tensors as arguments and must not take unnamed keyword arguments (**kwargs).
_Tracing_
Be aware that because `F` only logs TensorFlow operations, all the other
Python code that `f` executes will only shape the _construction_ of the graphs
that `F` executes: the Python code won't be executed when the graphs
themselves are executed, though it will be executed every time the Python
function is traced (and a given Python function might be traced multiple
times, once for each input signature it is invoked with). For example, whereas
the Python function
```python
import tensorflow as tf
import numpy as np
tf.enable_eager_execution()
def add_noise():
return tf.eye(5) + np.random.randn(5, 5)
```
  will return a different output every time it is invoked, the compiled function
`compiled = tf.contrib.eager.defun(add_noise)` will return the same value
every time it is called, since a particular random offset generated by NumPy
will be inserted into the graph as a TensorFlow constant. The solution is to
replace the call to `np.random.randn` with `tf.random_normal((5, 5))`.
_Python Side-Effects_
A corollary of the previous discussion on tracing is the following: If a
Python function `f` has Python side-effects, then executing `f` multiple times
will not necessarily be semantically equivalent to executing `F =
tf.contrib.eager.defun(f)` multiple times; this difference is due to the fact
that `defun` only captures the subgraph of TensorFlow operations that is
constructed when `f` is called in a graph-building context.
_Python Control Flow_.
  The structure of many machine learning computations depends upon whether one is
training or validating, and it is common to nest specialized logic under `if
training:` blocks. By mapping each input signature to a unique graph, `defun`
lets users transparently compile such code, as the following code snippet
demonstrates:
```python
import tensorflow as tf
tf.enable_eager_execution()
@tf.contrib.eager.defun
def lossy_matmul(W, x, training=True):
outputs = tf.matmul(W, x)
if training:
      outputs = tf.nn.dropout(outputs, keep_prob=0.2)
return outputs
W = tf.random_normal((3, 5))
x = tf.random_normal((5, 1))
# Executes a graph that applies dropout.
lossy_outputs = lossy_matmul(W, x, training=True)
# Executes a graph that does not apply dropout.
exact_outputs = lossy_matmul(W, x, training=False)
```
On the other hand, because `defun` generates graphs by tracing and not by
source code analysis, it fully unrolls Python `for` and `while` loops,
potentially creating large graphs. If your Python function has native loops
that run for many iterations, consider replacing them with `tf.while_loop`
operations.
When constructing graphs, `tf.Tensor` objects cannot be used as Python
`bool` objects. This means, for example, that you should replace code in `f`
resembling
```python
if tensor < 10:
true_fn()
else:
false_fn()
```
with `tf.cond(tensor < 10, true_fn, false_fn)`.
_Variables_
TensorFlow operations related to variable creation and initialization are
automatically lifted out of the graphs generated by `defun`. In practice, this
implies that variable creation and initialization only happen the first time
`F` is called, and that variables are reused every time thereafter. Many
TensorFlow APIs, like `tf.keras.layers.Layer` objects, create variables the
first time they are called and reuse them thereafter. Automatic variable
lifting makes it possible to compile these APIs without extra effort, at the
cost of introducing a discrepancy between the semantics of executing Python
functions and their corresponding compiled functions. For example:
```python
import tensorflow as tf
tf.enable_eager_execution()
def fn():
x = tf.Variable(0.0)
x.assign_add(1.0)
return x.read_value()
# `fn` is a Python function, so x is created, initialized, and destroyed upon
# every invocation
assert fn().numpy() == fn().numpy() == 1.0
compiled = tf.contrib.eager.defun(fn)
# Compiling `fn` with `defun` hoists all variables outside of the generated
# graph, so initialization happens exactly once.
assert compiled().numpy() == 1.0
assert compiled().numpy() == 2.0
```
Finally, because each input signature is bound to a unique graph, if your
Python function constructs `tf.Variable` objects, then each graph constructed
for that Python function will reference a unique set of variables. To
circumvent this problem, we recommend against compiling Python functions that
create `tf.Variable` objects. Instead, Python functions should either
lexically close over `tf.Variable` objects or accept them as arguments,
preferably encapsulated in an object-oriented container. If you must create
variables inside your Python function and you want each graph generated for it
to reference the same set of variables, add logic to your Python function that
ensures that variables are only created the first time it is called and are
reused for every subsequent invocation; note that this is precisely what
`tf.keras.layers.Layer` objects do, so we recommend using them to represent
variable-bearing computations whenever possible.
Args:
func: function to be compiled. If `func` is None, returns a
decorator that can be invoked with a single argument - `func`. The
end result is equivalent to providing all the arguments up front.
In other words, defun(input_signature=...)(func) is equivalent to
defun(func, input_signature=...). The former allows
the following use case:
@tf.contrib.eager.defun(input_signature=...)
def foo(...):
...
input_signature: A possibly nested sequence of
`tf.contrib.eager.TensorSpec` objects specifying the shapes and dtypes of
the Tensors that will be supplied to this function. If `None`, a separate
function is instantiated for each inferred input signature. If a
signature is specified, every input to `func` must be a `Tensor`, and
`func` cannot accept `**kwargs`.
experimental_autograph: Whether `func` should be compiled before
constructing the graph. See https://www.tensorflow.org/guide/autograph
for more information.
Returns:
If `func` is not None, returns a callable that will execute the compiled
function (and return zero or more `tf.Tensor` objects).
If `func` is None, returns a decorator that, when invoked with a single
`func` argument, returns a callable equivalent to the case above.
Raises:
TypeError: If `input_signature` is neither `None` nor a sequence of
`tf.contrib.eager.TensorSpec` objects.
"""
return defun_with_attributes(
func=func,
input_signature=input_signature,
experimental_autograph=experimental_autograph)
def defun_with_attributes(func=None,
input_signature=None,
attributes=None,
experimental_autograph=False):
"""Compiles a Python function into a callable TensorFlow graph.
This function supports adding extra function attributes. See detailed
  documentation in defun(). Currently this is not exposed in the public API
  since we don't expect users to use attributes directly, and attributes won't
  work by themselves. This assumption might change in the future.
Args:
func: function to be compiled.
input_signature: same as defun()'s input_signature.
    attributes: A dictionary of arguments which will be added to the function
      def as attributes. Currently only primitive types are supported as
      values, and only whitelisted attribute names are allowed. An
      unwhitelisted attribute name or unsupported value will result in a
      ValueError. `func_name` is also one of the whitelisted arguments; it is
      a Python string and sets the name for this `Function` in the graph.
experimental_autograph: same as defun()'s experimental_autograph.
Returns:
Same as the return value of defun, with attributes added to the function in
graph.
"""
if input_signature is not None:
validate_signature(input_signature)
# TODO(apassos): deal with captured global state. Deal with control flow.
def decorated(function):
try:
if attributes:
name = attributes.pop("func_name", function.__name__)
else:
name = function.__name__
except AttributeError:
name = "function"
return tf_decorator.make_decorator(
function,
PolymorphicFunction(
function,
name,
input_signature=input_signature,
attributes=attributes,
experimental_autograph=experimental_autograph))
# This code path is for the `foo = tfe.defun(foo, ...)` use case
if func is not None:
return decorated(func)
# This code path is for the
#
# @tfe.defun(...)
# def foo(...):
# ...
#
# use case, which is equivalent to `foo = tfe.defun(...)(foo)`
return decorated
def class_method_to_instance_method(original_function, instance):
"""Constructs a new PolymorphicFunction with `self` bound."""
def make_partial_py_func(py_func, weak_instance):
return lambda *args, **kwargs: py_func(weak_instance(), *args, **kwargs)
weak_instance = weakref.ref(instance)
# pylint: disable=protected-access
# We make a dummy MethodType object to generate the correct bound method
# signature. The actual call is to a function with a weak reference to
# `instance`.
instance_func = type(original_function)(
tf_decorator.make_decorator(
types_lib.MethodType(original_function.python_function, False),
make_partial_py_func(original_function.python_function,
weak_instance)),
name=original_function._name,
input_signature=original_function._input_signature)
# pylint: enable=protected-access
# And we wrap the function with tf_decorator so inspection works correctly
wrapped_instance_func = tf_decorator.make_decorator(
original_function.python_function, instance_func)
return wrapped_instance_func
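# A minimal, hypothetical usage sketch of the trace-compilation flow described
# in the defun() docstring above. It assumes the tf.contrib.eager aliases from
# that docstring; `square` and its input are illustrative only.
def _demo_defun_usage():
  """Hypothetical demo of defining and calling a defun-compiled function."""
  import tensorflow as tf
  tf.enable_eager_execution()

  @tf.contrib.eager.defun
  def square(x):
    return tf.multiply(x, x)

  x = tf.constant([[2.0, 3.0]])
  # The first call traces `square` for this input signature; later calls with
  # the same shapes and dtypes reuse the cached graph function.
  return square(x)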
| 41.342986 | 104 | 0.701838 |
2c69943a79afa93b4d94888b96deecc5b0f6c639 | 2,292 | py | Python
| acapy_client/models/v10_presentation_send_request_request.py | dbluhm/acapy-client | d92ef607ba2ff1152ec15429f2edb20976991424 | ["Apache-2.0"] | 4 | 2021-08-05T09:20:34.000Z | 2021-08-08T19:37:29.000Z
| acapy_client/models/v10_presentation_send_request_request.py | dbluhm/acapy-client | d92ef607ba2ff1152ec15429f2edb20976991424 | ["Apache-2.0"] | null | null | null
| acapy_client/models/v10_presentation_send_request_request.py | dbluhm/acapy-client | d92ef607ba2ff1152ec15429f2edb20976991424 | ["Apache-2.0"] | 2 | 2021-08-12T18:18:45.000Z | 2021-08-14T13:22:28.000Z |
from typing import Any, Dict, List, Type, TypeVar, Union
import attr
from ..models.indy_proof_request import IndyProofRequest
from ..types import UNSET, Unset
T = TypeVar("T", bound="V10PresentationSendRequestRequest")
@attr.s(auto_attribs=True)
class V10PresentationSendRequestRequest:
""" """
connection_id: str
proof_request: IndyProofRequest
comment: Union[Unset, None, str] = UNSET
trace: Union[Unset, bool] = UNSET
additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict)
def to_dict(self) -> Dict[str, Any]:
connection_id = self.connection_id
proof_request = self.proof_request.to_dict()
comment = self.comment
trace = self.trace
field_dict: Dict[str, Any] = {}
field_dict.update(self.additional_properties)
field_dict.update(
{
"connection_id": connection_id,
"proof_request": proof_request,
}
)
if comment is not UNSET:
field_dict["comment"] = comment
if trace is not UNSET:
field_dict["trace"] = trace
return field_dict
@classmethod
def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:
d = src_dict.copy()
connection_id = d.pop("connection_id")
proof_request = IndyProofRequest.from_dict(d.pop("proof_request"))
comment = d.pop("comment", UNSET)
trace = d.pop("trace", UNSET)
v10_presentation_send_request_request = cls(
connection_id=connection_id,
proof_request=proof_request,
comment=comment,
trace=trace,
)
v10_presentation_send_request_request.additional_properties = d
return v10_presentation_send_request_request
@property
def additional_keys(self) -> List[str]:
return list(self.additional_properties.keys())
def __getitem__(self, key: str) -> Any:
return self.additional_properties[key]
def __setitem__(self, key: str, value: Any) -> None:
self.additional_properties[key] = value
def __delitem__(self, key: str) -> None:
del self.additional_properties[key]
def __contains__(self, key: str) -> bool:
return key in self.additional_properties
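# A minimal, hypothetical round-trip sketch for the generated model above. The
# payload values are illustrative only; the proof_request body must match
# whatever fields IndyProofRequest.from_dict() actually expects.
def _demo_round_trip():
    payload = {
        "connection_id": "3fa85f64-5717-4562-b3fc-2c963f66afa6",  # illustrative id
        "proof_request": {},  # illustrative; may need real IndyProofRequest fields
        "comment": "free-form comment",
        "trace": False,
    }
    request = V10PresentationSendRequestRequest.from_dict(payload)
    # to_dict() always emits the required keys and only those optional keys
    # that were actually set (UNSET values are skipped).
    return request.to_dict()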
| 29.012658 | 77 | 0.643543 |
e7256747040363a351644109c0396582708e5020 | 943 | py | Python
| tests/test_units/test_area.py | guillaume-florent/corelib | 9e25b862b5eaf1fe1a9b68af070a743184f992ba | ["MIT"] | 1 | 2018-05-05T02:11:59.000Z | 2018-05-05T02:11:59.000Z
| tests/test_units/test_area.py | guillaume-florent/corelib | 9e25b862b5eaf1fe1a9b68af070a743184f992ba | ["MIT"] | null | null | null
| tests/test_units/test_area.py | guillaume-florent/corelib | 9e25b862b5eaf1fe1a9b68af070a743184f992ba | ["MIT"] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
r"""area.py tests"""
from corelib.units.area import cm2, convert
from corelib.units import convert_area
def test_areas():
r"""Test expected values"""
expected_value = 1e4
atol = 1e-10
assert expected_value - atol <= cm2(m2=1.) <= expected_value + atol
def test_convert_function():
r"""Test shortcut/api convert function"""
expected_value = 1e4
atol = 1e-10
assert expected_value - atol <= convert(1., "cm2", "m2") <= expected_value + atol
assert expected_value - atol <= convert(1., to_unit="cm2", from_unit="m2") <= expected_value + atol
def test_convert_area_function():
r"""Test shortcut/api convert function"""
expected_value = 1e4
atol = 1e-10
assert expected_value - atol <= convert_area(1., "cm2", "m2") <= expected_value + atol
assert expected_value - atol <= convert_area(1., to_unit="cm2", from_unit="m2") <= expected_value + atol
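# A hypothetical extra check, assuming the convert() API shown above is
# invertible: converting 1 m2 to cm2 and back should return the original value.
def test_convert_round_trip():
    r"""Round-trip check for the convert function (illustrative)"""
    expected_value = 1.
    atol = 1e-10
    value_in_cm2 = convert(1., "cm2", "m2")          # 1 m2 expressed in cm2
    back_to_m2 = convert(value_in_cm2, "m2", "cm2")  # and back to m2
    assert expected_value - atol <= back_to_m2 <= expected_value + atol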
| 30.419355 | 108 | 0.677625 |
e5177d204145c4c598aff607d0f1607293e83497 | 6,969 | py | Python
| configure_data.py | AnastasiiaNovikova/sentiment-discovery | eaae55921038d674e2f16fbd0bfd2e63194a9545 | ["BSD-3-Clause"] | null | null | null
| configure_data.py | AnastasiiaNovikova/sentiment-discovery | eaae55921038d674e2f16fbd0bfd2e63194a9545 | ["BSD-3-Clause"] | null | null | null
| configure_data.py | AnastasiiaNovikova/sentiment-discovery | eaae55921038d674e2f16fbd0bfd2e63194a9545 | ["BSD-3-Clause"] | 1 | 2019-03-13T11:43:13.000Z | 2019-03-13T11:43:13.000Z |
import os
import copy
import data_utils
class DataConfig(object):
def __init__(self, parser, defaults={}):
super(DataConfig,self).__init__()
self.parser = parser
self.defaults = defaults
def apply(self, opt):
print('configuring data')
for k, v in self.defaults.items():
if not hasattr(opt, k):
setattr(opt, k, v)
return make_loaders(opt)
def set_defaults(self, **kwargs):
for k, v in kwargs.items():
self.defaults[k] = v
def make_loaders(opt):
"""makes training/val/test"""
batch_size = opt.batch_size * opt.world_size
eval_batch_size = opt.eval_batch_size * opt.world_size
seq_length = opt.seq_length
if seq_length < 0:
seq_length = seq_length * opt.world_size
eval_seq_length = opt.eval_seq_length
if opt.eval_seq_length < 0:
eval_seq_length = eval_seq_length * opt.world_size
# TODO: fix data race in lazy loader
# data_loader_args = {'num_workers': 10, 'shuffle': opt.shuffle, 'batch_size': batch_size,
data_loader_args = {'num_workers': 1, 'shuffle': opt.shuffle, 'batch_size': batch_size,
'pin_memory': True, 'transpose': opt.transpose, 'distributed': opt.world_size > 1,
'rank': opt.rank, 'world_size': opt.world_size, 'drop_last': opt.world_size > 1}
split = get_split(opt)
data_set_args = {
'path': opt.data, 'seq_length': seq_length, 'cache': opt.cache,
'text_key': opt.text_key, 'label_key': opt.label_key, 'lazy': opt.lazy,
'preprocess': opt.preprocess, 'persist_state': opt.persist_state,
'cache_size': opt.batch_size, 'delim': opt.delim, 'num_shards': opt.num_shards,
'ds_type': opt.data_set_type, 'split': split, 'loose': opt.loose_json}
eval_loader_args = copy.copy(data_loader_args)
eval_set_args = copy.copy(data_set_args)
eval_set_args['split']=[1.]
# if optional eval args were set then replace their equivalent values in the arg dict
if opt.eval_batch_size != 0:
eval_loader_args['batch_size'] = eval_batch_size
eval_set_args['cache_size'] = eval_batch_size
if opt.eval_seq_length != 0:
eval_set_args['seq_length'] = eval_seq_length
if opt.eval_text_key != 'None':
eval_set_args['text_key'] = opt.eval_text_key
if opt.eval_label_key != 'None':
eval_set_args['label_key'] = opt.eval_label_key
train = None
valid = None
test = None
if opt.data != 'None':
train = data_utils.make_dataset(**data_set_args)
if should_split(split):
train, valid, test = train
if opt.valid != 'None':
eval_set_args['path'] = opt.valid
valid = data_utils.make_dataset(**eval_set_args)
if test is None and opt.test != 'None':
eval_set_args['path'] = opt.test
test = data_utils.make_dataset(**eval_set_args)
if train is not None and opt.batch_size > 0:
train = data_utils.DataLoader(train, **data_loader_args)
if valid is not None:
if opt.data_set_type == 'unsupervised':
if opt.eval_seq_length != 0:
valid.set_seq_len(eval_seq_length)
if opt.val_shards != 0:
valid.set_num_shards(opt.val_shards)
valid = data_utils.DataLoader(valid, **eval_loader_args)
if test is not None:
if opt.data_set_type == 'unsupervised':
if opt.eval_seq_length != 0:
test.set_seq_len(eval_seq_length)
if opt.test_shards != 0:
test.set_num_shards(opt.test_shards)
test = data_utils.DataLoader(test, **eval_loader_args)
return train, valid, test
def should_split(split):
return max(split) != 1.
def get_split(opt):
splits = []
if opt.split.find(',') != -1:
splits = [float(s) for s in opt.split.split(',')]
elif opt.split.find('/') != -1:
splits = [float(s) for s in opt.split.split('/')]
else:
splits = [float(opt.split)]
split_total = sum(splits)
if split_total < 1.:
splits.append(1-split_total)
while len(splits) < 3:
splits.append(0.)
splits = splits[:3]
if opt.valid != 'None':
splits[1] = 0.
if opt.test != 'None':
splits[2] = 0.
final_sum = sum(splits)
return [s/final_sum for s in splits]
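# A minimal, hypothetical walk-through of get_split()/should_split() above; the
# argparse.Namespace simply stands in for the parsed `opt` object.
def _demo_get_split():
    import argparse
    opt = argparse.Namespace(split='0.8,0.1,0.1', valid='None', test='None')
    # '0.8,0.1,0.1' is parsed and normalized to [0.8, 0.1, 0.1]; should_split()
    # is True because no single proportion equals 1.
    splits = get_split(opt)
    assert should_split(splits)
    return splits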
def configure_data(parser):
"""add cmdline flags for configuring datasets"""
main_parser = parser
parser = parser.add_argument_group('data options')
parser.add_argument('--data', default='./data/imdb/unsup.json',
help="""Filename for training""")
parser.add_argument('--valid', default='None',
help="""Filename for validation""")
parser.add_argument('--test', default='None',
help="""Filename for testing""")
parser.add_argument('--batch_size', type=int, default=128,
help='Data Loader batch size')
parser.add_argument('--eval_batch_size', type=int, default=0,
help='Data Loader batch size for evaluation datasets')
parser.add_argument('--data_size', type=int, default=256,
help='number of tokens in data')
parser.add_argument('--loose_json', action='store_true',
help='Use loose json (one json-formatted string per newline), instead of tight json (data file is one json string)')
parser.add_argument('--preprocess', action='store_true',
help='force preprocessing of datasets')
parser.add_argument('--delim', default=',',
help='delimiter used to parse csv testfiles')
parser.add_argument('--split', default='1.',
help='comma-separated list of proportions for training, validation, and test split')
parser.add_argument('--text_key', default='sentence',
help='key to use to extract text from json/csv')
parser.add_argument('--label_key', default='label',
help='key to use to extract labels from json/csv')
parser.add_argument('--eval_text_key', default='None',
help='key to use to extract text from json/csv evaluation datasets')
parser.add_argument('--eval_label_key', default='None',
help='key to use to extract labels from json/csv evaluation datasets')
defaults = {
'world_size': 1,
'rank': -1,
'num_shards': 1002,
'val_shards': 0,
'test_shards': 0,
'cache': 0,
'persist_state': 0,
'lazy': False,
'shuffle': False,
'transpose': False,
'data_set_type': 'supervised',
'seq_length': 256,
'eval_seq_length': 256,
}
return DataConfig(main_parser, defaults=defaults), parser
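# A minimal, hypothetical wiring sketch showing how configure_data() is meant
# to be combined with argparse; the data path below is illustrative only.
def _demo_configure_data():
    import argparse
    parser = argparse.ArgumentParser(description='configure_data demo')
    data_config, _data_group = configure_data(parser)
    opt = parser.parse_args(['--data', './data/imdb/unsup.json'])
    # apply() copies any missing defaults onto opt and then calls
    # make_loaders(opt) to build the train/valid/test loaders.
    train, valid, test = data_config.apply(opt)
    return train, valid, test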
| 42.493902 | 140 | 0.602525 |
059c4d8c5db93c327d9dbb3ed8d8bb89726f90a7 | 3,489 | py | Python
| asposepdfcloud/models/text_horizontal_alignment.py | kaferi/aspose-pdf-cloud-python | 48f70742fec1e41644ec0b658db3f174ba845304 | ["MIT"] | 7 | 2018-06-11T17:44:44.000Z | 2022-02-08T05:52:48.000Z
| asposepdfcloud/models/text_horizontal_alignment.py | kaferi/aspose-pdf-cloud-python | 48f70742fec1e41644ec0b658db3f174ba845304 | ["MIT"] | 1 | 2021-03-20T22:16:15.000Z | 2021-06-27T15:11:52.000Z
| asposepdfcloud/models/text_horizontal_alignment.py | kaferi/aspose-pdf-cloud-python | 48f70742fec1e41644ec0b658db3f174ba845304 | ["MIT"] | 4 | 2018-04-18T19:41:12.000Z | 2021-06-21T13:12:24.000Z |
# coding: utf-8
"""
Aspose.PDF Cloud API Reference
Copyright (c) 2021 Aspose.PDF Cloud
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
OpenAPI spec version: 3.0
"""
from pprint import pformat
from six import iteritems
import re
class TextHorizontalAlignment(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
allowed enum values
"""
LEFT = "Left"
RIGHT = "Right"
CENTER = "Center"
JUSTIFY = "Justify"
FULLJUSTIFY = "FullJustify"
NONE = "None"
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
}
attribute_map = {
}
def __init__(self):
"""
TextHorizontalAlignment - a model defined in Swagger
"""
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, TextHorizontalAlignment):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
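# A minimal, hypothetical usage sketch that only touches members defined above.
def _demo_text_horizontal_alignment():
    alignment = TextHorizontalAlignment()
    # The allowed enum values are exposed as string constants on the class.
    assert TextHorizontalAlignment.CENTER == "Center"
    # With no swagger-declared attributes, to_dict() returns an empty dict and
    # to_str() is just its pretty-printed form.
    assert alignment.to_dict() == {}
    return alignment.to_str()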
| 27.690476 | 78 | 0.603038 |
6266f28dd64c7e5050489c76c1394d171d6d93de | 116 | py | Python
| venv/Lib/site-packages/_pytest/_version.py | sukhjindersukh/todo_api_automation | 55ab6ab836a5f8f158ba78d550978e8e98d7bcb2 | ["MIT"] | null | null | null
| venv/Lib/site-packages/_pytest/_version.py | sukhjindersukh/todo_api_automation | 55ab6ab836a5f8f158ba78d550978e8e98d7bcb2 | ["MIT"] | null | null | null
| venv/Lib/site-packages/_pytest/_version.py | sukhjindersukh/todo_api_automation | 55ab6ab836a5f8f158ba78d550978e8e98d7bcb2 | ["MIT"] | null | null | null |
# coding: utf-8
# file generated by setuptools_scm
# don't change, don't track in version control
version = '4.6.4'
| 23.2 | 46 | 0.724138 |
d80074a7bd4ba376fd730bbfa139c50fe2404c5e | 366 | py | Python
| presidio_evaluator/dataset_formatters/dataset_formatter.py | msebragge/presidio-research | 305d18177c1cd3dc6bfac725677b1116a89d6f32 | ["MIT"] | 55 | 2020-01-10T01:27:59.000Z | 2022-02-25T09:43:36.000Z
| presidio_evaluator/dataset_formatters/dataset_formatter.py | msebragge/presidio-research | 305d18177c1cd3dc6bfac725677b1116a89d6f32 | ["MIT"] | 19 | 2020-02-28T20:17:43.000Z | 2021-12-22T19:18:12.000Z
| presidio_evaluator/dataset_formatters/dataset_formatter.py | msebragge/presidio-research | 305d18177c1cd3dc6bfac725677b1116a89d6f32 | ["MIT"] | 30 | 2020-01-25T21:46:14.000Z | 2021-12-09T06:05:24.000Z |
from abc import ABC, abstractmethod
from typing import List
from presidio_evaluator import InputSample
class DatasetFormatter(ABC):
@abstractmethod
def to_input_samples(self) -> List[InputSample]:
"""
Translate a dataset structure into a list of documents, to be used by models and for evaluation
:return:
"""
pass
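# A minimal, hypothetical subclass sketch. CsvDatasetFormatter and its
# constructor are illustrative only; the real InputSample constructor is
# defined elsewhere in presidio_evaluator and may expect different arguments.
class CsvDatasetFormatter(DatasetFormatter):
    """Illustrative formatter that would turn rows of a CSV file into samples"""

    def __init__(self, csv_path: str):
        self.csv_path = csv_path

    def to_input_samples(self) -> List[InputSample]:
        samples: List[InputSample] = []
        # A real implementation would read self.csv_path and append one
        # InputSample per row; returning an empty list keeps the sketch safe.
        return samples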
| 24.4 | 103 | 0.691257 |
48625018246dfbebaca090eb48100e018a2f49ba | 12,182 | py | Python
| omaha/omaha_version_utils.py | huhisoft/omaha | 5ba31ce6214feca9e5bb8d70f30b74e4ac1c659e | ["Apache-2.0"] | 1 | 2021-11-18T19:23:31.000Z | 2021-11-18T19:23:31.000Z
| omaha/omaha_version_utils.py | huhisoft/omaha | 5ba31ce6214feca9e5bb8d70f30b74e4ac1c659e | ["Apache-2.0"] | null | null | null
| omaha/omaha_version_utils.py | huhisoft/omaha | 5ba31ce6214feca9e5bb8d70f30b74e4ac1c659e | ["Apache-2.0"] | null | null | null |
#!/usr/bin/python2.4
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
"""Constants and utilities related to Omaha and tools versions"""
_ONECLICK_PLUGIN_NAME = 'npHuhiOneClick'
_UPDATE_PLUGIN_NAME = 'npHuhiUpdate'
_BHO_NAME = 'GoopdateBho'
_CRASH_HANDLER_NAME = 'HuhiCrashHandler'
# List of languages that are fully supported in the current build.
_OMAHA_LANGUAGES = [
'am',
'ar',
'bg',
'bn',
'ca',
'cs',
'da',
'de',
'el',
'en',
'en-GB',
'es',
'es-419',
'et',
'fa',
'fi',
'fil',
'fr',
'gu',
'hi',
'hr',
'hu',
'id',
'is',
'it',
'iw',
'ja',
'kn',
'ko',
'lt',
'lv',
'ml',
'mr',
'ms',
'nl',
'no',
'pl',
'pt-BR',
'pt-PT',
'ro',
'ru',
'sk',
'sl',
'sr',
'sv',
'sw',
'ta',
'te',
'th',
'tr',
'uk',
'ur',
'vi',
'zh-CN',
'zh-TW',
]
# The shell and goopdate.dll contain additional languages.
# 'userdefault' addresses apps that don't look up the resource for the OS
# language. See http://b/1328652.
_ADDITIONAL_SHELL_LANGUAGES = [
'or',
'userdefault',
'zh-HK',
]
VC71 = 1310 # VC2003/VC71 (not supported by the current build).
VC80 = 1400 # VC2005/VC80
VC90 = 1500 # VC2008/VC90 (not supported by the current build).
VC100 = 1600 # VC2010/VC10
VC110 = 1700 # VC2012/VC11 (not supported by the current build).
VC120 = 1800 # VC2013/VC12
VC140 = 1900 # VC2015/VC14
def _IsSupportedOmaha2Version(omaha_version):
"""Returns true if omaha_version is an Omaha 2 version and is supported."""
return (omaha_version[0] == 1 and
omaha_version[1] == 2 and
omaha_version[2] >= 183)
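# A minimal, hypothetical check of the predicate above: only 1.2 builds at or
# after 1.2.183 count as supported Omaha 2 versions. The sample version lists
# are illustrative only.
def _DemoIsSupportedOmaha2Version():
  """Hypothetical demo of _IsSupportedOmaha2Version()."""
  assert _IsSupportedOmaha2Version([1, 2, 183, 9])
  assert not _IsSupportedOmaha2Version([1, 2, 131, 7])
  assert not _IsSupportedOmaha2Version([1, 3, 21, 85])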
# All languages supported by this script currently have the same set of
# languages, so the omaha_version_info parameter is unused.
def _GetMetainstallerPayloadFilenames(prefix,
update_plugin_filename,
bho_filename,
languages,
omaha_version):
"""Returns list of metainstaller payload files for specified Omaha version."""
plugin_dll_name = '%s%s' % (prefix, update_plugin_filename)
bho_dll_name = '%s%s' % (prefix, bho_filename)
# The list of files below needs to be kept in sync with the list in
# SetupFiles::BuildFileLists().
# TODO(omaha): Move the other filename defines in main.scons into this file
# and allow all filenames to be customized. At the moment, while the plugin
# names are generated in one place due to version numbers, most of the other
# files (googleupdate.exe, goopdateres_*.dll, etc.) are hardcoded all over
# the place, and require a ton of point fixes to customize.
payload_files = [
'HuhiUpdate.exe',
'%s.exe' % _CRASH_HANDLER_NAME,
'%sgoopdate.dll' % (prefix),
plugin_dll_name,
bho_dll_name,
'HuhiUpdateHelper.msi',
'HuhiUpdateBroker.exe',
'HuhiUpdateOnDemand.exe',
'HuhiUpdateComRegisterShell64.exe',
'HuhiUpdateWebPlugin.exe',
'%spsmachine.dll' % (prefix),
'%spsmachine_64.dll' % (prefix),
'%spsuser.dll' % (prefix),
'%spsuser_64.dll' % (prefix),
]
if (omaha_version[0] >= 1 and
omaha_version[1] >= 3 and
omaha_version[2] >= 13):
# The BHO is not built yet.
payload_files.remove(bho_dll_name)
elif _IsSupportedOmaha2Version(omaha_version):
payload_files.remove(plugin_dll_name)
payload_files.remove('HuhiUpdateBroker.exe')
payload_files.remove('HuhiUpdateOnDemand.exe')
payload_files.remove('HuhiUpdateComRegisterShell64.exe')
payload_files.remove('psmachine.dll')
payload_files.remove('psmachine_64.dll')
payload_files.remove('psuser.dll')
payload_files.remove('psuser_64.dll')
else:
raise Exception('Unsupported version: ' +
ConvertVersionToString(omaha_version))
if (omaha_version[0] >= 1 and
omaha_version[1] >= 3 and
(omaha_version[2] >= 22 or
(omaha_version[2] == 21 and omaha_version[3] >= 85))):
# 64-bit crash handler is added on 1.3.21.85 and later
payload_files.append('%s64.exe' % _CRASH_HANDLER_NAME)
if (omaha_version[0] >= 1 and
omaha_version[1] >= 3 and
(omaha_version[2] >= 32)):
# added with 1.3.32.1 and later
payload_files.append('HuhiUpdateCore.exe')
for language in languages:
payload_files += ['%sgoopdateres_%s.dll' % (prefix, language)]
return payload_files
def ConvertVersionToString(version):
"""Converts a four-element version list to a version string."""
return '%d.%d.%d.%d' % (version[0], version[1], version[2], version[3])
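# A minimal, hypothetical example of ConvertVersionToString(); the version
# list below is illustrative only.
def _DemoConvertVersionToString():
  """Hypothetical demo of ConvertVersionToString()."""
  assert ConvertVersionToString([1, 3, 33, 5]) == '1.3.33.5'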
def GetONECLICK_PLUGIN_NAME(): # pylint: disable-msg=C6409
"""Returns the value of the ONECLICK_PLUGIN_NAME define for the C++ code."""
return _ONECLICK_PLUGIN_NAME
def GetUPDATE_PLUGIN_NAME(): # pylint: disable-msg=C6409
"""Returns the value of the UPDATE_PLUGIN_NAME define for the C++ code."""
return _UPDATE_PLUGIN_NAME
def GetBHO_NAME(): # pylint: disable-msg=C6409
"""Returns the value of the BHO_NAME define for the C++ code."""
return _BHO_NAME
def GetCRASH_HANDLER_NAME(): # pylint: disable-msg=C6409
"""Returns the value of the CRASH_HANDLER_NAME define for the C++ code."""
return _CRASH_HANDLER_NAME
def GetLanguagesForVersion(omaha_version):
"""Returns a list of languages supported by omaha_version."""
# Make a copy in case the list is modified below.
supported_languages = list(_OMAHA_LANGUAGES)
# When languages are added, add a version check for older versions without the
# new languages and remove the new languages from supported_languages.
if (omaha_version[0] == 1 and
omaha_version[1] == 3 and
omaha_version[2] >= 21):
# All languages are supported.
pass
elif _IsSupportedOmaha2Version(omaha_version):
# All current languages are supported. 'or' was also supported.
supported_languages += ['or']
supported_languages.remove('am')
supported_languages.remove('sw')
else:
raise Exception('Unsupported version: ' +
ConvertVersionToString(omaha_version))
return supported_languages
def GetShellLanguagesForVersion(omaha_version):
"""Returns a list of languages supported by the omaha_version shell."""
# Silence PyLint. All languages supported by this script currently have the
# same set of languages, so this variable is unused.
omaha_version = omaha_version
return _OMAHA_LANGUAGES + _ADDITIONAL_SHELL_LANGUAGES
class OmahaVersionInfo(object):
"""Contains information about a specific version of Omaha.
Attributes:
filename_prefix: Prefix to use for all output files.
version_major: Major version.
version_minor: Minor version.
version_build: Build version.
version_patch: Patch version.
oneclick_plugin_version: Version of the OneClick plug-in.
oneclick_plugin_filename: Name of the signed OneClick DLL.
update_plugin_version: Version of the Omaha 3 plug-in.
update_plugin_filename: Name of the signed Omaha 3 plug-in DLL.
bho_filename: Name of the signed BHO DLL.
crash_handler_filename: Name of the Crash Handler EXE.
oneclick_signed_file_info: SignedFileInfo object for the OneClick DLL.
bho_signed_file_info: SignedFileInfo object for the BHO DLL.
"""
def __init__(self, version_file):
"""Initializes the class based on data from a VERSION file."""
self._ReadFile(version_file)
self.filename_prefix = ''
# Objects containing more properties used to build the file.
self.oneclick_signed_file_info = SignedFileInfo(
_ONECLICK_PLUGIN_NAME,
'dll',
self.oneclick_plugin_version)
self.plugin_signed_file_info = SignedFileInfo(
_UPDATE_PLUGIN_NAME,
'dll',
self.update_plugin_version)
self.bho_signed_file_info = SignedFileInfo(_BHO_NAME, 'dll')
# Simple properties for callers that only need the final filename. Not
# affected by internal build changes.
self.oneclick_plugin_filename = self.oneclick_signed_file_info.filename
self.update_plugin_filename = self.plugin_signed_file_info.filename
self.bho_filename = self.bho_signed_file_info.filename
self.crash_handler_filename = _CRASH_HANDLER_NAME
def _ReadFile(self, version_file):
"""Reads and stores data from a VERSION file."""
execfile(version_file, globals())
# Silence Pylint. Values from version_file are not defined in this file.
# E0602: Undefined variable.
# pylint: disable-msg=E0602
if version_patch > 0:
incrementing_value = version_patch
incrementing_value_name = 'patch'
else:
incrementing_value = version_build
incrementing_value_name = 'build'
if 0 == incrementing_value % 2:
raise Exception('ERROR: By convention, the %s number in VERSION '
'(currently %d) should be odd.' %
(incrementing_value_name, incrementing_value))
self.version_major = version_major
self.version_minor = version_minor
self.version_build = version_build
self.version_patch = version_patch
self.oneclick_plugin_version = oneclick_plugin_version
# update_plugin_version does not exist in Omaha 2 VERSION file. Handle this.
try:
self.update_plugin_version = update_plugin_version
except NameError:
if _IsSupportedOmaha2Version(self.GetVersion()):
self.update_plugin_version = -1
else:
raise
# pylint: enable-msg=E0602
def MakeTestVersion(self, delta=1):
"""Changes this object to be for a TEST version of Omaha."""
if delta <= 0:
raise Exception('Delta must be greater than 0.')
# If we're doing a patch, increment patch; else, increment build.
if self.version_patch > 0:
self.version_patch += delta
else:
self.version_build += delta
self.filename_prefix = 'TEST_'
def GetVersion(self):
"""Returns the version elements as a list."""
return [self.version_major,
self.version_minor,
self.version_build,
self.version_patch
]
def GetVersionString(self):
"""Returns the version as a string."""
return ConvertVersionToString(self.GetVersion())
def GetSupportedLanguages(self):
"""Returns a list of languages supported by this version."""
return GetLanguagesForVersion(self.GetVersion())
def GetMetainstallerPayloadFilenames(self):
"""Returns list of metainstaller payload files for this version of Omaha."""
return _GetMetainstallerPayloadFilenames(self.filename_prefix,
self.update_plugin_filename,
self.bho_filename,
self.GetSupportedLanguages(),
self.GetVersion())
class SignedFileInfo(object):
"""Contains information, including intermediate names, for signed file."""
def __init__(self, unversioned_name, extension, file_version=None):
"""Initializes the class members based on the parameters."""
if file_version:
base_name = '%s%d' % (unversioned_name, file_version)
else:
base_name = unversioned_name
self.filename_base = base_name
self.filename = '%s.%s' % (self.filename_base, extension)
self.unsigned_filename_base = '%s_unsigned' % base_name
self.unsigned_filename = '%s.%s' % (self.unsigned_filename_base, extension)
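To make the naming scheme concrete, here is a small illustrative sketch (not part of the build script); the plug-in name 'npExample' and the version number are made-up values, and it assumes the SignedFileInfo class above is in scope.

# Illustrative only; 'npExample', 'example_bho' and the version are invented values.
info = SignedFileInfo('npExample', 'dll', 3)
print(info.filename_base)       # npExample3
print(info.filename)            # npExample3.dll
print(info.unsigned_filename)   # npExample3_unsigned.dll
bho_info = SignedFileInfo('example_bho', 'dll')
print(bho_info.filename)        # example_bho.dll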
| 32.227513 | 80 | 0.669102 |
86b928c5e304308fe9ae4b7530ecce29fad3594c | 1,576 | py | Python | tests/test_comment.py | barackmaund1/flaskblog | 3b6c40612b3483b37b60eaeb807c5f33e6d00505 | ["MIT"] | null | null | null | tests/test_comment.py | barackmaund1/flaskblog | 3b6c40612b3483b37b60eaeb807c5f33e6d00505 | ["MIT"] | 2 | 2021-06-08T21:32:02.000Z | 2022-03-12T00:29:24.000Z | tests/test_comment.py | barackmaund1/flaskblog | 3b6c40612b3483b37b60eaeb807c5f33e6d00505 | ["MIT"] | null | null | null |
import unittest
from app.models import User,Comments,Post
from app import db
class CommentInstanceTest(unittest.TestCase):
    def setUp(self):
        self.username = User(username='albert', email='albert@yahoo.com', pass_secure='albert')
        self.new_post = Post(id=1, title='test', content='a test blog', user_id=self.username)
        self.new_comment = Comments(id=1, comment='test comment', user=self.username, post_id=self.new_post)
def tearDown(self):
User.query.delete()
Post.query.delete()
def test_check_instance(self):
        self.assertEqual(self.new_comment.comment, 'test comment')
        self.assertEqual(self.new_comment.user, self.username)
        self.assertEqual(self.new_comment.post_id, self.new_post)
class CommentTest(unittest.TestCase):
def setUp(self):
self.user_albert = User(username= 'albert',email='albert@yahoo.com',pass_secure= 'albert')
self.new_post= Post(id=1,title='test',content='a test blog',user_id=self.user_albert)
self.new_comment= Comments(id=1,comment='test comment',post_id=self.new_post.id,user_id=self.user_albert)
def tearDown(self):
User.query.delete()
Comments.query.delete()
Post.query.delete()
    def test_check_instance(self):
        self.assertEqual(self.new_comment.comment, 'test comment')
        self.assertEqual(self.new_comment.post_id, self.new_post.id)
        self.assertEqual(self.new_comment.user_id, self.user_albert)
    def test_save_comment(self):
        self.new_comment.save_comment()
        self.assertTrue(len(Comments.query.all()) > 0)
def test_get_comment(self):
self.new_comment.save_comment()
comment = Comments.get_comment(1)
self.assertTrue(comment is not None)
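For context, a hedged sketch of how these tests might be run; it assumes the project keeps them under a tests/ package and that the database used by the models is already configured.

# Illustrative runner, not part of the original test file.
import unittest

if __name__ == '__main__':
    suite = unittest.defaultTestLoader.discover('tests', pattern='test_comment.py')
    unittest.TextTestRunner(verbosity=2).run(suite)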
| 37.52381 | 113 | 0.698604 |
4a85b9b5af8354b29b14b2523f830c7788eff949 | 3,291 | py | Python | gw_crawler/malicious_file_crawler/src/settings.py | virajut/k8-test-data | d2386c2a5c12623f868bd9923d1aa5b262b55b5e | ["Apache-2.0"] | null | null | null | gw_crawler/malicious_file_crawler/src/settings.py | virajut/k8-test-data | d2386c2a5c12623f868bd9923d1aa5b262b55b5e | ["Apache-2.0"] | null | null | null | gw_crawler/malicious_file_crawler/src/settings.py | virajut/k8-test-data | d2386c2a5c12623f868bd9923d1aa5b262b55b5e | ["Apache-2.0"] | null | null | null |
# Scrapy settings for malicious_file_crawler project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
import os
from src.constants import DOWNLOAD_PATH
from datetime import datetime
BASE_PATH = os.path.dirname(os.path.abspath(__file__))
from dotenv import load_dotenv
env_path = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
load_dotenv()
PROJECT_NAME = 'malicious_file_crawler'
# Define JOBDIR path for pausing and resuming crawls
#JOB_DIR = 'crawlers/spider-1'
date = datetime.strftime(datetime.now(), '%Y%m%d')
EXTENSIONS = {
'scrapy_dotpersistence.DotScrapyPersistence': 0,
}
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Get the path to the directory this file is in
CURR_DIR = os.path.abspath(os.path.dirname(__file__))
BOT_NAME = 'src'
SPIDER_MODULES = ['src.spiders']
NEWSPIDER_MODULE = 'src.spiders'
# Config file path
CONFIG_FILE = os.path.join(BASE_PATH, 'config', 'config.ini')
# Crawl responsibly by identifying yourself (and your website) on the user-agent
# USER_AGENT = 'malicious_file_crawler (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
CONCURRENT_REQUESTS = 32
REACTOR_THREADPOOL_MAXSIZE = 20
# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
# DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
# CONCURRENT_REQUESTS_PER_DOMAIN = 16
# CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
# COOKIES_ENABLED = False
# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
"src.pipelines.MaliciousFileCrawlerPipeline": 1,
'src.middlewares.MaliciousFileCrawlerDownloaderMiddleware': 543,
}
# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
"src.pipelines.MaliciousFileCrawlerPipeline": 300,
}
DOWNLOAD_TIMEOUT = 12000
MEDIA_ALLOW_REDIRECTS = True
FILES_STORE = DOWNLOAD_PATH
# Uncomment this when MINIO service is running
# max download size of 5gb
DOWNLOAD_MAXSIZE = 5368709120
# scrapyd endpoint
SCRAPYD_ENDPOINT = os.environ.get('scrapyd_endpoint', 'http://0.0.0.0:6800/')
EXTENSIONS = {
'scrapy_dotpersistence.DotScrapyPersistence': 0
}
DOTSCRAPY_ENABLED = True
ADDONS_AWS_ACCESS_KEY_ID = os.environ.get("ADDONS_AWS_ACCESS_KEY_ID")
ADDONS_AWS_SECRET_ACCESS_KEY = os.environ.get("ADDONS_AWS_SECRET_ACCESS_KEY")
ADDONS_AWS_USERNAME = os.environ.get("ADDONS_AWS_USERNAME")
ADDONS_S3_BUCKET = os.environ.get("ADDONS_S3_BUCKET")
HTTPCACHE_ENABLED = True
HTTPCACHE_EXPIRATION_SECS = 60 * 60 * 24 * 7
HTTPCACHE_DIR = 'httpcache'
HTTPCACHE_IGNORE_HTTP_CODES = []
HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
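Individual spiders can override these project-wide values through Scrapy's custom_settings hook; the sketch below is illustrative only, and the spider name and start URL are hypothetical.

# Illustrative sketch, not part of settings.py.
import scrapy

class ExampleFileSpider(scrapy.Spider):
    name = 'example_files'                       # hypothetical spider
    start_urls = ['https://example.com/files/']  # placeholder URL
    custom_settings = {
        'CONCURRENT_REQUESTS': 8,   # throttle below the project-wide 32
        'DOWNLOAD_TIMEOUT': 600,    # tighter than the project-wide 12000
    }

    def parse(self, response):
        for href in response.css('a::attr(href)').getall():
            yield {'file_urls': [response.urljoin(href)]}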
| 30.472222 | 87 | 0.783956 |
f6b6cee515561dd4fcdd90c07b3b2cd6dda8ff30 | 4,775 | py | Python | chainer/functions/array/get_item.py | dydo0316/test2 | a9982a8b426dd07eb1ec4e7695a7bc546ecc6063 | ["MIT"] | null | null | null | chainer/functions/array/get_item.py | dydo0316/test2 | a9982a8b426dd07eb1ec4e7695a7bc546ecc6063 | ["MIT"] | null | null | null | chainer/functions/array/get_item.py | dydo0316/test2 | a9982a8b426dd07eb1ec4e7695a7bc546ecc6063 | ["MIT"] | null | null | null |
import numpy
import chainer
from chainer.backends import cuda
from chainer import function_node
from chainer import utils
from chainer.utils import type_check
from chainer import variable
_numpy_supports_0d_bool_index = \
numpy.lib.NumpyVersion(numpy.__version__) >= '1.13.0'
class GetItem(function_node.FunctionNode):
"""Function that slices array and extract elements."""
def __init__(self, slices):
if isinstance(slices, list):
if all([isinstance(s, int) for s in slices]):
slices = slices,
slices = tuple(slices)
elif not isinstance(slices, tuple):
slices = slices,
if chainer.is_debug():
n_ellipses = 0
for s in slices:
if s is Ellipsis:
n_ellipses += 1
if n_ellipses > 1:
raise ValueError('Only one Ellipsis is allowed')
self.slices = slices
def check_type_forward(self, in_types):
type_check.argname(in_types, ('x',))
def forward(self, xs):
return utils.force_array(xs[0][self.slices]),
def backward(self, indexes, gy):
return GetItemGrad(
self.slices, self.inputs[0].shape, self.inputs[0].dtype).apply(gy)
class GetItemGrad(function_node.FunctionNode):
def __init__(self, slices, in_shape, in_dtype):
self.slices = slices
self._in_shape = in_shape
self._in_dtype = in_dtype
def forward(self, inputs):
gy, = inputs
xp = cuda.get_array_module(*inputs)
gx = xp.zeros(self._in_shape, self._in_dtype)
if xp is numpy:
try:
numpy.add.at(gx, self.slices, gy)
except IndexError:
done = False
# In numpy<1.13, 0-dim boolean index is not supported in
# numpy.add.at and it's supported for 0-dim arr in
# arr.__getitem__.
if not _numpy_supports_0d_bool_index and len(self.slices) == 1:
idx = numpy.asanyarray(self.slices[0])
if idx.dtype == numpy.dtype(bool):
# Convert the array and the mask to 1-dim.
# numpy.add.at with them is supported in older numpy.
numpy.add.at(gx[None], idx[None], gy)
done = True
if not done:
msg = '''
GetItem does not support backward for this slices. The slices argument is not
supported by numpy.add.at, while it is supported by numpy.ndarray.__getitem__.
Please report this error to the issue tracker with the stack trace,
the information of your environment, and your script:
https://github.com/chainer/chainer/issues/new.
'''
raise IndexError(msg)
else:
gx.scatter_add(self.slices, inputs[0])
return gx,
def backward(self, indexes, ggx):
return GetItem(self.slices).apply(ggx)
def get_item(x, slices):
"""Extract elements from array with specified shape, axes and offsets.
Args:
x (:class:`~chainer.Variable` or :class:`numpy.ndarray` or \
:class:`cupy.ndarray`): A variable to be sliced.
slices (int, slice, Ellipsis, None, integer array-like, boolean\
array-like or tuple of them):
An object to specify the selection of elements.
Returns:
A :class:`~chainer.Variable` object which contains sliced array of
``x``.
.. note::
It only supports types that are supported by CUDA's atomicAdd when
an integer array is included in ``slices``.
The supported types are ``numpy.float32``, ``numpy.int32``,
``numpy.uint32``, ``numpy.uint64`` and ``numpy.ulonglong``.
.. note::
It does not support ``slices`` that contains multiple boolean arrays.
.. note::
See NumPy document for details of `indexing
<https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html>`_.
.. admonition:: Example
>>> x = np.arange(12).reshape((2, 2, 3))
>>> x
array([[[ 0, 1, 2],
[ 3, 4, 5]],
<BLANKLINE>
[[ 6, 7, 8],
[ 9, 10, 11]]])
>>> F.get_item(x, 0)
variable([[0, 1, 2],
[3, 4, 5]])
>>> F.get_item(x, (0, 0, slice(0, 2, 1))) # equals x[0, 0, 0:2:1]
variable([0, 1])
>>> F.get_item(x, (Ellipsis, 2)) # equals x[..., 2]
variable([[ 2, 5],
[ 8, 11]])
>>> F.get_item(x, (1, np.newaxis, 1, 0)) # equals x[1, None, 1, 0]
variable([9])
"""
return GetItem(slices).apply((x,))[0]
def install_variable_get_item():
variable.Variable.__getitem__ = get_item
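A minimal, hedged usage sketch of the function above (independent of the module): slicing through get_item keeps the operation differentiable, so gradients flow only to the selected elements.

# Illustrative example; not part of the original module.
import numpy as np
import chainer
import chainer.functions as F

x = chainer.Variable(np.arange(6, dtype=np.float32).reshape(2, 3))
y = F.get_item(x, (0, slice(1, None)))   # same selection as x[0, 1:]
loss = F.sum(y)
loss.backward()
print(x.grad)
# [[0. 1. 1.]
#  [0. 0. 0.]]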
| 32.04698 | 79 | 0.57089 |
d482fbfb4cc3a29c9cfe7afd5d4554f5e9a6277e | 3,115 | py | Python | config/base.py | kentaroy47/kaggle-wheat-arutema47 | f99de3926d4f3da3d65e3fbb94a6fa8676de4121 | ["MIT"] | 4 | 2021-01-04T04:23:33.000Z | 2021-01-26T05:47:12.000Z | config/base.py | kentaroy47/kaggle-wheat-arutema47 | f99de3926d4f3da3d65e3fbb94a6fa8676de4121 | ["MIT"] | null | null | null | config/base.py | kentaroy47/kaggle-wheat-arutema47 | f99de3926d4f3da3d65e3fbb94a6fa8676de4121 | ["MIT"] | null | null | null |
import os
import pprint
import re
from ast import literal_eval
from colorama import Back, Fore
from easydict import EasyDict as edict
import yaml
def _get_default_config():
c = edict()
# dataset
c.data = edict()
c.data.name = 'data'
c.data.num_classes = 1
c.data.test_dir = 'data/test'
c.data.train_df_path = 'data/train.csv'
c.data.train_df_path = 'models/'
c.data.train_dir = 'data/train'
c.data.params = edict()
c.data.input_size = 1024
c.data.train_size = 1024
c.data.model_scale = 4
c.data.pseudo_path = False
# model
c.model = edict()
c.model.fpn = True
c.model.backbone = 'rx101'
c.model.params = edict()
# train
c.train = edict()
c.train.batch_size = 8
c.train.num_epochs = 100
c.train.cutmix = True
c.train.early_stop_patience = 4
c.train.accumulation_size = 0
c.train.regr_scale = 1
# test
c.test = edict()
c.test.batch_size = 8
c.test.tta = False
# Evals
c.eval = edict()
c.eval.nms = "nms"
# optimizer
c.optimizer = edict()
c.optimizer.name = 'Adam'
c.optimizer.params_type = 'weight_decay'
c.optimizer.params = edict()
c.optimizer.params.encoder_lr = 1.0e-4
c.optimizer.params.decoder_lr = 1.0e-4
c.optimizer.params.weight_decay = 1.0e-4
# scheduler
c.scheduler = edict()
c.scheduler.name = 'plateau'
c.scheduler.params = edict()
# transforms
c.transforms = edict()
c.transforms.params = edict()
c.transforms.train = edict()
c.transforms.train.mean = [0.485, 0.456, 0.406]
c.transforms.train.std = [0.229, 0.224, 0.225]
c.transforms.train.Contrast = False
c.transforms.train.Noise = False
c.transforms.train.Blur = False
c.transforms.train.Distort = False
c.transforms.train.ShiftScaleRotate = False
c.transforms.test = edict()
c.transforms.test.mean = [0.485, 0.456, 0.406]
c.transforms.test.std = [0.229, 0.224, 0.225]
c.transforms.test.Contrast = False
c.transforms.test.Noise = False
c.transforms.test.Blur = False
c.transforms.test.Distort = False
c.transforms.test.ShiftScaleRotate = False
# losses
c.loss = edict()
c.loss.name = 'Center'
c.loss.params = edict()
c.loss.params.focal = False
c.loss.params.reduce = 'sum'
c.loss.return_callback = False
c.device = 'cuda'
c.num_workers = 8
c.work_dir = './work_dir'
c.checkpoint_path = None
c.debug = False
return c
def _merge_config(src, dst):
if not isinstance(src, edict):
return
for k, v in src.items():
if isinstance(v, edict):
_merge_config(src[k], dst[k])
else:
dst[k] = v
def load_config(config_path):
with open(config_path, 'r') as fid:
yaml_config = edict(yaml.load(fid, Loader=yaml.SafeLoader))
config = _get_default_config()
_merge_config(yaml_config, config)
return config
def save_config(config, file_name):
with open(file_name, "w") as wf:
yaml.dump(config, wf)
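A minimal sketch of how load_config() merges a YAML override into the defaults above; the file name and the override values are made up, and the snippet assumes the functions in this module are importable.

# Illustrative only; 'example_config.yml' and its values are invented.
import yaml

override = {'train': {'batch_size': 4, 'num_epochs': 30}, 'model': {'backbone': 'r50'}}
with open('example_config.yml', 'w') as f:
    yaml.dump(override, f)

cfg = load_config('example_config.yml')
print(cfg.train.batch_size)   # 4      (taken from the YAML override)
print(cfg.optimizer.name)     # 'Adam' (default kept)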
| 23.961538 | 67 | 0.634992 |
b4711b50dd2309b5e08e0c12639a56fa2342aea3 | 675 | py | Python | homeassistant/components/airly/const.py | miccico/core | 14c205384171dee59c1a908f8449f9864778b2dc | ["Apache-2.0"] | 6 | 2017-08-02T19:26:39.000Z | 2020-03-14T22:47:41.000Z | homeassistant/components/airly/const.py | miccico/core | 14c205384171dee59c1a908f8449f9864778b2dc | ["Apache-2.0"] | 54 | 2020-11-17T07:04:57.000Z | 2022-03-31T06:45:39.000Z | homeassistant/components/airly/const.py | miccico/core | 14c205384171dee59c1a908f8449f9864778b2dc | ["Apache-2.0"] | 14 | 2018-08-19T16:28:26.000Z | 2021-09-02T18:26:53.000Z |
"""Constants for Airly integration."""
ATTR_API_ADVICE = "ADVICE"
ATTR_API_CAQI = "CAQI"
ATTR_API_CAQI_DESCRIPTION = "DESCRIPTION"
ATTR_API_CAQI_LEVEL = "LEVEL"
ATTR_API_HUMIDITY = "HUMIDITY"
ATTR_API_PM1 = "PM1"
ATTR_API_PM10 = "PM10"
ATTR_API_PM10_LIMIT = "PM10_LIMIT"
ATTR_API_PM10_PERCENT = "PM10_PERCENT"
ATTR_API_PM25 = "PM25"
ATTR_API_PM25_LIMIT = "PM25_LIMIT"
ATTR_API_PM25_PERCENT = "PM25_PERCENT"
ATTR_API_PRESSURE = "PRESSURE"
ATTR_API_TEMPERATURE = "TEMPERATURE"
CONF_USE_NEAREST = "use_nearest"
DEFAULT_NAME = "Airly"
DOMAIN = "airly"
MANUFACTURER = "Airly sp. z o.o."
MAX_REQUESTS_PER_DAY = 100
NO_AIRLY_SENSORS = "There are no Airly sensors in this area yet."
| 30.681818 | 65 | 0.786667 |
731681b9145103f3de298b06cc3815b1693fe81d | 1,982 | py | Python | aliyun-python-sdk-ccc/aliyunsdkccc/request/v20200701/PickOutboundNumbersRequest.py | jorsonzen/aliyun-openapi-python-sdk | 0afbfa8e5f9e19455695aa799f7dcc1cd853d827 | ["Apache-2.0"] | 1,001 | 2015-07-24T01:32:41.000Z | 2022-03-25T01:28:18.000Z | aliyun-python-sdk-ccc/aliyunsdkccc/request/v20200701/PickOutboundNumbersRequest.py | jorsonzen/aliyun-openapi-python-sdk | 0afbfa8e5f9e19455695aa799f7dcc1cd853d827 | ["Apache-2.0"] | 363 | 2015-10-20T03:15:00.000Z | 2022-03-08T12:26:19.000Z | aliyun-python-sdk-ccc/aliyunsdkccc/request/v20200701/PickOutboundNumbersRequest.py | jorsonzen/aliyun-openapi-python-sdk | 0afbfa8e5f9e19455695aa799f7dcc1cd853d827 | ["Apache-2.0"] | 682 | 2015-09-22T07:19:02.000Z | 2022-03-22T09:51:46.000Z |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkccc.endpoint import endpoint_data
class PickOutboundNumbersRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'CCC', '2020-07-01', 'PickOutboundNumbers','CCC')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_Count(self):
return self.get_query_params().get('Count')
def set_Count(self,Count):
self.add_query_param('Count',Count)
def get_InstanceId(self):
return self.get_query_params().get('InstanceId')
def set_InstanceId(self,InstanceId):
self.add_query_param('InstanceId',InstanceId)
def get_SkillGroupIdList(self):
return self.get_query_params().get('SkillGroupIdList')
def set_SkillGroupIdList(self,SkillGroupIdList):
self.add_query_param('SkillGroupIdList',SkillGroupIdList)
def get_CalledNumber(self):
return self.get_query_params().get('CalledNumber')
def set_CalledNumber(self,CalledNumber):
self.add_query_param('CalledNumber',CalledNumber)
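A hedged sketch of sending this request with the core SDK client; the credentials, region and identifiers below are placeholders, not values from the source.

# Illustrative usage; all literals are placeholders.
from aliyunsdkcore.client import AcsClient

client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-shanghai')
request = PickOutboundNumbersRequest()
request.set_InstanceId('<ccc-instance-id>')
request.set_CalledNumber('<called-number>')
request.set_Count(1)
response = client.do_action_with_exception(request)
print(response)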
| 35.392857 | 78 | 0.767911 |
80a1ce51d819a968c87b89d40b6e50b537261d67 | 733 | py | Python | ossPy/scrips/fuzzywuzzytest1.py | team-oss/dspg20oss | dd9a3c5cd9c26a95bbc9b478ead86f09d9a30d8d | ["MIT"] | 1 | 2020-06-11T20:03:08.000Z | 2020-06-11T20:03:08.000Z | ossPy/scrips/fuzzywuzzytest1.py | team-oss/dspg20oss | dd9a3c5cd9c26a95bbc9b478ead86f09d9a30d8d | ["MIT"] | null | null | null | ossPy/scrips/fuzzywuzzytest1.py | team-oss/dspg20oss | dd9a3c5cd9c26a95bbc9b478ead86f09d9a30d8d | ["MIT"] | 2 | 2020-07-23T19:39:23.000Z | 2021-10-07T15:33:25.000Z |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 20 20:24:06 2020
@author: dnb3k
"""
import ossPyFuncs
Ontology = ossPyFuncs.composeWorkplaceOntology()
lesserCompanies=multiCoWorkerTable.iloc[15000:-1]
lesserCompanies['guesses']=""
multiCoWorkerTable['guesses']=""
import difflib
#df['Name_r'] = df.Name_x.map(lambda x: (difflib.get_close_matches(x, dfF.Name)[:1] or [None])[0])
#df2.index = df2.index.map(lambda x: difflib.get_close_matches(x, multiCoWorkerTable.index)[0])
for iAttempts in range(len(multiCoWorkerTable.index)):
multiCoWorkerTable['guesses'].iloc[iAttempts]=difflib.get_close_matches(multiCoWorkerTable['company'].iloc[iAttempts],Ontology[0],cutoff=0.8)
lesserCompanies['guesses'].iloc[iAttempts]
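Since the script above depends on data frames that are not shown, here is a self-contained sketch of the matching step it performs; the company names and ontology entries are made-up examples.

# Self-contained illustration of difflib-based fuzzy matching.
import difflib

ontology = ['Google', 'Microsoft', 'University of Virginia']
companies = ['Google Inc', 'Microsofft', 'Univ of Virginia']
for name in companies:
    guesses = difflib.get_close_matches(name, ontology, cutoff=0.6)
    print(name, '->', guesses)   # e.g. 'Google Inc' -> ['Google']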
| 29.32 | 145 | 0.746248 |
ffaa051b060f557bcda17138ec8ee9c826b4a27b | 827 | py | Python | jp.atcoder/abc084/abc084_d/9132611.py | kagemeka/atcoder-submissions | 91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e | ["MIT"] | 1 | 2022-02-09T03:06:25.000Z | 2022-02-09T03:06:25.000Z | jp.atcoder/abc084/abc084_d/9132611.py | kagemeka/atcoder-submissions | 91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e | ["MIT"] | 1 | 2022-02-05T22:53:18.000Z | 2022-02-09T01:29:30.000Z | jp.atcoder/abc084/abc084_d/9132611.py | kagemeka/atcoder-submissions | 91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e | ["MIT"] | null | null | null |
import sys
from math import floor, sqrt
def prime_nums(n):
sieve = set(range(2, n + 1))
non_prime = set(range(2 * 2, n + 1, 2))
sieve -= non_prime
for i in range(3, floor(sqrt(n)) + 1, 2):
if i in sieve:
non_prime = set(range(i * 2, n + 1, i))
sieve -= non_prime
return sieve
p = prime_nums(10**5)
cnt = [None] * (10**5 + 1)
cnt[0] = 0
for i in range(1, 10**5 + 1, 2):
if i in p and (i + 1) // 2 in p:
cnt[i] = cnt[i - 1] + 1
else:
cnt[i] = cnt[i - 1]
cnt[i + 1] = cnt[i]
q = int(sys.stdin.readline().rstrip())
lr = zip(*[map(int, sys.stdin.read().split())] * 2)
def main():
for l, r in lr:
yield cnt[r] - cnt[l - 1]
if __name__ == "__main__":
ans = main()
print(*ans, sep="\n")
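The submission above precomputes a prefix count so each query is answered in O(1). The following small, self-contained illustration (not part of the submission) shows the same idea with a naive primality test and a tiny limit.

# Worked illustration of the prefix-count technique; values are small on purpose.
def is_prime(n):
    if n < 2:
        return False
    d = 2
    while d * d <= n:
        if n % d == 0:
            return False
        d += 1
    return True

limit = 20
cnt = [0] * (limit + 1)
for i in range(1, limit + 1):
    cnt[i] = cnt[i - 1] + (i % 2 == 1 and is_prime(i) and is_prime((i + 1) // 2))

print(cnt[13] - cnt[2])   # 3 -> the qualifying n in [3, 13] are 3, 5 and 13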
| 21.205128 | 52 | 0.47763 |
dee5e7d65388e1ab314f10216effd9e913a1b79c | 6,268 | py | Python | pyfitterbap/comm/ui/expanding_widget.py | jetperch/fitterbap | dc29db72c2d7b01d90556a251be0a361574033bc | ["Apache-2.0"] | 21 | 2021-05-14T20:16:56.000Z | 2022-03-30T18:54:31.000Z | pyfitterbap/comm/ui/expanding_widget.py | jetperch/fitterbap | dc29db72c2d7b01d90556a251be0a361574033bc | ["Apache-2.0"] | null | null | null | pyfitterbap/comm/ui/expanding_widget.py | jetperch/fitterbap | dc29db72c2d7b01d90556a251be0a361574033bc | ["Apache-2.0"] | null | null | null |
# Copyright 2021 Jetperch LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from PySide6 import QtCore, QtGui, QtWidgets
class ExpandingWidget(QtWidgets.QWidget):
def __init__(self, parent=None, title=None, animation_duration_ms=None):
super(ExpandingWidget, self).__init__(parent=parent)
self._widget: QtWidgets.QWidget = None
self._animation_duration_ms = 250 if animation_duration_ms is None else int(animation_duration_ms)
self._animation = None
self._main_layout = QtWidgets.QVBoxLayout(self)
self._main_layout.setContentsMargins(0, 0, 0, 0)
self._toggle_button = QtWidgets.QToolButton()
self._toggle_button.setStyleSheet("QToolButton { border: none; }")
self._toggle_button.setToolButtonStyle(QtCore.Qt.ToolButtonTextBesideIcon)
self._toggle_button.setArrowType(QtCore.Qt.RightArrow)
self._toggle_button.setCheckable(True)
self._toggle_button.setChecked(False)
self.title = title
self._main_layout.addWidget(self._toggle_button)
self._toggle_button.clicked.connect(self._start_animation)
@property
def title(self):
return self._toggle_button.text()
@title.setter
def title(self, title):
title = '' if title is None else str(title)
self._toggle_button.setText(title)
def _start_animation(self, checked):
if self._widget is None:
return
if checked:
self._widget.show()
else:
self._widget.hide()
arrow_type = QtCore.Qt.DownArrow if checked else QtCore.Qt.RightArrow
self._toggle_button.setArrowType(arrow_type)
def _show(self):
if self._widget is None:
return
# See https://www.qtcentre.org/threads/60494-Animate-an-hidden-widget
if self._widget.isHidden():
self._widget.show() # required to get size
pos = self._widget.pos()
size = self._widget.size()
fo = QtCore.QRect(pos.x(), pos.y() + size.height(), size.width(), size.height())
fi = QtCore.QRect(pos, size)
else:
fo = self._widget.geometry()
fi = QtCore.QRect(fo.x(), fo.y() - fo.height(), fo.width(), fo.height())
animation = QtCore.QPropertyAnimation(self._widget, b'geometry')
animation.setDuration(self._animation_duration_ms)
animation.setEasingCurve(QtCore.QEasingCurve.Linear)
animation.setStartValue(fo)
animation.setEndValue(fi)
animation.start()
self._animation = animation
def setWidget(self, widget):
self._widget = widget
widget.setParent(self)
self._main_layout.addWidget(self._widget)
self._widget.setHidden(not self._toggle_button.isChecked())
class VerticalScrollArea(QtWidgets.QScrollArea):
def __init__(self, parent=None):
super(VerticalScrollArea, self).__init__(parent)
self.setWidgetResizable(True)
self.setHorizontalScrollBarPolicy(QtGui.Qt.ScrollBarAlwaysOff)
self.setVerticalScrollBarPolicy(QtGui.Qt.ScrollBarAsNeeded)
self.setSizePolicy(QtWidgets.QSizePolicy.Preferred,
QtWidgets.QSizePolicy.Preferred)
def setWidget(self, widget):
widget.installEventFilter(self)
return super(VerticalScrollArea, self).setWidget(widget)
def eventFilter(self, obj, event):
widget = self.widget()
if obj == widget and event.type() == QtCore.QEvent.Resize:
width = widget.minimumSizeHint().width() + self.verticalScrollBar().width()
self.setMinimumWidth(width)
return False
class _MainWindow(QtWidgets.QMainWindow):
def __init__(self):
super(_MainWindow, self).__init__()
self.setWindowTitle('ExpandingWidget Demo')
self._scroll_widget = VerticalScrollArea(self)
self._scroll_widget.setObjectName('central_scroll')
self.setCentralWidget(self._scroll_widget)
self._widget = QtWidgets.QWidget(self._scroll_widget)
self._widget.setObjectName('central_widget')
self._scroll_widget.setWidget(self._widget)
self._layout = QtWidgets.QVBoxLayout(self._widget)
self._layout.setSpacing(6)
self._layout.setContentsMargins(11, 11, 11, 11)
self._layout.setObjectName('central_layout')
self._widgets = []
for widget_id in range(5):
widget = ExpandingWidget(self._widget, title=f'Widget {widget_id}')
inner_widget = QtWidgets.QWidget(widget)
layout = QtWidgets.QVBoxLayout(inner_widget)
labels = []
for label_id in range(10):
label = QtWidgets.QLabel(f'Widget {widget_id}, Label {label_id}', inner_widget)
layout.addWidget(label)
labels.append(label)
widget.setWidget(inner_widget)
self._widgets.append([widget, inner_widget, layout, labels])
self._layout.addWidget(widget)
self._spacer = QtWidgets.QSpacerItem(0, 0,
QtWidgets.QSizePolicy.Minimum,
QtWidgets.QSizePolicy.Expanding)
self._layout.addItem(self._spacer)
self.show()
def _run():
import sys
import ctypes
# http://doc.qt.io/qt-5/highdpi.html
# https://vicrucann.github.io/tutorials/osg-qt-high-dpi/
if sys.platform.startswith('win'):
ctypes.windll.user32.SetProcessDPIAware()
QtWidgets.QApplication.setAttribute(QtCore.Qt.AA_EnableHighDpiScaling)
app = QtWidgets.QApplication(sys.argv)
ui = _MainWindow()
rc = app.exec_()
del ui
del app
return rc
if __name__ == '__main__':
_run()
| 37.532934 | 106 | 0.662412 |
07b4c015db8ae71955629db51175c9b3cb59adb6 | 41,347 | py | Python | src/sagemaker/local/image.py | jbarz1/sagemaker-python-sdk | a7399455f5386d83ddc5cb15c0db00c04bd518ec | ["Apache-2.0"] | null | null | null | src/sagemaker/local/image.py | jbarz1/sagemaker-python-sdk | a7399455f5386d83ddc5cb15c0db00c04bd518ec | ["Apache-2.0"] | null | null | null | src/sagemaker/local/image.py | jbarz1/sagemaker-python-sdk | a7399455f5386d83ddc5cb15c0db00c04bd518ec | ["Apache-2.0"] | null | null | null |
# Copyright 2017-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Placeholder docstring"""
from __future__ import absolute_import
import base64
import errno
import json
import logging
import os
import platform
import random
import re
import shlex
import shutil
import string
import subprocess
import sys
import tarfile
import tempfile
from distutils.spawn import find_executable
from threading import Thread
from six.moves.urllib.parse import urlparse
import sagemaker
import sagemaker.local.data
import sagemaker.local.utils
import sagemaker.utils
CONTAINER_PREFIX = "algo"
DOCKER_COMPOSE_FILENAME = "docker-compose.yaml"
DOCKER_COMPOSE_HTTP_TIMEOUT_ENV = "COMPOSE_HTTP_TIMEOUT"
DOCKER_COMPOSE_HTTP_TIMEOUT = "120"
# Environment variables to be set during training
REGION_ENV_NAME = "AWS_REGION"
TRAINING_JOB_NAME_ENV_NAME = "TRAINING_JOB_NAME"
S3_ENDPOINT_URL_ENV_NAME = "S3_ENDPOINT_URL"
logger = logging.getLogger(__name__)
class _SageMakerContainer(object):
"""Handle the lifecycle and configuration of a local container execution.
This class is responsible for creating the directories and configuration
files that the docker containers will use for either training or serving.
"""
def __init__(
self,
instance_type,
instance_count,
image,
sagemaker_session=None,
container_entrypoint=None,
container_arguments=None,
):
"""Initialize a SageMakerContainer instance
It uses a :class:`sagemaker.session.Session` for general interaction
with user configuration such as getting the default sagemaker S3 bucket.
However this class does not call any of the SageMaker APIs.
Args:
instance_type (str): The instance type to use. Either 'local' or
'local_gpu'
instance_count (int): The number of instances to create.
image (str): docker image to use.
sagemaker_session (sagemaker.session.Session): a sagemaker session
to use when interacting with SageMaker.
container_entrypoint (str): the container entrypoint to execute
container_arguments (str): the container entrypoint arguments
"""
from sagemaker.local.local_session import LocalSession
# check if docker-compose is installed
if find_executable("docker-compose") is None:
raise ImportError(
"'docker-compose' is not installed. "
"Local Mode features will not work without docker-compose. "
"For more information on how to install 'docker-compose', please, see "
"https://docs.docker.com/compose/install/"
)
self.sagemaker_session = sagemaker_session or LocalSession()
self.instance_type = instance_type
self.instance_count = instance_count
self.image = image
self.container_entrypoint = container_entrypoint
self.container_arguments = container_arguments
# Since we are using a single docker network, Generate a random suffix to attach to the
# container names. This way multiple jobs can run in parallel.
suffix = "".join(random.choice(string.ascii_lowercase + string.digits) for _ in range(5))
self.hosts = [
"{}-{}-{}".format(CONTAINER_PREFIX, i, suffix)
for i in range(1, self.instance_count + 1)
]
self.container_root = None
self.container = None
def process(
self, processing_inputs, processing_output_config, environment, processing_job_name
):
"""Run a processing job locally using docker-compose.
Args:
processing_inputs (dict): The processing input specification.
processing_output_config (dict): The processing output configuration specification.
environment (dict): The environment collection for the processing job.
processing_job_name (str): Name of the local processing job being run.
"""
self.container_root = self._create_tmp_folder()
# A shared directory for all the containers;
# it is only mounted if the processing script is Local.
shared_dir = os.path.join(self.container_root, "shared")
os.mkdir(shared_dir)
data_dir = self._create_tmp_folder()
volumes = self._prepare_processing_volumes(
data_dir, processing_inputs, processing_output_config
)
# Create the configuration files for each container that we will create.
for host in self.hosts:
_create_processing_config_file_directories(self.container_root, host)
self.write_processing_config_files(
host, environment, processing_inputs, processing_output_config, processing_job_name
)
self._generate_compose_file(
"process", additional_volumes=volumes, additional_env_vars=environment
)
compose_command = self._compose()
if _ecr_login_if_needed(self.sagemaker_session.boto_session, self.image):
_pull_image(self.image)
process = subprocess.Popen(
compose_command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
)
try:
_stream_output(process)
except RuntimeError as e:
# _stream_output() doesn't have the command line. We will handle the exception
# which contains the exit code and append the command line to it.
msg = f"Failed to run: {compose_command}"
raise RuntimeError(msg) from e
finally:
# Uploading processing outputs back to Amazon S3.
self._upload_processing_outputs(data_dir, processing_output_config)
try:
# Deleting temporary directories.
dirs_to_delete = [shared_dir, data_dir]
self._cleanup(dirs_to_delete)
except OSError:
pass
# Print our Job Complete line to have a similar experience to training on SageMaker where
# you see this line at the end.
print("===== Job Complete =====")
def train(self, input_data_config, output_data_config, hyperparameters, job_name):
"""Run a training job locally using docker-compose.
Args:
input_data_config (dict): The Input Data Configuration, this contains data such as the
channels to be used for training.
output_data_config: The configuration of the output data.
hyperparameters (dict): The HyperParameters for the training job.
job_name (str): Name of the local training job being run.
Returns (str): Location of the trained model.
"""
self.container_root = self._create_tmp_folder()
os.mkdir(os.path.join(self.container_root, "output"))
# create output/data folder since sagemaker-containers 2.0 expects it
os.mkdir(os.path.join(self.container_root, "output", "data"))
# A shared directory for all the containers. It is only mounted if the training script is
# Local.
shared_dir = os.path.join(self.container_root, "shared")
os.mkdir(shared_dir)
data_dir = self._create_tmp_folder()
volumes = self._prepare_training_volumes(
data_dir, input_data_config, output_data_config, hyperparameters
)
# If local, source directory needs to be updated to mounted /opt/ml/code path
hyperparameters = self._update_local_src_path(
hyperparameters, key=sagemaker.estimator.DIR_PARAM_NAME
)
# Create the configuration files for each container that we will create
# Each container will map the additional local volumes (if any).
for host in self.hosts:
_create_config_file_directories(self.container_root, host)
self.write_config_files(host, hyperparameters, input_data_config)
shutil.copytree(data_dir, os.path.join(self.container_root, host, "input", "data"))
training_env_vars = {
REGION_ENV_NAME: self.sagemaker_session.boto_region_name,
TRAINING_JOB_NAME_ENV_NAME: job_name,
}
if self.sagemaker_session.s3_resource is not None:
training_env_vars[
S3_ENDPOINT_URL_ENV_NAME
] = self.sagemaker_session.s3_resource.meta.client._endpoint.host
compose_data = self._generate_compose_file(
"train", additional_volumes=volumes, additional_env_vars=training_env_vars
)
compose_command = self._compose()
if _ecr_login_if_needed(self.sagemaker_session.boto_session, self.image):
_pull_image(self.image)
process = subprocess.Popen(
compose_command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
)
try:
_stream_output(process)
except RuntimeError as e:
# _stream_output() doesn't have the command line. We will handle the exception
# which contains the exit code and append the command line to it.
msg = "Failed to run: %s, %s" % (compose_command, str(e))
raise RuntimeError(msg)
finally:
artifacts = self.retrieve_artifacts(compose_data, output_data_config, job_name)
# free up the training data directory as it may contain
# lots of data downloaded from S3. This doesn't delete any local
# data that was just mounted to the container.
dirs_to_delete = [data_dir, shared_dir]
self._cleanup(dirs_to_delete)
# Print our Job Complete line to have a similar experience to training on SageMaker where
# you see this line at the end.
print("===== Job Complete =====")
return artifacts
def serve(self, model_dir, environment):
"""Host a local endpoint using docker-compose.
Args:
primary_container (dict): dictionary containing the container runtime settings
for serving. Expected keys:
- 'ModelDataUrl' pointing to a file or s3:// location.
- 'Environment' a dictionary of environment variables to be passed to the
hosting container.
"""
logger.info("serving")
self.container_root = self._create_tmp_folder()
logger.info("creating hosting dir in %s", self.container_root)
volumes = self._prepare_serving_volumes(model_dir)
# If the user script was passed as a file:// mount it to the container.
if sagemaker.estimator.DIR_PARAM_NAME.upper() in environment:
script_dir = environment[sagemaker.estimator.DIR_PARAM_NAME.upper()]
parsed_uri = urlparse(script_dir)
if parsed_uri.scheme == "file":
volumes.append(_Volume(parsed_uri.path, "/opt/ml/code"))
# Update path to mount location
environment = environment.copy()
environment[sagemaker.estimator.DIR_PARAM_NAME.upper()] = "/opt/ml/code"
if _ecr_login_if_needed(self.sagemaker_session.boto_session, self.image):
_pull_image(self.image)
self._generate_compose_file(
"serve", additional_env_vars=environment, additional_volumes=volumes
)
compose_command = self._compose()
self.container = _HostingContainer(compose_command)
self.container.start()
def stop_serving(self):
"""Stop the serving container.
The serving container runs in async mode to allow the SDK to do other
tasks.
"""
if self.container:
self.container.down()
self.container.join()
self._cleanup()
# for serving we can delete everything in the container root.
_delete_tree(self.container_root)
def retrieve_artifacts(self, compose_data, output_data_config, job_name):
"""Get the model artifacts from all the container nodes.
Used after training completes to gather the data from all the
individual containers. As the official SageMaker Training Service, it
will override duplicate files if multiple containers have the same file
names.
Args:
compose_data (dict): Docker-Compose configuration in dictionary
format.
output_data_config: The configuration of the output data.
job_name: The name of the job.
Returns: Local path to the collected model artifacts.
"""
# We need a directory to store the artfiacts from all the nodes
# and another one to contained the compressed final artifacts
artifacts = os.path.join(self.container_root, "artifacts")
compressed_artifacts = os.path.join(self.container_root, "compressed_artifacts")
os.mkdir(artifacts)
model_artifacts = os.path.join(artifacts, "model")
output_artifacts = os.path.join(artifacts, "output")
artifact_dirs = [model_artifacts, output_artifacts, compressed_artifacts]
for d in artifact_dirs:
os.mkdir(d)
# Gather the artifacts from all nodes into artifacts/model and artifacts/output
for host in self.hosts:
volumes = compose_data["services"][str(host)]["volumes"]
for volume in volumes:
if re.search(r"^[A-Za-z]:", volume):
unit, host_dir, container_dir = volume.split(":")
host_dir = unit + ":" + host_dir
else:
host_dir, container_dir = volume.split(":")
if container_dir == "/opt/ml/model":
sagemaker.local.utils.recursive_copy(host_dir, model_artifacts)
elif container_dir == "/opt/ml/output":
sagemaker.local.utils.recursive_copy(host_dir, output_artifacts)
# Tar Artifacts -> model.tar.gz and output.tar.gz
model_files = [os.path.join(model_artifacts, name) for name in os.listdir(model_artifacts)]
output_files = [
os.path.join(output_artifacts, name) for name in os.listdir(output_artifacts)
]
sagemaker.utils.create_tar_file(
model_files, os.path.join(compressed_artifacts, "model.tar.gz")
)
sagemaker.utils.create_tar_file(
output_files, os.path.join(compressed_artifacts, "output.tar.gz")
)
if output_data_config["S3OutputPath"] == "":
output_data = "file://%s" % compressed_artifacts
else:
# Now we just need to move the compressed artifacts to wherever they are required
output_data = sagemaker.local.utils.move_to_destination(
compressed_artifacts,
output_data_config["S3OutputPath"],
job_name,
self.sagemaker_session,
)
_delete_tree(model_artifacts)
_delete_tree(output_artifacts)
return os.path.join(output_data, "model.tar.gz")
def write_processing_config_files(
self, host, environment, processing_inputs, processing_output_config, processing_job_name
):
"""Write the config files for the processing containers.
This method writes the hyperparameters, resources and input data
configuration files.
Args:
host (str): Host to write the configuration for
environment (dict): Environment variable collection.
processing_inputs (dict): Processing inputs.
processing_output_config (dict): Processing output configuration.
processing_job_name (str): Processing job name.
"""
config_path = os.path.join(self.container_root, host, "config")
resource_config = {"current_host": host, "hosts": self.hosts}
_write_json_file(os.path.join(config_path, "resourceconfig.json"), resource_config)
processing_job_config = {
"ProcessingJobArn": processing_job_name,
"ProcessingJobName": processing_job_name,
"AppSpecification": {
"ImageUri": self.image,
"ContainerEntrypoint": self.container_entrypoint,
"ContainerArguments": self.container_arguments,
},
"Environment": environment,
"ProcessingInputs": processing_inputs,
"ProcessingOutputConfig": processing_output_config,
"ProcessingResources": {
"ClusterConfig": {
"InstanceCount": self.instance_count,
"InstanceType": self.instance_type,
"VolumeSizeInGB": 30,
"VolumeKmsKeyId": None,
}
},
"RoleArn": "<no_role>",
"StoppingCondition": {"MaxRuntimeInSeconds": 86400},
}
_write_json_file(
os.path.join(config_path, "processingjobconfig.json"), processing_job_config
)
def write_config_files(self, host, hyperparameters, input_data_config):
"""Write the config files for the training containers.
This method writes the hyperparameters, resources and input data
configuration files.
Returns: None
Args:
host (str): Host to write the configuration for
hyperparameters (dict): Hyperparameters for training.
input_data_config (dict): Training input channels to be used for
training.
"""
config_path = os.path.join(self.container_root, host, "input", "config")
resource_config = {"current_host": host, "hosts": self.hosts}
json_input_data_config = {}
for c in input_data_config:
channel_name = c["ChannelName"]
json_input_data_config[channel_name] = {"TrainingInputMode": "File"}
if "ContentType" in c:
json_input_data_config[channel_name]["ContentType"] = c["ContentType"]
_write_json_file(os.path.join(config_path, "hyperparameters.json"), hyperparameters)
_write_json_file(os.path.join(config_path, "resourceconfig.json"), resource_config)
_write_json_file(os.path.join(config_path, "inputdataconfig.json"), json_input_data_config)
def _prepare_training_volumes(
self, data_dir, input_data_config, output_data_config, hyperparameters
):
"""Prepares the training volumes based on input and output data configs.
Args:
data_dir:
input_data_config:
output_data_config:
hyperparameters:
"""
shared_dir = os.path.join(self.container_root, "shared")
model_dir = os.path.join(self.container_root, "model")
volumes = []
volumes.append(_Volume(model_dir, "/opt/ml/model"))
# Mount the metadata directory if present.
# Only expected to be present on SM notebook instances.
# This is used by some DeepEngine libraries
metadata_dir = "/opt/ml/metadata"
if os.path.isdir(metadata_dir):
volumes.append(_Volume(metadata_dir, metadata_dir))
# Set up the channels for the containers. For local data we will
# mount the local directory to the container. For S3 Data we will download the S3 data
# first.
for channel in input_data_config:
uri = channel["DataUri"]
channel_name = channel["ChannelName"]
channel_dir = os.path.join(data_dir, channel_name)
os.mkdir(channel_dir)
data_source = sagemaker.local.data.get_data_source_instance(uri, self.sagemaker_session)
volumes.append(_Volume(data_source.get_root_dir(), channel=channel_name))
# If there is a training script directory and it is a local directory,
# mount it to the container.
if sagemaker.estimator.DIR_PARAM_NAME in hyperparameters:
training_dir = json.loads(hyperparameters[sagemaker.estimator.DIR_PARAM_NAME])
parsed_uri = urlparse(training_dir)
if parsed_uri.scheme == "file":
volumes.append(_Volume(parsed_uri.path, "/opt/ml/code"))
# Also mount a directory that all the containers can access.
volumes.append(_Volume(shared_dir, "/opt/ml/shared"))
parsed_uri = urlparse(output_data_config["S3OutputPath"])
if (
parsed_uri.scheme == "file"
and sagemaker.model.SAGEMAKER_OUTPUT_LOCATION in hyperparameters
):
intermediate_dir = os.path.join(parsed_uri.path, "output", "intermediate")
if not os.path.exists(intermediate_dir):
os.makedirs(intermediate_dir)
volumes.append(_Volume(intermediate_dir, "/opt/ml/output/intermediate"))
return volumes
def _prepare_processing_volumes(self, data_dir, processing_inputs, processing_output_config):
"""Prepares local container volumes for the processing job.
Args:
data_dir: The local data directory.
processing_inputs: The configuration of processing inputs.
processing_output_config: The configuration of processing outputs.
Returns:
The volumes configuration.
"""
shared_dir = os.path.join(self.container_root, "shared")
volumes = []
# Set up the input/outputs for the container.
for item in processing_inputs:
uri = item["DataUri"]
input_container_dir = item["S3Input"]["LocalPath"]
data_source = sagemaker.local.data.get_data_source_instance(uri, self.sagemaker_session)
volumes.append(_Volume(data_source.get_root_dir(), input_container_dir))
if processing_output_config and "Outputs" in processing_output_config:
for item in processing_output_config["Outputs"]:
output_name = item["OutputName"]
output_container_dir = item["S3Output"]["LocalPath"]
output_dir = os.path.join(data_dir, "output", output_name)
os.makedirs(output_dir)
volumes.append(_Volume(output_dir, output_container_dir))
volumes.append(_Volume(shared_dir, "/opt/ml/shared"))
return volumes
def _upload_processing_outputs(self, data_dir, processing_output_config):
"""Uploads processing outputs to Amazon S3.
Args:
data_dir: The local data directory.
processing_output_config: The processing output configuration.
"""
if processing_output_config and "Outputs" in processing_output_config:
for item in processing_output_config["Outputs"]:
output_name = item["OutputName"]
output_s3_uri = item["S3Output"]["S3Uri"]
output_dir = os.path.join(data_dir, "output", output_name)
sagemaker.local.utils.move_to_destination(
output_dir, output_s3_uri, "", self.sagemaker_session
)
def _update_local_src_path(self, params, key):
"""Updates the local path of source code.
Args:
params: Existing configuration parameters.
key: Lookup key for the path of the source code in the configuration parameters.
Returns:
The updated parameters.
"""
if key in params:
src_dir = json.loads(params[key])
parsed_uri = urlparse(src_dir)
if parsed_uri.scheme == "file":
new_params = params.copy()
new_params[key] = json.dumps("/opt/ml/code")
return new_params
return params
def _prepare_serving_volumes(self, model_location):
"""Prepares the serving volumes.
Args:
model_location: Location of the models.
"""
volumes = []
host = self.hosts[0]
# Make the model available to the container. If this is a local file just mount it to
# the container as a volume. If it is an S3 location, the DataSource will download it, we
# just need to extract the tar file.
host_dir = os.path.join(self.container_root, host)
os.makedirs(host_dir)
model_data_source = sagemaker.local.data.get_data_source_instance(
model_location, self.sagemaker_session
)
for filename in model_data_source.get_file_list():
if tarfile.is_tarfile(filename):
with tarfile.open(filename) as tar:
tar.extractall(path=model_data_source.get_root_dir())
volumes.append(_Volume(model_data_source.get_root_dir(), "/opt/ml/model"))
return volumes
def _generate_compose_file(self, command, additional_volumes=None, additional_env_vars=None):
"""Writes a config file describing a training/hosting environment.
This method generates a docker compose configuration file, it has an
entry for each container that will be created (based on self.hosts). it
calls
:meth:~sagemaker.local_session.SageMakerContainer._create_docker_host to
generate the config for each individual container.
Args:
command (str): either 'train' or 'serve'
additional_volumes (list): a list of volumes that will be mapped to
the containers
additional_env_vars (dict): a dictionary with additional environment
variables to be passed on to the containers.
Returns: (dict) A dictionary representation of the configuration that was written.
"""
boto_session = self.sagemaker_session.boto_session
additional_volumes = additional_volumes or []
additional_env_vars = additional_env_vars or {}
environment = []
optml_dirs = set()
aws_creds = _aws_credentials(boto_session)
if aws_creds is not None:
environment.extend(aws_creds)
additional_env_var_list = ["{}={}".format(k, v) for k, v in additional_env_vars.items()]
environment.extend(additional_env_var_list)
if os.environ.get(DOCKER_COMPOSE_HTTP_TIMEOUT_ENV) is None:
os.environ[DOCKER_COMPOSE_HTTP_TIMEOUT_ENV] = DOCKER_COMPOSE_HTTP_TIMEOUT
if command == "train":
optml_dirs = {"output", "output/data", "input"}
elif command == "process":
optml_dirs = {"output", "config"}
services = {
h: self._create_docker_host(h, environment, optml_dirs, command, additional_volumes)
for h in self.hosts
}
content = {
# Use version 2.3 as a minimum so that we can specify the runtime
"version": "2.3",
"services": services,
"networks": {"sagemaker-local": {"name": "sagemaker-local"}},
}
docker_compose_path = os.path.join(self.container_root, DOCKER_COMPOSE_FILENAME)
try:
import yaml
except ImportError as e:
logger.error(sagemaker.utils._module_import_error("yaml", "Local mode", "local"))
raise e
yaml_content = yaml.dump(content, default_flow_style=False)
logger.info("docker compose file: \n%s", yaml_content)
with open(docker_compose_path, "w") as f:
f.write(yaml_content)
return content
def _compose(self, detached=False):
"""Invokes the docker compose command.
Args:
detached:
"""
compose_cmd = "docker-compose"
command = [
compose_cmd,
"-f",
os.path.join(self.container_root, DOCKER_COMPOSE_FILENAME),
"up",
"--build",
"--abort-on-container-exit" if not detached else "--detach", # mutually exclusive
]
logger.info("docker command: %s", " ".join(command))
return command
def _create_docker_host(self, host, environment, optml_subdirs, command, volumes):
"""Creates the docker host configuration.
Args:
host:
environment:
optml_subdirs:
command:
volumes:
"""
optml_volumes = self._build_optml_volumes(host, optml_subdirs)
optml_volumes.extend(volumes)
container_name_prefix = "".join(
random.choice(string.ascii_lowercase + string.digits) for _ in range(10)
)
host_config = {
"image": self.image,
"container_name": f"{container_name_prefix}-{host}",
"stdin_open": True,
"tty": True,
"volumes": [v.map for v in optml_volumes],
"environment": environment,
"networks": {"sagemaker-local": {"aliases": [host]}},
}
if command != "process":
host_config["command"] = command
else:
if self.container_entrypoint:
host_config["entrypoint"] = self.container_entrypoint
if self.container_arguments:
host_config["entrypoint"] = host_config["entrypoint"] + self.container_arguments
# for GPU support pass in nvidia as the runtime, this is equivalent
# to setting --runtime=nvidia in the docker commandline.
if self.instance_type == "local_gpu":
host_config["runtime"] = "nvidia"
if command == "serve":
serving_port = (
sagemaker.utils.get_config_value(
"local.serving_port", self.sagemaker_session.config
)
or 8080
)
host_config.update({"ports": ["%s:8080" % serving_port]})
return host_config
def _create_tmp_folder(self):
"""Placeholder docstring"""
root_dir = sagemaker.utils.get_config_value(
"local.container_root", self.sagemaker_session.config
)
if root_dir:
root_dir = os.path.abspath(root_dir)
working_dir = tempfile.mkdtemp(dir=root_dir)
# Docker cannot mount Mac OS /var folder properly see
# https://forums.docker.com/t/var-folders-isnt-mounted-properly/9600
# Only apply this workaround if the user didn't provide an alternate storage root dir.
if root_dir is None and platform.system() == "Darwin":
working_dir = "/private{}".format(working_dir)
return os.path.abspath(working_dir)
def _build_optml_volumes(self, host, subdirs):
"""Generate a list of :class:`~sagemaker.local_session.Volume`.
These are required for the container to start. It takes a folder with
the necessary files for training and creates a list of opt volumes
that the Container needs to start.
Args:
host (str): container for which the volumes will be generated.
subdirs (list): list of subdirectories that will be mapped. For
example: ['input', 'output', 'model']
Returns: (list) List of :class:`~sagemaker.local_session.Volume`
"""
volumes = []
for subdir in subdirs:
host_dir = os.path.join(self.container_root, host, subdir)
container_dir = "/opt/ml/{}".format(subdir)
volume = _Volume(host_dir, container_dir)
volumes.append(volume)
return volumes
def _cleanup(self, dirs_to_delete=None):
"""Cleans up directories and the like.
Args:
dirs_to_delete:
"""
if dirs_to_delete:
for d in dirs_to_delete:
_delete_tree(d)
# Free the container config files.
for host in self.hosts:
container_config_path = os.path.join(self.container_root, host)
_delete_tree(container_config_path)
class _HostingContainer(Thread):
"""Placeholder docstring."""
def __init__(self, command):
"""Creates a new threaded hosting container.
Args:
command:
"""
Thread.__init__(self)
self.command = command
self.process = None
def run(self):
"""Placeholder docstring"""
self.process = subprocess.Popen(
self.command, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
try:
_stream_output(self.process)
except RuntimeError as e:
# _stream_output() doesn't have the command line. We will handle the exception
# which contains the exit code and append the command line to it.
msg = "Failed to run: %s, %s" % (self.command, str(e))
raise RuntimeError(msg)
def down(self):
"""Placeholder docstring"""
self.process.terminate()
class _Volume(object):
"""Represent a Volume that will be mapped to a container."""
def __init__(self, host_dir, container_dir=None, channel=None):
"""Create a Volume instance.
The container path can be provided as a container_dir or as a channel name but not both.
Args:
host_dir (str): path to the volume data in the host
container_dir (str): path inside the container that host_dir will be mapped to
channel (str): channel name that the host_dir represents. It will be mapped as
/opt/ml/input/data/<channel> in the container.
"""
if not container_dir and not channel:
raise ValueError("Either container_dir or channel must be declared.")
if container_dir and channel:
raise ValueError("container_dir and channel cannot be declared together.")
self.container_dir = container_dir if container_dir else "/opt/ml/input/data/" + channel
self.host_dir = host_dir
if platform.system() == "Darwin" and host_dir.startswith("/var"):
self.host_dir = os.path.join("/private", host_dir)
self.map = "{}:{}".format(self.host_dir, self.container_dir)
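# (Illustrative note, not part of the original module.) _Volume simply joins the
# host and container paths into the docker-compose volume syntax; with
# hypothetical paths:
#   _Volume('/tmp/model', '/opt/ml/model').map     -> '/tmp/model:/opt/ml/model'
#   _Volume('/tmp/train', channel='training').map  -> '/tmp/train:/opt/ml/input/data/training'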
def _stream_output(process):
"""Stream the output of a process to stdout
This function takes an existing process that will be polled for output.
Only stdout will be polled and sent to sys.stdout.
Args:
process (subprocess.Popen): a process that has been started with
stdout=PIPE and stderr=STDOUT
Returns (int): process exit code
"""
exit_code = None
while exit_code is None:
stdout = process.stdout.readline().decode("utf-8")
sys.stdout.write(stdout)
exit_code = process.poll()
if exit_code != 0:
raise RuntimeError("Process exited with code: %s" % exit_code)
return exit_code
def _check_output(cmd, *popenargs, **kwargs):
"""Makes a call to `subprocess.check_output` for the given command and args.
Args:
cmd:
*popenargs:
**kwargs:
"""
if isinstance(cmd, str):
cmd = shlex.split(cmd)
success = True
try:
output = subprocess.check_output(cmd, *popenargs, **kwargs)
except subprocess.CalledProcessError as e:
output = e.output
success = False
output = output.decode("utf-8")
if not success:
logger.error("Command output: %s", output)
raise Exception("Failed to run %s" % ",".join(cmd))
return output
def _create_processing_config_file_directories(root, host):
"""Creates the directory for the processing config files.
Args:
root: The root path.
host: The current host.
"""
for d in ["config"]:
os.makedirs(os.path.join(root, host, d))
def _create_config_file_directories(root, host):
"""Creates the directories for the config files.
Args:
root:
host:
"""
for d in ["input", "input/config", "output", "model"]:
os.makedirs(os.path.join(root, host, d))
def _delete_tree(path):
"""Makes a call to `shutil.rmtree` for the given path.
Args:
path:
"""
try:
shutil.rmtree(path)
except OSError as exc:
        # On Linux, when docker writes to a mounted volume it does so as the container's user,
        # which in most cases is root. When the container exits and we try to delete those
        # files we can't, because root owns them. We expect this, so we handle EACCES; any
        # other error is re-raised.
if exc.errno == errno.EACCES:
logger.warning("Failed to delete: %s Please remove it manually.", path)
else:
logger.error("Failed to delete: %s", path)
raise
def _aws_credentials(session):
"""Provides the AWS credentials of the session as a paired list of strings.
These can be used to set environment variables on command execution.
Args:
session:
"""
try:
creds = session.get_credentials()
access_key = creds.access_key
secret_key = creds.secret_key
token = creds.token
# The presence of a token indicates the credentials are short-lived and as such are risky
# to be used as they might expire while running.
# Long-lived credentials are available either through
# 1. boto session
# 2. EC2 Metadata Service (SageMaker Notebook instances or EC2 instances with roles
# attached them)
# Short-lived credentials available via boto session are permitted to support running on
# machines with no EC2 Metadata Service but a warning is provided about their danger
if token is None:
logger.info("Using the long-lived AWS credentials found in session")
return [
"AWS_ACCESS_KEY_ID=%s" % (str(access_key)),
"AWS_SECRET_ACCESS_KEY=%s" % (str(secret_key)),
]
if not _aws_credentials_available_in_metadata_service():
logger.warning(
"Using the short-lived AWS credentials found in session. They might expire while "
"running."
)
return [
"AWS_ACCESS_KEY_ID=%s" % (str(access_key)),
"AWS_SECRET_ACCESS_KEY=%s" % (str(secret_key)),
"AWS_SESSION_TOKEN=%s" % (str(token)),
]
logger.info(
"No AWS credentials found in session but credentials from EC2 Metadata Service are "
"available."
)
return None
except Exception as e: # pylint: disable=broad-except
logger.info("Could not get AWS credentials: %s", e)
return None
def _aws_credentials_available_in_metadata_service():
"""Placeholder docstring"""
import botocore
from botocore.credentials import InstanceMetadataProvider
from botocore.utils import InstanceMetadataFetcher
session = botocore.session.Session()
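    # InstanceMetadataProvider fetches role credentials from the EC2 Instance Metadata
    # Service; a non-None result below means such credentials are available.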
instance_metadata_provider = InstanceMetadataProvider(
iam_role_fetcher=InstanceMetadataFetcher(
timeout=session.get_config_variable("metadata_service_timeout"),
num_attempts=session.get_config_variable("metadata_service_num_attempts"),
user_agent=session.user_agent(),
)
)
    return instance_metadata_provider.load() is not None
def _write_json_file(filename, content):
"""Write the contents dict as json to the file.
Args:
filename:
content:
"""
with open(filename, "w") as f:
json.dump(content, f)
def _ecr_login_if_needed(boto_session, image):
"""Log into ECR, if needed.
Of note, only ECR images need login.
Args:
boto_session:
image:
"""
sagemaker_pattern = re.compile(sagemaker.utils.ECR_URI_PATTERN)
sagemaker_match = sagemaker_pattern.match(image)
if not sagemaker_match:
return False
# do we have the image?
if _check_output("docker images -q %s" % image).strip():
return False
if not boto_session:
raise RuntimeError(
"A boto session is required to login to ECR."
"Please pull the image: %s manually." % image
)
ecr = boto_session.client("ecr")
auth = ecr.get_authorization_token(registryIds=[image.split(".")[0]])
authorization_data = auth["authorizationData"][0]
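    # ECR's authorizationToken is base64("AWS:<password>"); decode it and keep only the password.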
raw_token = base64.b64decode(authorization_data["authorizationToken"])
    token = raw_token.decode("utf-8").partition("AWS:")[2]
ecr_url = auth["authorizationData"][0]["proxyEndpoint"]
cmd = "docker login -u AWS -p %s %s" % (token, ecr_url)
subprocess.check_output(cmd.split())
return True
def _pull_image(image):
"""Invokes the docker pull command for the given image.
Args:
image:
"""
pull_image_command = ("docker pull %s" % image).strip()
logger.info("docker command: %s", pull_image_command)
subprocess.check_output(pull_image_command.split())
logger.info("image pulled: %s", image)
| 38.072744
| 100
| 0.63787
|
a966b7c9eaf3c5732baf9acf04e4fb3cebd082fb
| 1,082
|
py
|
Python
|
guild/prepare_cmd.py
|
guildai/_guild-python-legacy
|
e552eff820d8edcfeb10b26bd5c8651548507b4a
|
[
"Apache-2.0"
] | null | null | null |
guild/prepare_cmd.py
|
guildai/_guild-python-legacy
|
e552eff820d8edcfeb10b26bd5c8651548507b4a
|
[
"Apache-2.0"
] | null | null | null |
guild/prepare_cmd.py
|
guildai/_guild-python-legacy
|
e552eff820d8edcfeb10b26bd5c8651548507b4a
|
[
"Apache-2.0"
] | null | null | null |
import guild.cli
import guild.cmd_support
import guild.op
import guild.op_support
def main(args):
op = _prepare_op(args)
if args.preview:
_preview(op)
else:
_prepare(op)
def _prepare_op(args):
project = guild.cmd_support.project_for_args(args)
section = guild.cmd_support.model_or_resource_for_args(args, project)
spec = section.attr("prepare")
if not spec:
_not_preparable_error(section)
return guild.op.Op(
cmd_args=guild.op_support.python_cmd_for_spec(spec, section),
cmd_env=guild.op_support.base_env(),
cmd_cwd=section.project.dir,
opdir_pattern=None,
meta={},
tasks=[])
def _not_preparable_error(section):
guild.cli.error(
"section%s does not support a prepare operation\n"
"Try 'guild prepare --help' for more information."
% _maybe_section_name(section))
def _maybe_section_name(section):
if section.name:
return " " + section
else:
return ""
def _preview(op):
guild.cmd_support.preview_op(op)
def _prepare(op):
op.run()
| 25.162791
| 73
| 0.665434
|
6757cec0fe70f5966d00bcd98fa379d2187e87d8
| 2,365
|
py
|
Python
|
tests/template_tests/syntax_tests/test_named_endblock.py
|
Yoann-Vie/esgi-hearthstone
|
115d03426c7e8e80d89883b78ac72114c29bed12
|
[
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null |
tests/template_tests/syntax_tests/test_named_endblock.py
|
Yoann-Vie/esgi-hearthstone
|
115d03426c7e8e80d89883b78ac72114c29bed12
|
[
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null |
tests/template_tests/syntax_tests/test_named_endblock.py
|
Yoann-Vie/esgi-hearthstone
|
115d03426c7e8e80d89883b78ac72114c29bed12
|
[
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null |
from django.template import TemplateSyntaxError
from django.test import SimpleTestCase
from ..utils import setup
class NamedEndblockTests(SimpleTestCase):
@setup({'namedendblocks01': '1{% block first %}_{% block second %}'
'2{% endblock second %}_{% endblock first %}3'})
def test_namedendblocks01(self):
output = self.engine.render_to_string('namedendblocks01')
self.assertEqual(output, '1_2_3')
# Unbalanced blocks
@setup({'namedendblocks02': '1{% block first %}_{% block second %}'
'2{% endblock first %}_{% endblock second %}3'})
def test_namedendblocks02(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('namedendblocks02')
@setup({'namedendblocks03': '1{% block first %}_{% block second %}'
'2{% endblock %}_{% endblock second %}3'})
def test_namedendblocks03(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('namedendblocks03')
@setup({'namedendblocks04': '1{% block first %}_{% block second %}'
'2{% endblock second %}_{% endblock third %}3'})
def test_namedendblocks04(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('namedendblocks04')
@setup({'namedendblocks05': '1{% block first %}_{% block second %}2{% endblock first %}'})
def test_namedendblocks05(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('namedendblocks05')
# Mixed named and unnamed endblocks
@setup({'namedendblocks06': '1{% block first %}_{% block second %}'
'2{% endblock %}_{% endblock first %}3'})
def test_namedendblocks06(self):
"""
Mixed named and unnamed endblocks
"""
output = self.engine.render_to_string('namedendblocks06')
self.assertEqual(output, '1_2_3')
@setup({'namedendblocks07': '1{% block first %}_{% block second %}'
'2{% endblock second %}_{% endblock %}3'})
def test_namedendblocks07(self):
output = self.engine.render_to_string('namedendblocks07')
self.assertEqual(output, '1_2_3')
| 43.796296
| 95
| 0.600423
|
6bdb6686d6a757d0d7221845d5580c32aa0e956a
| 727
|
py
|
Python
|
fake_private.py
|
iBELLinc/charlie-cello
|
c64904ae7538bf913150fd64f13eaa04d9ea4679
|
[
"MIT"
] | null | null | null |
fake_private.py
|
iBELLinc/charlie-cello
|
c64904ae7538bf913150fd64f13eaa04d9ea4679
|
[
"MIT"
] | 1
|
2021-08-12T14:51:34.000Z
|
2021-08-12T14:51:34.000Z
|
fake_private.py
|
iBELLinc/charlie-cello
|
c64904ae7538bf913150fd64f13eaa04d9ea4679
|
[
"MIT"
] | null | null | null |
import discord
# Information that should not be publicly accessible
TOKEN = "<placeholder>" # You will need to add your own token for testing
bot_admin = None  # <int placeholder> UID of bot admin
guild_id = None  # <int placeholder> The guild ID that the bot is active on
bot_channel = None  # <int placeholder> ID of the channel that bot posts updates to
global CLIENT # The Bot
global GUILD # The Server
global BOTADMIN # The Bot Admin
global SERVEROWNER # The Owner of the Server
global BOT_CHANNEL # The channel that the bot speaks in
global ROLES # All server roles
CLIENT = None
GUILD = None
BOTADMIN = None
SERVEROWNER = None
BOT_CHANNEL = None
ROLES = {}
| 33.045455
| 85
| 0.678129
|
2c341eaa7befc5acb96ddc83d19c25d64c742ab4
| 3,681
|
py
|
Python
|
q_learning.py
|
zholland/CMPUT690Project
|
28063b396d14942d98bf11eb597a10dd09f2c9e8
|
[
"MIT"
] | 1
|
2019-03-29T19:26:02.000Z
|
2019-03-29T19:26:02.000Z
|
q_learning.py
|
zholland/CMPUT690Project
|
28063b396d14942d98bf11eb597a10dd09f2c9e8
|
[
"MIT"
] | null | null | null |
q_learning.py
|
zholland/CMPUT690Project
|
28063b396d14942d98bf11eb597a10dd09f2c9e8
|
[
"MIT"
] | null | null | null |
import gym
from gym.envs import register
from joblib import Parallel, delayed
from n_step_method import NStepMethodBase
import numpy as np
from tile_coding_action_value_function import TileCodingActionValueFunction
class Qlearning(NStepMethodBase):
def __init__(self, env, alpha, epsilon, gamma, action_value_function, epsilon_decay_factor=0.95):
super().__init__(env, alpha, epsilon, 1, gamma, action_value_function)
self.epsilon_decay_factor = epsilon_decay_factor
def do_learning(self, num_episodes, show_env=False):
for episodeNum in range(num_episodes):
S = self.env.reset()
A = self.epsilon_greedy_action(S)
done = False
Rsum = 0
while not done:
if show_env:
self.env.render()
Snext, R, done, info = self.env.step(A)
Rsum += R
# Anext = random.randint(0,2)
Anext = self.epsilon_greedy_action(Snext)
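                # Q-learning TD update: move Q(S,A) by alpha * (R + gamma * max_a Q(S',a) - Q(S,A))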
self.action_value_function.update(S, A, self.alpha * (
R + self.gamma * np.max(self.action_value_function.action_values(Snext)) - self.action_value_function.value(S,A)))
S = Snext
A = Anext
self.epsilon *= self.epsilon_decay_factor
# print(Rsum)
self.episode_return.append(Rsum)
if episodeNum >= 50 and np.mean(self.episode_return[episodeNum - 50:episodeNum]) > 195.0:
break
class RewardsInfo:
def __init__(self, mean_return):
self.mean_return = mean_return
def do_experiment(parameters):
alpha, epsilon = parameters
env = gym.make('MountainCar-v0')
dim_ranges = [env.observation_space.high[i] - env.observation_space.low[i] for i in
range(0, env.observation_space.high.size)]
num_tilings = 8
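    # Tile coding discretizes the continuous observation space with 8 overlapping tilings
    # over 2048 tiles; the step size is divided by num_tilings below so the effective
    # learning rate stays comparable across tilings.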
action_value_function = TileCodingActionValueFunction(env.observation_space.shape[0],
dim_ranges,
env.action_space.n,
num_tiles=2048,
num_tilings=num_tilings,
scale_inputs=True)
qlearning = Qlearning(env, alpha / num_tilings, epsilon, 1, action_value_function, epsilon_decay_factor=0.98)
qlearning.do_learning(1000, show_env=False)
episodes_completed = np.size(qlearning.episode_return)
print("****alpha: ", alpha, ", epsilon: ",epsilon, "*****")
print("Mean return: ", np.mean(qlearning.episode_return))
print("Last 100 Episodes window: ", np.mean(qlearning.episode_return[episodes_completed - 100:episodes_completed]))
print("Total episodes: ", np.size(qlearning.episode_return))
print("Total time steps: ", np.abs(np.sum(qlearning.episode_return)))
return RewardsInfo(np.mean(qlearning.episode_return[episodes_completed - 100:episodes_completed]))
if __name__ == "__main__":
register(
id='MountainCar-v3',
entry_point='gym.envs.classic_control:MountainCarEnv',
max_episode_steps=10000,
reward_threshold=-110.0,
)
rewards_list = []
# alphas = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
# alphas = [0.3, 0.4, 0.5, 0.6, 0.7]
alphas = [0.4]
epsilons = [0.1]
parameters_list = [(alpha, epsilon) for alpha in alphas for epsilon in epsilons]
for reward_info in Parallel(n_jobs=2)(
delayed(do_experiment)(parameters) for parameters in parameters_list):
rewards_list.append(reward_info)
| 41.829545
| 130
| 0.609889
|
7d3fe212e40c589ddd445f5b737dbfa25957218e
| 1,053
|
py
|
Python
|
setup.py
|
hredestig/cookiecutter-decaf-python
|
0d25e474996d5b653a6d4042f2ec6bf5ecce1f1b
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
hredestig/cookiecutter-decaf-python
|
0d25e474996d5b653a6d4042f2ec6bf5ecce1f1b
|
[
"Apache-2.0"
] | 1
|
2017-08-04T10:34:53.000Z
|
2017-08-04T10:34:53.000Z
|
setup.py
|
hredestig/cookiecutter-decaf-python
|
0d25e474996d5b653a6d4042f2ec6bf5ecce1f1b
|
[
"Apache-2.0"
] | 1
|
2017-08-03T13:36:31.000Z
|
2017-08-03T13:36:31.000Z
|
#!/usr/bin/env python
from distutils.core import setup
setup(
name='cookiecutter-decaf-python',
packages=[],
version='0.1.0',
description='Cookiecutter template for a Python package at Biosustain',
author='Henning Redestig',
license='BSD',
author_email='henred@dtu.dk',
url='https://github.com/biosustain/cookiecutter-decaf-python',
keywords=['cookiecutter', 'template', 'package', ],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: Implementation :: CPython',
'Topic :: Software Development',
],
)
| 35.1
| 75
| 0.614435
|
1c3f581920ca6d909ea815a12c43b203410bfd34
| 1,113
|
py
|
Python
|
bluelog/extensions.py
|
meizhaohui/bluelog
|
e4ea274cec5abe78b142f23f99675d3fdf5f1170
|
[
"MIT"
] | null | null | null |
bluelog/extensions.py
|
meizhaohui/bluelog
|
e4ea274cec5abe78b142f23f99675d3fdf5f1170
|
[
"MIT"
] | null | null | null |
bluelog/extensions.py
|
meizhaohui/bluelog
|
e4ea274cec5abe78b142f23f99675d3fdf5f1170
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
"""
@Author : Zhaohui Mei(梅朝辉)
@Email : mzh.whut@gmail.com
@Time : 2018/11/18 20:53
@File : extensions.py
@Version : 1.0
@Interpreter: Python3.6.2
@Software: PyCharm
@Description: Instantiation of the Flask extension objects
"""
from flask_bootstrap import Bootstrap
from flask_sqlalchemy import SQLAlchemy
from flask_moment import Moment
from flask_ckeditor import CKEditor
from flask_mail import Mail
from flask_login import LoginManager
from flask_wtf.csrf import CSRFProtect
# Create extension instances
bootstrap = Bootstrap()
db = SQLAlchemy()
moment = Moment()
ckeditor = CKEditor()
mail = Mail()
login_manager = LoginManager()  # User login management
csrf = CSRFProtect()  # Use CSRFProtect to enable CSRF protection
# View protection settings
login_manager.login_view = 'auth.login'  # Redirect unauthenticated users to this view
login_manager.login_message_category = 'warning'  # Flash message category
# Message flashed when an unauthenticated user visits a protected view
login_manager.login_message = 'Please login to access this page.(请先登陆!)'
@login_manager.user_loader
def load_user(user_id):
"""用户加载函数,FLask-Login用于获取当前用户的对象,必须要设置"""
from bluelog.models import Admin
user = Admin.query.get(int(user_id))
return user
| 25.295455
| 73
| 0.728661
|
e165e1c4b8a4045fd95494d88dcb2b2bdd578bf2
| 10,768
|
py
|
Python
|
kafka_utils/kafka_cluster_manager/cmds/rebalance.py
|
akki/kafka-utils
|
fd2272fd2db61bc68113ef97b61194959fbd4525
|
[
"Apache-2.0"
] | 1
|
2020-02-27T22:01:45.000Z
|
2020-02-27T22:01:45.000Z
|
kafka_utils/kafka_cluster_manager/cmds/rebalance.py
|
akki/kafka-utils
|
fd2272fd2db61bc68113ef97b61194959fbd4525
|
[
"Apache-2.0"
] | null | null | null |
kafka_utils/kafka_cluster_manager/cmds/rebalance.py
|
akki/kafka-utils
|
fd2272fd2db61bc68113ef97b61194959fbd4525
|
[
"Apache-2.0"
] | 3
|
2020-02-03T13:54:03.000Z
|
2021-06-20T00:59:32.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import print_function
import logging
import sys
import six
from .command import ClusterManagerCmd
from kafka_utils.kafka_cluster_manager.cluster_info.display \
import display_cluster_topology_stats
from kafka_utils.kafka_cluster_manager.cluster_info.stats \
import get_replication_group_imbalance_stats
from kafka_utils.util import positive_float
from kafka_utils.util import positive_int
from kafka_utils.util.validation import assignment_to_plan
from kafka_utils.util.validation import validate_plan
DEFAULT_MAX_PARTITION_MOVEMENTS = 1
DEFAULT_MAX_LEADER_CHANGES = 5
class RebalanceCmd(ClusterManagerCmd):
def __init__(self):
super(RebalanceCmd, self).__init__()
self.log = logging.getLogger('ClusterRebalance')
def build_subparser(self, subparsers):
subparser = subparsers.add_parser(
'rebalance',
description='Rebalance cluster by moving partitions across brokers '
'and changing the preferred replica.',
help='This command is used to rebalance a Kafka cluster. Based on '
            'the given flags this tool will generate and submit a reassignment '
            'plan that will evenly distribute partitions and leaders '
            'across the brokers of the cluster. The replication groups option '
            'moves the replicas of the same partition to separate replication '
            'groups, making the cluster resilient to the failure of one or more zones.'
)
subparser.add_argument(
'--replication-groups',
action='store_true',
help='Evenly distributes replicas over replication-groups.',
)
subparser.add_argument(
'--brokers',
action='store_true',
help='Evenly distributes partitions optimally over brokers'
' with minimal movements for each replication-group.',
)
subparser.add_argument(
'--leaders',
action='store_true',
help='Evenly distributes leaders optimally over brokers.',
)
subparser.add_argument(
'--max-partition-movements',
type=positive_int,
default=DEFAULT_MAX_PARTITION_MOVEMENTS,
help='Maximum number of partition-movements in final set of actions.'
' DEFAULT: %(default)s. RECOMMENDATION: Should be at least max '
'replication-factor across the cluster.',
)
subparser.add_argument(
'--max-leader-changes',
type=positive_int,
default=DEFAULT_MAX_LEADER_CHANGES,
help='Maximum number of actions with leader-only changes.'
' DEFAULT: %(default)s',
)
subparser.add_argument(
'--max-movement-size',
type=positive_float,
default=None,
help='Maximum total size of the partitions moved in the final set'
' of actions. Since each PartitionMeasurer implementation'
' defines its own notion of size, the size unit to use will'
' depend on the selected PartitionMeasurer implementation.'
' DEFAULT: No limit.'
' RECOMMENDATION: Should be at least the maximum partition-size'
' on the cluster.',
)
subparser.add_argument(
'--auto-max-movement-size',
action='store_true',
help='Set max-movement-size to the size of the largest partition'
' in the cluster.',
)
subparser.add_argument(
'--show-stats',
action='store_true',
help='Output post-rebalance cluster topology stats.',
)
subparser.add_argument(
'--score-improvement-threshold',
type=positive_float,
default=None,
help='The minimum required improvement in cluster topology score'
' for an assignment to be applied. Default: None',
)
return subparser
def run_command(self, cluster_topology, cluster_balancer):
"""Get executable proposed plan(if any) for display or execution."""
# The ideal weight of each broker is total_weight / broker_count.
# It should be possible to remove partitions from each broker until
# the weight of the broker is less than this ideal value, otherwise it
# is impossible to balance the cluster. If --max-movement-size is too
# small, exit with an error.
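        # For example, with a total partition weight of 90 spread over 3 brokers the ideal
        # weight per broker is 30; if any single broker already holds at least that much
        # weight in partitions too large to move, no allowed sequence of moves can balance
        # the cluster, so we report the required minimum size and exit.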
if self.args.max_movement_size:
total_weight = sum(
partition.weight
for partition in six.itervalues(cluster_topology.partitions)
)
broker_count = len(cluster_topology.brokers)
optimal_weight = total_weight / broker_count
broker, max_unmovable_on_one_broker = max((
(broker, sum(
partition.weight
for partition in broker.partitions
if partition.size > self.args.max_movement_size
))
for broker in cluster_topology.brokers.values()),
key=lambda t: t[1],
)
if max_unmovable_on_one_broker >= optimal_weight:
sorted_partitions = sorted(
[
partition
for partition in broker.partitions
if partition.size > self.args.max_movement_size
],
reverse=True,
key=lambda partition: partition.size,
)
for partition in sorted_partitions:
max_unmovable_on_one_broker -= partition.weight
if max_unmovable_on_one_broker <= optimal_weight:
required_max_movement_size = partition.size
break
self.log.error(
                    'Max movement size {max_movement_size} is too small; it will'
                    ' not be possible to balance the cluster. A max movement'
' size of {required} or higher is required.'.format(
max_movement_size=self.args.max_movement_size,
required=required_max_movement_size,
)
)
sys.exit(1)
elif self.args.auto_max_movement_size:
self.args.max_movement_size = max(
partition.size
for partition in six.itervalues(cluster_topology.partitions)
)
self.log.info(
'Auto-max-movement-size: using {max_movement_size} as'
' max-movement-size.'.format(
max_movement_size=self.args.max_movement_size,
)
)
base_assignment = cluster_topology.assignment
base_score = cluster_balancer.score()
rg_imbalance, _ = get_replication_group_imbalance_stats(
list(cluster_topology.rgs.values()),
list(cluster_topology.partitions.values())
)
cluster_balancer.rebalance()
assignment = cluster_topology.assignment
score = cluster_balancer.score()
new_rg_imbalance, _ = get_replication_group_imbalance_stats(
list(cluster_topology.rgs.values()),
list(cluster_topology.partitions.values())
)
if self.args.show_stats:
display_cluster_topology_stats(cluster_topology, base_assignment)
if base_score is not None and score is not None:
print('\nScore before: %f' % base_score)
print('Score after: %f' % score)
print('Score improvement: %f' % (score - base_score))
if not validate_plan(
assignment_to_plan(assignment),
assignment_to_plan(base_assignment),
):
self.log.error('Invalid latest-cluster assignment. Exiting.')
sys.exit(1)
if self.args.score_improvement_threshold:
if base_score is None or score is None:
self.log.error(
'%s cannot assign scores so --score-improvement-threshold'
' cannot be used.',
cluster_balancer.__class__.__name__,
)
return
else:
score_improvement = score - base_score
if score_improvement >= self.args.score_improvement_threshold:
self.log.info(
'Score improvement %f is greater than the threshold %f.'
' Continuing to apply the assignment.',
score_improvement,
self.args.score_improvement_threshold,
)
elif new_rg_imbalance < rg_imbalance:
self.log.info(
'Score improvement %f is less than the threshold %f,'
' but replica balance has improved. Continuing to'
' apply the assignment.',
score_improvement,
self.args.score_improvement_threshold,
)
else:
self.log.info(
'Score improvement %f is less than the threshold %f.'
' Assignment will not be applied.',
score_improvement,
self.args.score_improvement_threshold,
)
return
# Reduce the proposed assignment based on max_partition_movements
# and max_leader_changes
reduced_assignment = self.get_reduced_assignment(
base_assignment,
assignment,
self.args.max_partition_movements,
self.args.max_leader_changes,
)
if reduced_assignment:
self.process_assignment(reduced_assignment)
else:
self.log.info("Cluster already balanced. No actions to perform.")
| 41.57529
| 81
| 0.589432
|
84e7800edc331db293948b4363242ed4e50e9659
| 1,880
|
py
|
Python
|
app/app.py
|
salmedina/hmr
|
ad4a272712078edb0abe4e19dde1b6b4ced7d7f1
|
[
"MIT"
] | null | null | null |
app/app.py
|
salmedina/hmr
|
ad4a272712078edb0abe4e19dde1b6b4ced7d7f1
|
[
"MIT"
] | null | null | null |
app/app.py
|
salmedina/hmr
|
ad4a272712078edb0abe4e19dde1b6b4ced7d7f1
|
[
"MIT"
] | null | null | null |
import argparse
import flask
from flask import Flask, render_template, request
from flask_bootstrap import Bootstrap
import os
import json
from PIL import ImageDraw, Image
UPLOAD_FOLDER = os.path.join('static', 'uploads')
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
Bootstrap(app)
def draw_bboxex(img_path, bboxes, selected_indices, save_path):
im = Image.open(img_path)
for idx, bbox in enumerate(bboxes):
if idx in selected_indices:
draw = ImageDraw.Draw(im)
print(bbox)
draw.rectangle(bbox, fill=None)
    # save the annotated image
im.save(save_path)
@app.route('/', methods=['GET', 'POST'])
@app.route('/index.html', methods=['GET', 'POST'])
def main():
display_image_path = os.path.join(app.config['UPLOAD_FOLDER'], 'meva_sample_3.png')
return render_template('index.html', display_image=display_image_path)
@app.route('/renderbbox', methods=['GET'])
def render_bbox():
display_image_path = os.path.join(app.config['UPLOAD_FOLDER'], 'meva_sample_3.png')
save_image_path = os.path.join(app.config['UPLOAD_FOLDER'], 'meva_render.png')
print(save_image_path)
persons_selected = request.args.getlist('jsdata[]')
persons_selected = [int(person) for person in persons_selected]
print(persons_selected)
person_bboxes = [[319, 43, 539, 427], [0, 2, 245, 485], [95, 132, 429, 551]]
draw_bboxex(display_image_path, person_bboxes, persons_selected, save_image_path)
return json.dumps({"url": save_image_path}), 200, {'Content-Type': 'application/json'}
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--ip', type=str, default='0.0.0.0', help='Server IP')
parser.add_argument('--port', type=int, default=5000, help='Server port')
args = parser.parse_args()
app.run(debug=True, host=args.ip, port=args.port)
| 34.814815
| 90
| 0.703191
|
a7b7cf4b615a4796871aec49cedd28069b252a4a
| 986
|
py
|
Python
|
lesson001/test_lesson_01fuzzy.py
|
victorkaplunov/QAA_training
|
4dcba01f934bbeed75c3d909f6dcd9228be673cf
|
[
"MIT"
] | null | null | null |
lesson001/test_lesson_01fuzzy.py
|
victorkaplunov/QAA_training
|
4dcba01f934bbeed75c3d909f6dcd9228be673cf
|
[
"MIT"
] | null | null | null |
lesson001/test_lesson_01fuzzy.py
|
victorkaplunov/QAA_training
|
4dcba01f934bbeed75c3d909f6dcd9228be673cf
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
# -*- encoding=utf8 -*-
"""Test of web service ip-api.com. Example of fuzzy testing and using
JSON schema. This test checks the default parameters of the JSON version."""
import json
import pytest
import requests
from faker import Faker
from faker.providers import internet
import jsonschema
REPEAT_COUNT = 5
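# Parametrizing the fixture below with REPEAT_COUNT dummy values makes every test that
# uses it run REPEAT_COUNT times, each time with a freshly generated random IP address.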
@pytest.fixture(params=list(range(REPEAT_COUNT)))
def ip_address():
"""Create random IP addresses."""
fake = Faker()
fake.add_provider(internet)
return iter([fake.ipv4_public()])
def test_fuzzy(ip_address):
"""Repeat request ip-api.com with random IP address end check JSON
from response with given JSON schema."""
with open('lesson001/json_schema.json', 'r', encoding='utf8') as file:
file_data = file.read()
response = requests.get('http://ip-api.com/json/' + next(ip_address))
print(json.dumps(response.json(), indent=4))
assert jsonschema.validate(response.json(), json.loads(file_data)) is None
| 27.388889
| 78
| 0.71501
|
4b83e12b9e4ab5cfb2fb0eb3de3361cbb6f85beb
| 1,871
|
py
|
Python
|
static-website/setup.py
|
onema/cdk-constructs
|
8c25af39e0803c804a3c22541aa303904cde34b8
|
[
"Apache-2.0"
] | 1
|
2021-07-04T02:40:22.000Z
|
2021-07-04T02:40:22.000Z
|
static-website/setup.py
|
onema/cdk-constructs
|
8c25af39e0803c804a3c22541aa303904cde34b8
|
[
"Apache-2.0"
] | 1
|
2021-06-26T22:31:06.000Z
|
2021-06-26T22:31:06.000Z
|
static-website/setup.py
|
onema/cdk-constructs
|
8c25af39e0803c804a3c22541aa303904cde34b8
|
[
"Apache-2.0"
] | null | null | null |
from setuptools import setup, find_packages
with open("README.md") as fp:
long_description = fp.read()
__version__ = "0.1.0"
setup(
name="onema-cdk.static-website",
version=__version__,
description="A CDK Python construct to create static S3 websites. This is a port of the AWS static site example https://github.com/aws-samples/aws-cdk-examples/blob/master/typescript/static-site/static-site.ts",
long_description=long_description,
long_description_content_type="text/markdown",
author="Juan Manuel Torres",
author_email="software@onema.io",
url="https://github.com/onema/cdk-constructs", # project home page, if any
project_urls={
"Bug Tracker": "https://github.com/onema/cdk-constructs/issues",
"Documentation": "https://github.com/onema/cdk-constructs",
"Source Code": "https://github.com/onema/cdk-constructs",
},
packages=find_packages(exclude=["ez_setup", "test", "test.*"]),
install_requires=[
"aws-cdk.core",
"aws-cdk.aws-events",
"aws-cdk.aws-events-targets",
"aws-cdk.aws-certificatemanager",
"aws-cdk.aws-cloudfront",
"aws-cdk.aws-route53",
"aws-cdk.aws-route53-targets",
"aws-cdk.aws-s3",
"aws-cdk.aws-s3-deployment",
],
python_requires=">=3.6",
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: JavaScript",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Topic :: Software Development :: Code Generators",
"Topic :: Utilities",
"Typing :: Typed",
],
)
| 30.672131
| 215
| 0.62961
|
4fd9f4829d4206ba0eed13af296eb14c1f784303
| 2,690
|
py
|
Python
|
estatisticas_facebook/faceusers/models.py
|
danieldourado/estatisticas_facebook_django
|
67274e647cf9e2261f1a7810cd9862a4040dfc06
|
[
"MIT"
] | 2
|
2017-12-22T01:00:22.000Z
|
2017-12-22T11:14:40.000Z
|
estatisticas_facebook/faceusers/models.py
|
danieldourado/estatisticas_facebook_django
|
67274e647cf9e2261f1a7810cd9862a4040dfc06
|
[
"MIT"
] | 18
|
2017-12-14T12:04:45.000Z
|
2022-03-11T23:23:05.000Z
|
estatisticas_facebook/faceusers/models.py
|
danieldourado/estatisticas_facebook_django
|
67274e647cf9e2261f1a7810cd9862a4040dfc06
|
[
"MIT"
] | 1
|
2021-03-27T16:18:56.000Z
|
2021-03-27T16:18:56.000Z
|
from django.core.urlresolvers import reverse
from django.db import models
from estatisticas_facebook.util.graph import *
def getFaceUser(user_json):
temp_user, created = FaceUsers.objects.get_or_create(id=user_json.get('id'))
temp_user.name = user_json.get('name')
return temp_user
def setInteraction(model, interaction):
interaction = interaction.lower()
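    # Reaction types are counted twice: once in the aggregate "reactions" field and once
    # in their per-type post_reactions_<type>_total column; other interactions (comments,
    # shares) simply increment the field of the same name.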
if interaction.lower() in ('like','love','wow','haha','sad','angry', 'pride','thankful'):
model.reactions += 1
attribute = 'post_reactions_'+interaction+'_total'
value = getattr(model, attribute) + 1
setattr(model, attribute, value)
else:
value = getattr(model, interaction) + 1
setattr(model, interaction, value)
#debug('new user interaction saved: '+interaction)
def addInteraction(user_json, interaction):
face_user = getFaceUser(user_json)
setInteraction(face_user, interaction)
face_user.save()
return face_user
class FaceUsers(models.Model):
id = models.CharField(primary_key = True, max_length = 45)
name = models.CharField(max_length = 450, default="")
comments = models.IntegerField(default=0)
shares = models.IntegerField(default=0)
reactions = models.IntegerField(default=0)
post_reactions_like_total = models.IntegerField(default=0)
post_reactions_love_total = models.IntegerField(default=0)
post_reactions_wow_total = models.IntegerField(default=0)
post_reactions_haha_total = models.IntegerField(default=0)
post_reactions_sad_total = models.IntegerField(default=0)
post_reactions_angry_total = models.IntegerField(default=0)
post_reactions_pride_total = models.IntegerField(default=0)
post_reactions_thankful_total = models.IntegerField(default=0)
post_reactions_positivo_total = models.IntegerField(default=0)
post_reactions_negativo_total = models.IntegerField(default=0)
post_reactions_positivo_porcentagem = models.IntegerField(default=0)
post_reactions_negativo_porcentagem = models.IntegerField(default=0)
permalink_url = models.CharField(max_length = 450, default="")
created = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.id
def get_absolute_url(self):
return reverse('faceusers:detail', args=[str(self.id)])
| 47.192982
| 99
| 0.63829
|
730cae363d355073646ff1505a6738e7ce9dd3bb
| 24,549
|
py
|
Python
|
tfkit/test/test_model.py
|
jc-hou/TFkit
|
127f52174dfdfce8792b03bfc05eee9495875d6c
|
[
"Apache-2.0"
] | null | null | null |
tfkit/test/test_model.py
|
jc-hou/TFkit
|
127f52174dfdfce8792b03bfc05eee9495875d6c
|
[
"Apache-2.0"
] | null | null | null |
tfkit/test/test_model.py
|
jc-hou/TFkit
|
127f52174dfdfce8792b03bfc05eee9495875d6c
|
[
"Apache-2.0"
] | null | null | null |
import unittest
from torch import Tensor
from transformers import BertTokenizer, AutoModel, AutoTokenizer
import tfkit
import timeit
from tfkit.test import *
class TestModel(unittest.TestCase):
def testClas(self):
input = "One hundred thirty-four patients suspected of having pancreas cancer successfully underwent gray scale ultrasound examination of the pancreas ."
target = "a"
tokenizer = BertTokenizer.from_pretrained('voidful/albert_chinese_tiny')
pretrained = AutoModel.from_pretrained('voidful/albert_chinese_tiny')
model = tfkit.model.clas.Model(tokenizer, pretrained, tasks_detail={"taskA": ["a", "b"]})
for feature in tfkit.model.clas.get_feature_from_data(tokenizer, tasks={"taskA": ["a", "b"]},
task="taskA",
input=input, target=target, maxlen=512):
for k, v in feature.items():
feature[k] = [v, v]
print(feature)
# test train
print(model(feature))
self.assertTrue(isinstance(model(feature), Tensor))
# test eval
print(model(feature, eval=True))
model_dict = model(feature, eval=True)
self.assertTrue('label_prob_all' in model_dict)
self.assertTrue('label_map' in model_dict)
# test predict
tok_label = model.predict(task="taskA", input=input)
self.assertTrue(len(tok_label) == 2)
# test predict with top k 2
top_k_label, top_k_dict = model.predict(task="taskA", input=input, topk=2)
print("test predict with top k 2, ", top_k_label, top_k_dict)
self.assertTrue(len(top_k_label) == 2)
# test exceed 512
for merge_strategy in ['minentropy', 'maxcount', 'maxprob']:
result, model_dict = model.predict(task="taskA", input=" ".join([str(i) for i in range(2000)]),
merge_strategy=merge_strategy)
print(result, len(model_dict), model_dict)
self.assertTrue(isinstance(result, list))
self.assertTrue(len(result) == 1)
def testQA(self):
input = "梵 語 在 社 交 中 口 頭 使 用 , 並 且 在 早 期 古 典 梵 語 文 獻 的 發 展 中 維 持 口 頭 傳 統 。 在 印 度 , 書 寫 形 式 是 當 梵 語 發 展 成 俗 語 之 後 才 出 現 的 ; 在 書 寫 梵 語 的 時 候 , 書 寫 系 統 的 選 擇 受 抄 寫 者 所 處 地 域 的 影 響 。 同 樣 的 , 所 有 南 亞 的 主 要 書 寫 系 統 事 實 上 都 用 於 梵 語 文 稿 的 抄 寫 。 自 1 9 世 紀 晚 期 , 天 城 文 被 定 為 梵 語 的 標 準 書 寫 系 統 , 十 分 可 能 的 原 因 是 歐 洲 人 有 用 這 種 文 字 印 刷 梵 語 文 本 的 習 慣 。 最 早 的 已 知 梵 語 碑 刻 可 確 定 為 公 元 前 一 世 紀 。 它 們 採 用 了 最 初 用 於 俗 語 而 非 梵 語 的 婆 羅 米 文 。 第 一 個 書 寫 梵 語 的 證 據 , 出 現 在 晚 於 它 的 俗 語 的 書 寫 證 據 之 後 的 幾 個 世 紀 , 這 被 描 述 為 一 種 悖 論 。 在 梵 語 被 書 寫 下 來 的 時 候 , 它 首 先 用 於 行 政 、 文 學 或 科 學 類 的 文 本 。 宗 教 文 本 口 頭 傳 承 , 在 相 當 晚 的 時 候 才 「 不 情 願 」 地 被 書 寫 下 來 。 [Question] 最 初 梵 語 以 什 麼 書 寫 系 統 被 記 錄 下 來 ?"
target = [201, 205]
tokenizer = BertTokenizer.from_pretrained('voidful/albert_chinese_tiny')
pretrained = AutoModel.from_pretrained('voidful/albert_chinese_tiny')
model = tfkit.model.qa.Model(tokenizer, pretrained, maxlen=512)
for feature in tfkit.model.qa.get_feature_from_data(tokenizer, input, target, maxlen=512):
for k, v in feature.items():
feature[k] = [v, v]
# test train
print(model(feature))
self.assertTrue(isinstance(model(feature), Tensor))
# test eval
print(model(feature, eval=True))
model_dict = model(feature, eval=True)
self.assertTrue('label_prob_all' in model_dict)
self.assertTrue('label_map' in model_dict)
# test predict
result, model_dict = model.predict(input=input)
print("model_dict", model_dict, input, result)
self.assertTrue('label_prob_all' in model_dict[0])
self.assertTrue('label_map' in model_dict[0])
self.assertTrue(len(result) == 1)
# # test eval top k = 2
# top_k_label, top_k_dict = model.predict(input=input, topk=2)
# print("top_k_label", top_k_label)
# self.assertTrue(len(top_k_label) == 2)
# test exceed 512
for merge_strategy in ['minentropy', 'maxcount', 'maxprob']:
result, model_dict = model.predict(input=" ".join([str(i) for i in range(550)]),
merge_strategy=merge_strategy)
print(result, len(model_dict))
self.assertTrue(isinstance(result, list))
self.assertTrue(len(result) == 1)
def testTag(self):
tokenizer = BertTokenizer.from_pretrained('voidful/albert_chinese_small')
pretrained = AutoModel.from_pretrained('voidful/albert_chinese_small')
input = "在 歐 洲 , 梵 語 的 學 術 研 究 , 由 德 國 學 者 陸 特 和 漢 斯 雷 頓 開 創 。 後 來 威 廉 · 瓊 斯 發 現 印 歐 語 系 , 也 要 歸 功 於 對 梵 語 的 研 究 。 此 外 , 梵 語 研 究 , 也 對 西 方 文 字 學 及 歷 史 語 言 學 的 發 展 , 貢 獻 不 少 。 1 7 8 6 年 2 月 2 日 , 亞 洲 協 會 在 加 爾 各 答 舉 行 。 [SEP] 陸 特 和 漢 斯 雷 頓 開 創 了 哪 一 地 區 對 梵 語 的 學 術 研 究 ?"
target = "O A A O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O"
label = ["O", "A"]
model = tfkit.model.tag.Model(tokenizer=tokenizer, pretrained=pretrained, tasks_detail={"default": label})
# model, model_task, model_class = tfkit.utility.model_loader.load_trained_model('./cache/model.pt',
# 'voidful/albert_chinese_small')
# # test exceed 512
# for merge_strategy in ['minentropy']:
# result, model_dict = model.predict(
# input="""
# Rundfadsfdsfsfning 明朝(1368年1月23日-1644年4月25日[註 1])是中國歷史上最後一個由漢族建立的大一統王朝,歷經十二世、十六位皇帝,國祚二百七十六年[參 4]。\n\n元朝末年政治腐敗,種族紛爭,天災不斷,民不聊生,民變暴動屢禁不止,平民朱元璋加入紅巾軍並在其中乘勢崛起,跟隨佔據濠州的郭子興。郭子興死後,朱元璋被當時反抗軍擁立的小明王韓林兒封為左副元帥,並率部眾先後攻占滁州、和州等地,並最終攻佔集慶(今江蘇南京),採取朱升所建議的「高築牆,廣積糧,緩稱王」的政策,以鞏固根據地,讓士兵屯田積糧減少百姓負擔,以示自己為仁義之師而避免受敵。1364年,朱元璋稱吳王,建立西吳政權。1368年,在掃滅陳友諒、張士誠和方國珍等群雄勢力後,朱元璋於當年農曆正月初四日登基稱帝,立國號為大明[參 5],定都應天府(今南京市),其轄區稱為京師,由因皇室姓朱,故又稱朱明,之後以「驅逐胡虜,恢復中華」[參 6]為號召北伐中原[參 7][參 8],並收回了燕雲十六州[參 9],結束蒙元在中國漢地的統治,統一天下。\n\n明初天下大定,經過朱元璋的休養生息,社會經濟得以恢復和發展,國力迅速恢復,史稱洪武之治。朱元璋去世後,其孫朱允炆即位,但其在靖難之役中敗於駐守燕京的朱元璋第四子朱棣,也自此失蹤。朱棣登基後遷都至順天府(今北京市),將北平布政司升為京師,原京師改稱南京[參 3]。成祖朱棣時期,開疆拓土,又派遣鄭和七下西洋,此後許多漢人遠赴海外,國勢達到頂峰,史稱永樂盛世。其後的仁宗和宣宗時期國家仍處於興盛時期,史稱仁宣之治[參 10]。英宗和代宗時期,遭遇土木之變,國力中衰,經于謙等人抗敵,最終解除國家危機。憲宗和孝宗相繼與民休息,孝宗則力行節儉,減免稅賦,百姓安居樂業,史稱弘治中興[參 11]。武宗時期爆發了南巡之爭和寧王之亂。世宗即位初,引發大禮議之爭,他清除宦官和權臣勢力後總攬朝綱,實現嘉靖中興,並於屯門海戰與西草灣之戰中擊退葡萄牙殖民侵略,任用胡宗憲和俞大猷等將領平定東南沿海的倭患。世宗駕崩後經過隆慶新政國力得到恢復,神宗前期任用張居正,推行萬曆新政,國家收入大增,商品經濟空前繁榮、科學巨匠迭出、社會風尚呈現出活潑開放的新鮮氣息,史稱萬曆中興[參 12]。後經過萬曆三大征平定內憂外患,粉碎豐臣秀吉攻占朝鮮進而入明的計劃,然而因為國本之爭,皇帝逐漸疏於朝政,史稱萬曆怠政,同時東林黨爭也帶來了明中期的政治混亂。\n\n萬曆一朝成為明朝由盛轉衰的轉折期[參 13]。光宗繼位不久因紅丸案暴斃,熹宗繼承大統改元天啟,天啟年間魏忠賢閹黨禍亂朝綱,至明思宗即位後剷除閹黨,但閹黨倒臺後,黨爭又起,政治腐敗以及連年天災[註 2][註 3],導致國力衰退,最終爆發大規模民變。1644年4月25日(舊曆三月十九),李自成所建立的大順軍攻破北京,思宗自縊於煤山,是為甲申之變。隨後吳三桂倒戈相向,滿族建立的滿清入主中原。明朝宗室於江南地區相繼成立南明諸政權,而原本反明的流寇在李自成等領袖死後亦加入南明陣營,這些政權被清朝統治者先後以「為君父報仇」為名各個殲滅,1662年,明朝宗室最後政權被剷除,永曆帝被俘後被殺,滿清又陸續擊敗各地反抗軍,以及攻取台灣、澎湖,1683年,奉大明為正朔的明鄭向清朝投降,漢族抗爭勢力方為清朝所消滅。[參 16]。\n\n明代的核心領土囊括漢地[註 4],東北到外興安嶺及黑龍江流域[參 19],後縮為遼河流域;初年北達戈壁沙漠一帶,後改為今長城;西北至新疆哈密,後改為嘉峪關;西南臨孟加拉灣[註 5],後折回約今雲南境;曾經在今中國東北、新疆東部及西藏等地設有羈縻機構[參 21]。不過,明朝是否實際統治了西藏國際上尚存在有一定的爭議[註 6]。明成祖時期曾短暫征服及統治安南[參 22],永樂二十二年(1424年),明朝國土面積達到極盛,在東南亞設置舊港宣慰司[註 7]等行政機構,加強對東南洋一帶的管理[參 23][參 24]。\n\n明代商品經濟繁榮,出現商業集鎮,而手工業及文化藝術呈現世俗化趨勢[參 25]。根據《明實錄》所載的人口峰值於成化十五年(1479年)達七千餘萬人[參 26],不過許多學者考慮到當時存在大量隱匿戶口,故認為明朝人口峰值實際上逾億[參 27],還有學者認為晚明人口峰值接近2億[註 8]。這一時期,其GDP總量所占的世界比例在中國古代史上也是最高的,1600年明朝GDP總量為960億美元,占世界經濟總量的29.2%,晚明中國人均GDP在600美元[註 9]。\n\n明朝政治則是權力趨於集中,明太祖在誅殺胡惟庸後廢除傳統的丞相制,六部直接對皇帝負責,後來設置內閣;地方上由承宣布政使司、提刑按察使司、都指揮使司分掌權力,加強地方管理。仁宗、宣宗之後,文官治國的思想逐漸濃厚,行政權向內閣和六部轉移。同時還設有都察院等監察機構,為加強對全國臣民的監視,明太祖設立特務機構錦衣衛,明成祖設立東廠,明憲宗時再設西廠(後取消),明武宗又設內行廠(後取消),合稱「廠衛」。但明朝皇帝並非完全獨斷獨行,有許多事還必須經過經廷推、廷議、廷鞫程序,同時,能將原旨退還的給事中亦可對皇權形成制衡。[參 33]到了後期皇帝出現了怠政,宦官行使大權的陋習[參 3],儘管決策權始終集中在皇帝手中,然而政務大部分已經由內閣處理,此外,到了明代中晚期文官集團的集體意見足以與皇帝抗衡,在遇到事情決斷兩相僵持不下時,也容易產生一種類似於「憲政危機(英語:Constitutional crisis)」的情況,因此「名義上他是天子,實際上他受制於廷臣。」[參 34]但明朝皇權受制於廷臣主要是基於道德上而非法理上,因為明朝當時風氣普遍注重名節,受儒家教育的皇帝通常不願被冠以「昏君」之名。但雖然皇權受制衡,皇帝仍可任意動用皇權,例如明世宗「大禮議」事件最後以廷杖朝臣多人的方式結束[參 35],明神宗在國本之爭失利後也以長期拒絕參與政事向朝臣們示威[1][2][3]。\n\n有學者認為明代是繼漢唐之後的黃金時期,也被稱為最後一個可以和漢唐媲美的盛世[參 36]。清代張廷玉等修的官修《明史》評價明朝為「治隆唐宋」[註 10]、「遠邁漢唐」[參 37]。
# """,
# merge_strategy=merge_strategy, start_contain="B_",
# end_contain="I_")
# print(result)
# self.assertTrue(isinstance(result, list))
for feature in tfkit.model.tag.get_feature_from_data(tokenizer, labels=label, input=input, target=target,
maxlen=512):
for k, v in feature.items():
feature[k] = [v, v]
self.assertTrue(isinstance(model(feature), Tensor))
print(model(feature))
# test eval
model_dict = model(feature, eval=True)
self.assertTrue('label_prob_all' in model_dict)
self.assertTrue('label_map' in model_dict)
# test predict
result, model_dict = model.predict(input=input, start_contain="A", end_contain="A")
self.assertTrue('label_prob_all' in model_dict[0])
self.assertTrue('label_map' in model_dict[0])
print("result", result, len(result))
self.assertTrue(isinstance(result, list))
# test exceed 512
for merge_strategy in ['minentropy', 'maxcount', 'maxprob']:
result, model_dict = model.predict(input=" ".join([str(i) for i in range(1000)]),
merge_strategy=merge_strategy, start_contain="A", end_contain="A")
print(result)
self.assertTrue(isinstance(result, list))
def testMask(self):
input = "今 天 [MASK] 情 [MASK] 好"
target = "心 很"
tokenizer = BertTokenizer.from_pretrained('voidful/albert_chinese_small')
pretrained = AutoModel.from_pretrained('voidful/albert_chinese_small')
model = tfkit.model.mask.Model(tokenizer, pretrained)
for feature in tfkit.model.mask.get_feature_from_data(tokenizer, input=input, target=target, maxlen=20):
for k, v in feature.items():
feature[k] = [v]
print(feature)
self.assertTrue(isinstance(model(feature), Tensor))
model_dict = model(feature, eval=True)
print(model_dict)
self.assertTrue('label_map' in model_dict)
self.assertTrue('label_prob' in model_dict)
result, model_dict = model.predict(input=input)
self.assertTrue('label_prob' in model_dict[0])
self.assertTrue('label_map' in model_dict[0])
print("predict", result, len(result))
self.assertTrue(isinstance(result, list))
self.assertTrue(isinstance(result[0][0], str))
# test exceed 512
result, model_dict = model.predict(input="T " * 512)
self.assertTrue(isinstance(result, list))
self.assertTrue(len(result[0][0]) == 0)
def testMCQ(self):
input = "你 是 誰 [SEP] [MASK] 我 [MASK] 你 [MASK] 他"
target = 1
tokenizer = BertTokenizer.from_pretrained('voidful/albert_chinese_small')
pretrained = AutoModel.from_pretrained('voidful/albert_chinese_small')
model = tfkit.model.mcq.Model(tokenizer, pretrained)
for feature in tfkit.model.mcq.get_feature_from_data(tokenizer, input=input, target=target, maxlen=20):
for k, v in feature.items():
feature[k] = [v]
print(feature)
self.assertTrue(isinstance(model(feature), Tensor))
model_dict = model(feature, eval=True)
print(model_dict)
self.assertTrue('label_map' in model_dict)
self.assertTrue('label_max' in model_dict)
result, model_dict = model.predict(input=input)
self.assertTrue('label_max' in model_dict[0])
self.assertTrue('label_map' in model_dict[0])
print("predict", result, len(result))
self.assertTrue(isinstance(result, list))
print(result)
self.assertTrue(isinstance(result[0], str))
# test exceed 512
result, model_dict = model.predict(input="T " * 300 + "[MASK]" + "T " * 300)
self.assertTrue(isinstance(result, list))
def testOnce(self):
input = "See you next time"
target = "下 次 見"
ntarget = "不 見 不 散"
tokenizer = BertTokenizer.from_pretrained('voidful/albert_chinese_tiny')
pretrained = AutoModel.from_pretrained('voidful/albert_chinese_tiny')
for feature in tfkit.model.once.get_feature_from_data(tokenizer, input=input, target=target, maxlen=20):
for k, v in feature.items():
feature[k] = [v, v]
model = tfkit.model.once.Model(tokenizer, pretrained, maxlen=20)
self.assertTrue(isinstance(model(feature), Tensor))
print(model(feature))
model_dict = model(feature, eval=True)
self.assertTrue('label_prob_all' in model_dict)
self.assertTrue('label_map' in model_dict)
result, model_dict = model.predict(input=input)
self.assertTrue('label_prob_all' in model_dict[0])
self.assertTrue('label_map' in model_dict[0])
print(result, len(result))
self.assertTrue(isinstance(result, list))
self.assertTrue(isinstance(result[0][0], str))
# test exceed 512
result, model_dict = model.predict(input="T " * 512)
self.assertTrue(isinstance(result, list))
def testOnceCTC(self):
input = "See you next time"
target = "下 次 見"
ntarget = "不 見 不 散"
tokenizer = BertTokenizer.from_pretrained('voidful/albert_chinese_tiny')
pretrained = AutoModel.from_pretrained('voidful/albert_chinese_tiny')
for feature in tfkit.model.oncectc.get_feature_from_data(tokenizer, input=input, target=target, maxlen=50):
for k, v in feature.items():
feature[k] = [v, v]
model = tfkit.model.oncectc.Model(tokenizer, pretrained)
self.assertTrue(isinstance(model(feature), Tensor))
print(model(feature))
model_dict = model(feature, eval=True)
self.assertTrue('label_prob_all' in model_dict)
self.assertTrue('label_map' in model_dict)
# result, model_dict = model.predict(input=input)
# self.assertTrue('label_prob_all' in model_dict[0])
# self.assertTrue('label_map' in model_dict[0])
# print(result, len(result))
# self.assertTrue(isinstance(result, list))
# self.assertTrue(isinstance(result[0][0], str))
# # test exceed 512
# result, model_dict = model.predict(input="T " * 512)
# self.assertTrue(isinstance(result, list))
def testOnebyone(self):
input = "See you next time"
previous = "下 次"
target = "下 次 見"
ntarget = "不 見 不 散"
tokenizer = BertTokenizer.from_pretrained('voidful/albert_chinese_small')
pretrained = AutoModel.from_pretrained('voidful/albert_chinese_small')
maxlen = 10
model = tfkit.model.onebyone.Model(tokenizer, pretrained, maxlen=maxlen)
# package = torch.load('./cache/model.pt', map_location='cpu')
# for model_tag, state_dict in zip(package['tags'], package['models']):
# model.load_state_dict(state_dict)
# test filter sim
dummy_result = [[['表', '示', '事', '業'], 4.438073633101437],
[['表', '示', '事', '情'], 9.86092332722302],
[['表', '示', '事', '情'], 9.86092332722302]]
tfkit.utility.predictor._filter_similar(dummy_result, 3)
self.assertTrue(len(dummy_result), 3)
dummy_result = [[['表', '示', '事', '業'], 4.438073633101437],
[['表', '示', '事', '情'], 9.86092332722302]]
tfkit.utility.predictor._filter_similar(dummy_result, 3)
self.assertTrue(len(dummy_result), 2)
for feature in tfkit.model.onebyone.get_feature_from_data(tokenizer, input=input,
previous=tokenizer.tokenize(
" ".join(previous)),
target=tokenizer.tokenize(
" ".join(target)),
maxlen=maxlen):
for k, v in feature.items():
feature[k] = [v, v]
print(model(feature))
self.assertTrue(isinstance(model(feature), Tensor))
model_dict = model(feature, eval=True)
self.assertTrue('label_map' in model_dict)
# greedy
result, model_dict = model.predict(input=input)
print(result, model_dict)
self.assertTrue('label_map' in model_dict[0])
self.assertTrue(len(result) == 1)
self.assertTrue(isinstance(result, list))
self.assertTrue(isinstance(result[0][0], str))
# TopK
result, model_dict = model.predict(input=input, decodenum=3, mode='topK', topK=3, filtersim=False)
print("TopK no filter sim", result, len(result), model_dict)
self.assertTrue('label_map' in model_dict[0])
self.assertTrue(len(result) == 3)
self.assertTrue(isinstance(result, list))
self.assertTrue(isinstance(result[0][0], str))
# beamsearch
result, model_dict = model.predict(input=input, decodenum=3)
print("beamsaerch", result, len(result), model_dict)
self.assertTrue('label_map' in model_dict[0])
self.assertTrue(len(result) == 3)
self.assertTrue(isinstance(result, list))
self.assertTrue(isinstance(result[0][0], str))
# TopK
result, model_dict = model.predict(input=input, decodenum=3, mode='topK', topK=20)
print("TopK", result, len(result), model_dict)
self.assertTrue('label_map' in model_dict[0])
self.assertTrue(len(result) == 3)
self.assertTrue(isinstance(result, list))
self.assertTrue(isinstance(result[0][0], str))
# TopP
result, model_dict = model.predict(input=input, decodenum=3, mode='topP', topP=0.8)
print("TopP", result, len(result), model_dict)
self.assertTrue('label_map' in model_dict[0])
self.assertTrue(len(result) == 3)
self.assertTrue(isinstance(result, list))
self.assertTrue(isinstance(result[0][0], str))
# test exceed 512
result, model_dict = model.predict(input="T " * 540)
self.assertTrue(isinstance(result, list))
print("exceed max len", result)
result, model_dict = model.predict(input="T " * 550, reserved_len=10)
self.assertTrue(isinstance(result, list))
print("exceed max len with reserved len:", result)
self.assertTrue(result)
def testOnebyoneWithReservedLen(self):
tokenizer = BertTokenizer.from_pretrained('voidful/albert_chinese_tiny')
for i in tfkit.model.onebyone.get_data_from_file(GEN_DATASET):
tasks, task, input, [target, negative_text] = i
input = input.strip()
tokenized_target = tokenizer.tokenize(" ".join(target))
for j in range(1, len(tokenized_target) + 1):
feature = tfkit.model.onebyone.get_feature_from_data(tokenizer, input=input,
previous=tokenized_target[:j - 1],
target=tokenized_target[:j],
maxlen=20, reserved_len=0)[-1]
target_start = feature['start']
print(f"input: {len(feature['input'])}, {tokenizer.decode(feature['input'][:target_start])} ")
print(f"type: {len(feature['type'])}, {feature['type'][:target_start]} ")
print(f"mask: {len(feature['mask'])}, {feature['mask'][:target_start]} ")
if tokenized_target is not None:
print(
f"target: {len(feature['target'])}, {tokenizer.convert_ids_to_tokens(feature['target'][target_start])} ")
def testSeq2seq(self):
input = "See you next time"
previous = ""
target = "ok sure bye"
maxlen = 20
# tokenizer = AutoTokenizer.from_pretrained('prajjwal1/bert-small')
tokenizer = AutoTokenizer.from_pretrained('prajjwal1/bert-small')
pretrained = AutoModel.from_pretrained('prajjwal1/bert-small')
model = tfkit.model.seq2seq.Model(tokenizer, pretrained, maxlen=maxlen)
for feature in tfkit.model.seq2seq.get_feature_from_data(tokenizer, input=input,
previous=tokenizer.tokenize(previous),
maxlen=maxlen):
for k, v in feature.items():
feature[k] = [v, v]
self.assertTrue(isinstance(model(feature, eval=True), dict))
model_dict = model(feature, eval=True)
self.assertTrue('label_map' in model_dict)
# greedy
result, model_dict = model.predict(input=input)
print(result, model_dict)
self.assertTrue('label_map' in model_dict[0])
self.assertTrue(len(result) == 1)
self.assertTrue(isinstance(result, list))
self.assertTrue(isinstance(result[0][0], str))
# TopK
result, model_dict = model.predict(input=input, decodenum=3, mode='topK', topK=3, filtersim=False)
print("TopK no filter sim", result, len(result), model_dict)
self.assertTrue('label_map' in model_dict[0])
self.assertTrue(len(result) == 3)
self.assertTrue(isinstance(result, list))
self.assertTrue(isinstance(result[0][0], str))
# beamsearch
result, model_dict = model.predict(input=input, decodenum=3)
print("beamsaerch", result, len(result), model_dict)
self.assertTrue('label_map' in model_dict[0])
self.assertTrue(len(result) == 3)
self.assertTrue(isinstance(result, list))
self.assertTrue(isinstance(result[0][0], str))
# TopK
result, model_dict = model.predict(input=input, decodenum=3, mode='topK', topK=20)
print("TopK", result, len(result), model_dict)
self.assertTrue('label_map' in model_dict[0])
self.assertTrue(len(result) == 3)
self.assertTrue(isinstance(result, list))
self.assertTrue(isinstance(result[0][0], str))
# TopP
result, model_dict = model.predict(input=input, decodenum=3, mode='topP', topP=0.8)
print("TopP", result, len(result), model_dict)
self.assertTrue('label_map' in model_dict[0])
self.assertTrue(len(result) == 3)
self.assertTrue(isinstance(result, list))
self.assertTrue(isinstance(result[0][0], str))
# test exceed 512
result, model_dict = model.predict(input="T " * 540)
self.assertTrue(isinstance(result, list))
print("exceed max len", result)
result, model_dict = model.predict(input="T " * 550, reserved_len=10)
self.assertTrue(isinstance(result, list))
print("exceed max len with reserved len:", result)
self.assertTrue(result)
def testOnebyoneSeq2seqTime(self):
maxlen = 512
tokenizer = AutoTokenizer.from_pretrained('prajjwal1/bert-small')
pretrained = AutoModel.from_pretrained('prajjwal1/bert-small')
seq2seq_model = tfkit.model.seq2seq.Model(tokenizer, pretrained, maxlen=maxlen)
onebyone_model = tfkit.model.onebyone.Model(tokenizer, pretrained, maxlen=maxlen)
# test exceed 512
start = timeit.default_timer()
seq2seq_model.predict(input="T " * 256)
stop = timeit.default_timer()
print('Seq2Seq Time: ', stop - start)
# test exceed 512
start = timeit.default_timer()
onebyone_model.predict(input="T " * 256)
stop = timeit.default_timer()
print('Once Time: ', stop - start)
| 53.600437
| 2,588
| 0.613426
|
8b9e5eee706cb1f11269e815f59c1b512c24feeb
| 927
|
py
|
Python
|
tests/transformer/test_output_transformer_api.py
|
ambiata/atmosphere-python-sdk
|
48880a8553000cdea59d63b0fba49e1f0f482784
|
[
"MIT"
] | null | null | null |
tests/transformer/test_output_transformer_api.py
|
ambiata/atmosphere-python-sdk
|
48880a8553000cdea59d63b0fba49e1f0f482784
|
[
"MIT"
] | 9
|
2021-02-21T21:53:03.000Z
|
2021-11-05T06:06:55.000Z
|
tests/transformer/test_output_transformer_api.py
|
ambiata/atmosphere-python-sdk
|
48880a8553000cdea59d63b0fba49e1f0f482784
|
[
"MIT"
] | null | null | null |
from atmosphere.transformer.output_transformer import OutputTransformer
from atmosphere.transformer.pydantic_models import PredictionResponse
from .utils import get_test_app
def test_valid_request():
class TestOutputTransformer(OutputTransformer):
def apply_transformation(self, msg) -> PredictionResponse:
return PredictionResponse(action_name="test_action")
app = get_test_app(TestOutputTransformer())
with app.test_client() as client:
response = client.post(
"/transform-output",
json={
"data": {
"names": ["a", "b"],
"tensor": {"shape": [2, 2], "values": [0, 0, 1, 1]},
}
},
content_type="application/json",
)
assert response.status_code == 200, response
assert response.json["jsonData"]["action_name"] == "test_action", response.json
| 35.653846
| 87
| 0.615965
|
e53933a23a453c32f17c71f6bcd46ccf2de5b8df
| 192
|
py
|
Python
|
main.py
|
rug-gui/T-framework
|
d3629b97cdbbe216f5cdad8133c65065e837adbb
|
[
"MIT"
] | 3
|
2021-12-26T10:07:21.000Z
|
2022-02-09T02:02:15.000Z
|
main.py
|
rug-gui/T-framework
|
d3629b97cdbbe216f5cdad8133c65065e837adbb
|
[
"MIT"
] | 1
|
2022-02-06T07:56:50.000Z
|
2022-02-06T07:56:50.000Z
|
main.py
|
rug-gui/T-framework
|
d3629b97cdbbe216f5cdad8133c65065e837adbb
|
[
"MIT"
] | null | null | null |
import sqlite3
conn = sqlite3.connect('personal.db')
c = conn.cursor()
x = c.execute("select OPTION,VALUE from PREFERENCES where OPTION='ViewMode'")
vmode=x.fetchall()
#print(vmode[0][1])
| 32
| 78
| 0.71875
|
bc687f72dbf7f7bded2d185e6796bfedceb4a24e
| 345
|
py
|
Python
|
src/629B.py
|
viing937/codeforces
|
d694eb6967cd56af02963c3a662066048cb78d07
|
[
"MIT"
] | 2
|
2016-08-19T09:47:03.000Z
|
2016-10-01T10:15:03.000Z
|
src/629B.py
|
viing937/codeforces
|
d694eb6967cd56af02963c3a662066048cb78d07
|
[
"MIT"
] | null | null | null |
src/629B.py
|
viing937/codeforces
|
d694eb6967cd56af02963c3a662066048cb78d07
|
[
"MIT"
] | 1
|
2015-07-01T23:57:32.000Z
|
2015-07-01T23:57:32.000Z
|
n = int(input())
f, m = [0]*368, [0]*368
for i in range(n):
s, a, b = input().split()
if s == 'F':
f[int(a)] += 1
f[int(b)+1] -= 1
elif s == 'M':
m[int(a)] += 1
m[int(b)+1] -= 1
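# f and m are difference arrays over days 1..366: +1 at each interval start and -1 just
# past its end. The prefix sums below turn them into per-day counts of available
# females/males, and the answer is twice the best per-day min(f, m).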
for i in range(1, len(f)):
f[i] += f[i-1]
m[i] += m[i-1]
print(max([min(f[i], m[i])*2 for i in range(1, 367)]))
| 19.166667
| 54
| 0.397101
|
e0dc69e1ccd081b96fefd15cdf8626d2d03ba679
| 106,970
|
py
|
Python
|
Lib/test/test_shutil.py
|
milanbalazs/cpython
|
5f0555e85688c27b064fdfe88cdc9b2b59432231
|
[
"0BSD"
] | null | null | null |
Lib/test/test_shutil.py
|
milanbalazs/cpython
|
5f0555e85688c27b064fdfe88cdc9b2b59432231
|
[
"0BSD"
] | null | null | null |
Lib/test/test_shutil.py
|
milanbalazs/cpython
|
5f0555e85688c27b064fdfe88cdc9b2b59432231
|
[
"0BSD"
] | null | null | null |
# Copyright (C) 2003 Python Software Foundation
import unittest
import unittest.mock
import shutil
import tempfile
import sys
import stat
import os
import os.path
import errno
import functools
import pathlib
import subprocess
import random
import string
import contextlib
import io
from shutil import (make_archive,
register_archive_format, unregister_archive_format,
get_archive_formats, Error, unpack_archive,
register_unpack_format, RegistryError,
unregister_unpack_format, get_unpack_formats,
SameFileError, _GiveupOnFastCopy)
import tarfile
import zipfile
try:
import posix
except ImportError:
posix = None
from test import support
from test.support import os_helper
from test.support.os_helper import TESTFN, FakePath
TESTFN2 = TESTFN + "2"
TESTFN_SRC = TESTFN + "_SRC"
TESTFN_DST = TESTFN + "_DST"
MACOS = sys.platform.startswith("darwin")
SOLARIS = sys.platform.startswith("sunos")
AIX = sys.platform[:3] == 'aix'
try:
import grp
import pwd
UID_GID_SUPPORT = True
except ImportError:
UID_GID_SUPPORT = False
try:
import _winapi
except ImportError:
_winapi = None
def _fake_rename(*args, **kwargs):
# Pretend the destination path is on a different filesystem.
raise OSError(getattr(errno, 'EXDEV', 18), "Invalid cross-device link")
def mock_rename(func):
@functools.wraps(func)
def wrap(*args, **kwargs):
try:
builtin_rename = os.rename
os.rename = _fake_rename
return func(*args, **kwargs)
finally:
os.rename = builtin_rename
return wrap
def write_file(path, content, binary=False):
"""Write *content* to a file located at *path*.
If *path* is a tuple instead of a string, os.path.join will be used to
make a path. If *binary* is true, the file will be opened in binary
mode.
"""
if isinstance(path, tuple):
path = os.path.join(*path)
mode = 'wb' if binary else 'w'
encoding = None if binary else "utf-8"
with open(path, mode, encoding=encoding) as fp:
fp.write(content)
def write_test_file(path, size):
"""Create a test file with an arbitrary size and random text content."""
def chunks(total, step):
assert total >= step
while total > step:
yield step
total -= step
if total:
yield total
bufsize = min(size, 8192)
chunk = b"".join([random.choice(string.ascii_letters).encode()
for i in range(bufsize)])
with open(path, 'wb') as f:
for csize in chunks(size, bufsize):
f.write(chunk)
assert os.path.getsize(path) == size
def read_file(path, binary=False):
"""Return contents from a file located at *path*.
If *path* is a tuple instead of a string, os.path.join will be used to
make a path. If *binary* is true, the file will be opened in binary
mode.
"""
if isinstance(path, tuple):
path = os.path.join(*path)
mode = 'rb' if binary else 'r'
encoding = None if binary else "utf-8"
with open(path, mode, encoding=encoding) as fp:
return fp.read()
def rlistdir(path):
res = []
for name in sorted(os.listdir(path)):
p = os.path.join(path, name)
if os.path.isdir(p) and not os.path.islink(p):
res.append(name + '/')
for n in rlistdir(p):
res.append(name + '/' + n)
else:
res.append(name)
return res
def supports_file2file_sendfile():
# ...apparently Linux and Solaris are the only ones
if not hasattr(os, "sendfile"):
return False
srcname = None
dstname = None
try:
with tempfile.NamedTemporaryFile("wb", dir=os.getcwd(), delete=False) as f:
srcname = f.name
f.write(b"0123456789")
with open(srcname, "rb") as src:
with tempfile.NamedTemporaryFile("wb", dir=os.getcwd(), delete=False) as dst:
dstname = dst.name
infd = src.fileno()
outfd = dst.fileno()
try:
os.sendfile(outfd, infd, 0, 2)
except OSError:
return False
else:
return True
finally:
if srcname is not None:
os_helper.unlink(srcname)
if dstname is not None:
os_helper.unlink(dstname)
SUPPORTS_SENDFILE = supports_file2file_sendfile()
# AIX 32-bit mode, by default, lacks enough memory for the xz/lzma compiler test
# The AIX command 'dump -o program' gives XCOFF header information
# The second word of the last line is the maxdata value; in 32-bit mode,
# maxdata must be at least 0x20000000 for the xz test to succeed
def _maxdataOK():
if AIX and sys.maxsize == 2147483647:
hdrs=subprocess.getoutput("/usr/bin/dump -o %s" % sys.executable)
maxdata=hdrs.split("\n")[-1].split()[1]
return int(maxdata,16) >= 0x20000000
else:
return True
class BaseTest:
def mkdtemp(self, prefix=None):
"""Create a temporary directory that will be cleaned up.
Returns the path of the directory.
"""
d = tempfile.mkdtemp(prefix=prefix, dir=os.getcwd())
self.addCleanup(os_helper.rmtree, d)
return d
class TestRmTree(BaseTest, unittest.TestCase):
def test_rmtree_works_on_bytes(self):
tmp = self.mkdtemp()
victim = os.path.join(tmp, 'killme')
os.mkdir(victim)
write_file(os.path.join(victim, 'somefile'), 'foo')
victim = os.fsencode(victim)
self.assertIsInstance(victim, bytes)
shutil.rmtree(victim)
@os_helper.skip_unless_symlink
def test_rmtree_fails_on_symlink(self):
tmp = self.mkdtemp()
dir_ = os.path.join(tmp, 'dir')
os.mkdir(dir_)
link = os.path.join(tmp, 'link')
os.symlink(dir_, link)
self.assertRaises(OSError, shutil.rmtree, link)
self.assertTrue(os.path.exists(dir_))
self.assertTrue(os.path.lexists(link))
errors = []
def onerror(*args):
errors.append(args)
shutil.rmtree(link, onerror=onerror)
self.assertEqual(len(errors), 1)
self.assertIs(errors[0][0], os.path.islink)
self.assertEqual(errors[0][1], link)
self.assertIsInstance(errors[0][2][1], OSError)
@os_helper.skip_unless_symlink
def test_rmtree_works_on_symlinks(self):
tmp = self.mkdtemp()
dir1 = os.path.join(tmp, 'dir1')
dir2 = os.path.join(dir1, 'dir2')
dir3 = os.path.join(tmp, 'dir3')
for d in dir1, dir2, dir3:
os.mkdir(d)
file1 = os.path.join(tmp, 'file1')
write_file(file1, 'foo')
link1 = os.path.join(dir1, 'link1')
os.symlink(dir2, link1)
link2 = os.path.join(dir1, 'link2')
os.symlink(dir3, link2)
link3 = os.path.join(dir1, 'link3')
os.symlink(file1, link3)
# make sure symlinks are removed but not followed
shutil.rmtree(dir1)
self.assertFalse(os.path.exists(dir1))
self.assertTrue(os.path.exists(dir3))
self.assertTrue(os.path.exists(file1))
@unittest.skipUnless(_winapi, 'only relevant on Windows')
def test_rmtree_fails_on_junctions(self):
tmp = self.mkdtemp()
dir_ = os.path.join(tmp, 'dir')
os.mkdir(dir_)
link = os.path.join(tmp, 'link')
_winapi.CreateJunction(dir_, link)
self.addCleanup(os_helper.unlink, link)
self.assertRaises(OSError, shutil.rmtree, link)
self.assertTrue(os.path.exists(dir_))
self.assertTrue(os.path.lexists(link))
errors = []
def onerror(*args):
errors.append(args)
shutil.rmtree(link, onerror=onerror)
self.assertEqual(len(errors), 1)
self.assertIs(errors[0][0], os.path.islink)
self.assertEqual(errors[0][1], link)
self.assertIsInstance(errors[0][2][1], OSError)
@unittest.skipUnless(_winapi, 'only relevant on Windows')
def test_rmtree_works_on_junctions(self):
tmp = self.mkdtemp()
dir1 = os.path.join(tmp, 'dir1')
dir2 = os.path.join(dir1, 'dir2')
dir3 = os.path.join(tmp, 'dir3')
for d in dir1, dir2, dir3:
os.mkdir(d)
file1 = os.path.join(tmp, 'file1')
write_file(file1, 'foo')
link1 = os.path.join(dir1, 'link1')
_winapi.CreateJunction(dir2, link1)
link2 = os.path.join(dir1, 'link2')
_winapi.CreateJunction(dir3, link2)
link3 = os.path.join(dir1, 'link3')
_winapi.CreateJunction(file1, link3)
# make sure junctions are removed but not followed
shutil.rmtree(dir1)
self.assertFalse(os.path.exists(dir1))
self.assertTrue(os.path.exists(dir3))
self.assertTrue(os.path.exists(file1))
def test_rmtree_errors(self):
# filename is guaranteed not to exist
filename = tempfile.mktemp(dir=self.mkdtemp())
self.assertRaises(FileNotFoundError, shutil.rmtree, filename)
# test that ignore_errors option is honored
shutil.rmtree(filename, ignore_errors=True)
# existing file
tmpdir = self.mkdtemp()
write_file((tmpdir, "tstfile"), "")
filename = os.path.join(tmpdir, "tstfile")
with self.assertRaises(NotADirectoryError) as cm:
shutil.rmtree(filename)
self.assertEqual(cm.exception.filename, filename)
self.assertTrue(os.path.exists(filename))
# test that ignore_errors option is honored
shutil.rmtree(filename, ignore_errors=True)
self.assertTrue(os.path.exists(filename))
errors = []
def onerror(*args):
errors.append(args)
shutil.rmtree(filename, onerror=onerror)
self.assertEqual(len(errors), 2)
self.assertIs(errors[0][0], os.scandir)
self.assertEqual(errors[0][1], filename)
self.assertIsInstance(errors[0][2][1], NotADirectoryError)
self.assertEqual(errors[0][2][1].filename, filename)
self.assertIs(errors[1][0], os.rmdir)
self.assertEqual(errors[1][1], filename)
self.assertIsInstance(errors[1][2][1], NotADirectoryError)
self.assertEqual(errors[1][2][1].filename, filename)
@unittest.skipIf(sys.platform[:6] == 'cygwin',
"This test can't be run on Cygwin (issue #1071513).")
@unittest.skipIf(hasattr(os, 'geteuid') and os.geteuid() == 0,
"This test can't be run reliably as root (issue #1076467).")
def test_on_error(self):
self.errorState = 0
os.mkdir(TESTFN)
self.addCleanup(shutil.rmtree, TESTFN)
self.child_file_path = os.path.join(TESTFN, 'a')
self.child_dir_path = os.path.join(TESTFN, 'b')
os_helper.create_empty_file(self.child_file_path)
os.mkdir(self.child_dir_path)
old_dir_mode = os.stat(TESTFN).st_mode
old_child_file_mode = os.stat(self.child_file_path).st_mode
old_child_dir_mode = os.stat(self.child_dir_path).st_mode
# Make unwritable.
new_mode = stat.S_IREAD|stat.S_IEXEC
os.chmod(self.child_file_path, new_mode)
os.chmod(self.child_dir_path, new_mode)
os.chmod(TESTFN, new_mode)
self.addCleanup(os.chmod, TESTFN, old_dir_mode)
self.addCleanup(os.chmod, self.child_file_path, old_child_file_mode)
self.addCleanup(os.chmod, self.child_dir_path, old_child_dir_mode)
shutil.rmtree(TESTFN, onerror=self.check_args_to_onerror)
# Test whether onerror has actually been called.
self.assertEqual(self.errorState, 3,
"Expected call to onerror function did not happen.")
def check_args_to_onerror(self, func, arg, exc):
# test_rmtree_errors deliberately runs rmtree
# on a directory that is chmod 500, which will fail.
# This function is run when shutil.rmtree fails.
# 99.9% of the time it initially fails to remove
# a file in the directory, so the first time through
# func is os.remove.
# However, some Linux machines running ZFS on
# FUSE experienced a failure earlier in the process
# at os.listdir. The first failure may legally
# be either.
if self.errorState < 2:
if func is os.unlink:
self.assertEqual(arg, self.child_file_path)
elif func is os.rmdir:
self.assertEqual(arg, self.child_dir_path)
else:
self.assertIs(func, os.listdir)
self.assertIn(arg, [TESTFN, self.child_dir_path])
self.assertTrue(issubclass(exc[0], OSError))
self.errorState += 1
else:
self.assertEqual(func, os.rmdir)
self.assertEqual(arg, TESTFN)
self.assertTrue(issubclass(exc[0], OSError))
self.errorState = 3
def test_rmtree_does_not_choke_on_failing_lstat(self):
try:
orig_lstat = os.lstat
def raiser(fn, *args, **kwargs):
if fn != TESTFN:
raise OSError()
else:
return orig_lstat(fn)
os.lstat = raiser
os.mkdir(TESTFN)
write_file((TESTFN, 'foo'), 'foo')
shutil.rmtree(TESTFN)
finally:
os.lstat = orig_lstat
def test_rmtree_uses_safe_fd_version_if_available(self):
_use_fd_functions = ({os.open, os.stat, os.unlink, os.rmdir} <=
os.supports_dir_fd and
os.listdir in os.supports_fd and
os.stat in os.supports_follow_symlinks)
if _use_fd_functions:
self.assertTrue(shutil._use_fd_functions)
self.assertTrue(shutil.rmtree.avoids_symlink_attacks)
tmp_dir = self.mkdtemp()
d = os.path.join(tmp_dir, 'a')
os.mkdir(d)
try:
real_rmtree = shutil._rmtree_safe_fd
class Called(Exception): pass
def _raiser(*args, **kwargs):
raise Called
shutil._rmtree_safe_fd = _raiser
self.assertRaises(Called, shutil.rmtree, d)
finally:
shutil._rmtree_safe_fd = real_rmtree
else:
self.assertFalse(shutil._use_fd_functions)
self.assertFalse(shutil.rmtree.avoids_symlink_attacks)
def test_rmtree_dont_delete_file(self):
# When called on a file instead of a directory, don't delete it.
handle, path = tempfile.mkstemp(dir=self.mkdtemp())
os.close(handle)
self.assertRaises(NotADirectoryError, shutil.rmtree, path)
os.remove(path)
@os_helper.skip_unless_symlink
def test_rmtree_on_symlink(self):
# bug 1669.
os.mkdir(TESTFN)
try:
src = os.path.join(TESTFN, 'cheese')
dst = os.path.join(TESTFN, 'shop')
os.mkdir(src)
os.symlink(src, dst)
self.assertRaises(OSError, shutil.rmtree, dst)
shutil.rmtree(dst, ignore_errors=True)
finally:
shutil.rmtree(TESTFN, ignore_errors=True)
@unittest.skipUnless(_winapi, 'only relevant on Windows')
def test_rmtree_on_junction(self):
os.mkdir(TESTFN)
try:
src = os.path.join(TESTFN, 'cheese')
dst = os.path.join(TESTFN, 'shop')
os.mkdir(src)
open(os.path.join(src, 'spam'), 'wb').close()
_winapi.CreateJunction(src, dst)
self.assertRaises(OSError, shutil.rmtree, dst)
shutil.rmtree(dst, ignore_errors=True)
finally:
shutil.rmtree(TESTFN, ignore_errors=True)
class TestCopyTree(BaseTest, unittest.TestCase):
def test_copytree_simple(self):
src_dir = self.mkdtemp()
dst_dir = os.path.join(self.mkdtemp(), 'destination')
self.addCleanup(shutil.rmtree, src_dir)
self.addCleanup(shutil.rmtree, os.path.dirname(dst_dir))
write_file((src_dir, 'test.txt'), '123')
os.mkdir(os.path.join(src_dir, 'test_dir'))
write_file((src_dir, 'test_dir', 'test.txt'), '456')
shutil.copytree(src_dir, dst_dir)
self.assertTrue(os.path.isfile(os.path.join(dst_dir, 'test.txt')))
self.assertTrue(os.path.isdir(os.path.join(dst_dir, 'test_dir')))
self.assertTrue(os.path.isfile(os.path.join(dst_dir, 'test_dir',
'test.txt')))
actual = read_file((dst_dir, 'test.txt'))
self.assertEqual(actual, '123')
actual = read_file((dst_dir, 'test_dir', 'test.txt'))
self.assertEqual(actual, '456')
def test_copytree_dirs_exist_ok(self):
src_dir = self.mkdtemp()
dst_dir = self.mkdtemp()
self.addCleanup(shutil.rmtree, src_dir)
self.addCleanup(shutil.rmtree, dst_dir)
write_file((src_dir, 'nonexisting.txt'), '123')
os.mkdir(os.path.join(src_dir, 'existing_dir'))
os.mkdir(os.path.join(dst_dir, 'existing_dir'))
write_file((dst_dir, 'existing_dir', 'existing.txt'), 'will be replaced')
write_file((src_dir, 'existing_dir', 'existing.txt'), 'has been replaced')
shutil.copytree(src_dir, dst_dir, dirs_exist_ok=True)
self.assertTrue(os.path.isfile(os.path.join(dst_dir, 'nonexisting.txt')))
self.assertTrue(os.path.isdir(os.path.join(dst_dir, 'existing_dir')))
self.assertTrue(os.path.isfile(os.path.join(dst_dir, 'existing_dir',
'existing.txt')))
actual = read_file((dst_dir, 'nonexisting.txt'))
self.assertEqual(actual, '123')
actual = read_file((dst_dir, 'existing_dir', 'existing.txt'))
self.assertEqual(actual, 'has been replaced')
with self.assertRaises(FileExistsError):
shutil.copytree(src_dir, dst_dir, dirs_exist_ok=False)
@os_helper.skip_unless_symlink
def test_copytree_symlinks(self):
tmp_dir = self.mkdtemp()
src_dir = os.path.join(tmp_dir, 'src')
dst_dir = os.path.join(tmp_dir, 'dst')
sub_dir = os.path.join(src_dir, 'sub')
os.mkdir(src_dir)
os.mkdir(sub_dir)
write_file((src_dir, 'file.txt'), 'foo')
src_link = os.path.join(sub_dir, 'link')
dst_link = os.path.join(dst_dir, 'sub/link')
os.symlink(os.path.join(src_dir, 'file.txt'),
src_link)
if hasattr(os, 'lchmod'):
os.lchmod(src_link, stat.S_IRWXU | stat.S_IRWXO)
if hasattr(os, 'lchflags') and hasattr(stat, 'UF_NODUMP'):
os.lchflags(src_link, stat.UF_NODUMP)
src_stat = os.lstat(src_link)
shutil.copytree(src_dir, dst_dir, symlinks=True)
self.assertTrue(os.path.islink(os.path.join(dst_dir, 'sub', 'link')))
actual = os.readlink(os.path.join(dst_dir, 'sub', 'link'))
# Bad practice to blindly strip the prefix as it may be required to
# correctly refer to the file, but we're only comparing paths here.
if os.name == 'nt' and actual.startswith('\\\\?\\'):
actual = actual[4:]
self.assertEqual(actual, os.path.join(src_dir, 'file.txt'))
dst_stat = os.lstat(dst_link)
if hasattr(os, 'lchmod'):
self.assertEqual(dst_stat.st_mode, src_stat.st_mode)
if hasattr(os, 'lchflags'):
self.assertEqual(dst_stat.st_flags, src_stat.st_flags)
def test_copytree_with_exclude(self):
# creating data
join = os.path.join
exists = os.path.exists
src_dir = self.mkdtemp()
try:
dst_dir = join(self.mkdtemp(), 'destination')
write_file((src_dir, 'test.txt'), '123')
write_file((src_dir, 'test.tmp'), '123')
os.mkdir(join(src_dir, 'test_dir'))
write_file((src_dir, 'test_dir', 'test.txt'), '456')
os.mkdir(join(src_dir, 'test_dir2'))
write_file((src_dir, 'test_dir2', 'test.txt'), '456')
os.mkdir(join(src_dir, 'test_dir2', 'subdir'))
os.mkdir(join(src_dir, 'test_dir2', 'subdir2'))
write_file((src_dir, 'test_dir2', 'subdir', 'test.txt'), '456')
write_file((src_dir, 'test_dir2', 'subdir2', 'test.py'), '456')
# testing glob-like patterns
try:
patterns = shutil.ignore_patterns('*.tmp', 'test_dir2')
shutil.copytree(src_dir, dst_dir, ignore=patterns)
# checking the result: some elements should not be copied
self.assertTrue(exists(join(dst_dir, 'test.txt')))
self.assertFalse(exists(join(dst_dir, 'test.tmp')))
self.assertFalse(exists(join(dst_dir, 'test_dir2')))
finally:
shutil.rmtree(dst_dir)
try:
patterns = shutil.ignore_patterns('*.tmp', 'subdir*')
shutil.copytree(src_dir, dst_dir, ignore=patterns)
# checking the result: some elements should not be copied
self.assertFalse(exists(join(dst_dir, 'test.tmp')))
self.assertFalse(exists(join(dst_dir, 'test_dir2', 'subdir2')))
self.assertFalse(exists(join(dst_dir, 'test_dir2', 'subdir')))
finally:
shutil.rmtree(dst_dir)
# testing callable-style
try:
def _filter(src, names):
res = []
for name in names:
path = os.path.join(src, name)
if (os.path.isdir(path) and
path.split()[-1] == 'subdir'):
res.append(name)
                    elif os.path.splitext(path)[-1] in ('.py',):
res.append(name)
return res
shutil.copytree(src_dir, dst_dir, ignore=_filter)
# checking the result: some elements should not be copied
self.assertFalse(exists(join(dst_dir, 'test_dir2', 'subdir2',
'test.py')))
self.assertFalse(exists(join(dst_dir, 'test_dir2', 'subdir')))
finally:
shutil.rmtree(dst_dir)
finally:
shutil.rmtree(src_dir)
shutil.rmtree(os.path.dirname(dst_dir))
def test_copytree_arg_types_of_ignore(self):
join = os.path.join
exists = os.path.exists
tmp_dir = self.mkdtemp()
src_dir = join(tmp_dir, "source")
os.mkdir(join(src_dir))
os.mkdir(join(src_dir, 'test_dir'))
os.mkdir(os.path.join(src_dir, 'test_dir', 'subdir'))
write_file((src_dir, 'test_dir', 'subdir', 'test.txt'), '456')
invokations = []
def _ignore(src, names):
invokations.append(src)
self.assertIsInstance(src, str)
self.assertIsInstance(names, list)
self.assertEqual(len(names), len(set(names)))
for name in names:
self.assertIsInstance(name, str)
return []
dst_dir = join(self.mkdtemp(), 'destination')
shutil.copytree(src_dir, dst_dir, ignore=_ignore)
self.assertTrue(exists(join(dst_dir, 'test_dir', 'subdir',
'test.txt')))
dst_dir = join(self.mkdtemp(), 'destination')
shutil.copytree(pathlib.Path(src_dir), dst_dir, ignore=_ignore)
self.assertTrue(exists(join(dst_dir, 'test_dir', 'subdir',
'test.txt')))
dst_dir = join(self.mkdtemp(), 'destination')
src_dir_entry = list(os.scandir(tmp_dir))[0]
self.assertIsInstance(src_dir_entry, os.DirEntry)
shutil.copytree(src_dir_entry, dst_dir, ignore=_ignore)
self.assertTrue(exists(join(dst_dir, 'test_dir', 'subdir',
'test.txt')))
self.assertEqual(len(invokations), 9)
def test_copytree_retains_permissions(self):
tmp_dir = self.mkdtemp()
src_dir = os.path.join(tmp_dir, 'source')
os.mkdir(src_dir)
dst_dir = os.path.join(tmp_dir, 'destination')
self.addCleanup(shutil.rmtree, tmp_dir)
os.chmod(src_dir, 0o777)
write_file((src_dir, 'permissive.txt'), '123')
os.chmod(os.path.join(src_dir, 'permissive.txt'), 0o777)
write_file((src_dir, 'restrictive.txt'), '456')
os.chmod(os.path.join(src_dir, 'restrictive.txt'), 0o600)
restrictive_subdir = tempfile.mkdtemp(dir=src_dir)
self.addCleanup(os_helper.rmtree, restrictive_subdir)
os.chmod(restrictive_subdir, 0o600)
shutil.copytree(src_dir, dst_dir)
self.assertEqual(os.stat(src_dir).st_mode, os.stat(dst_dir).st_mode)
self.assertEqual(os.stat(os.path.join(src_dir, 'permissive.txt')).st_mode,
os.stat(os.path.join(dst_dir, 'permissive.txt')).st_mode)
self.assertEqual(os.stat(os.path.join(src_dir, 'restrictive.txt')).st_mode,
os.stat(os.path.join(dst_dir, 'restrictive.txt')).st_mode)
restrictive_subdir_dst = os.path.join(dst_dir,
os.path.split(restrictive_subdir)[1])
self.assertEqual(os.stat(restrictive_subdir).st_mode,
os.stat(restrictive_subdir_dst).st_mode)
@unittest.mock.patch('os.chmod')
def test_copytree_winerror(self, mock_patch):
# When copying to VFAT, copystat() raises OSError. On Windows, the
# exception object has a meaningful 'winerror' attribute, but not
# on other operating systems. Do not assume 'winerror' is set.
src_dir = self.mkdtemp()
dst_dir = os.path.join(self.mkdtemp(), 'destination')
self.addCleanup(shutil.rmtree, src_dir)
self.addCleanup(shutil.rmtree, os.path.dirname(dst_dir))
mock_patch.side_effect = PermissionError('ka-boom')
with self.assertRaises(shutil.Error):
shutil.copytree(src_dir, dst_dir)
def test_copytree_custom_copy_function(self):
# See: https://bugs.python.org/issue35648
def custom_cpfun(a, b):
flag.append(None)
self.assertIsInstance(a, str)
self.assertIsInstance(b, str)
self.assertEqual(a, os.path.join(src, 'foo'))
self.assertEqual(b, os.path.join(dst, 'foo'))
flag = []
src = self.mkdtemp()
dst = tempfile.mktemp(dir=self.mkdtemp())
with open(os.path.join(src, 'foo'), 'w', encoding='utf-8') as f:
f.close()
shutil.copytree(src, dst, copy_function=custom_cpfun)
self.assertEqual(len(flag), 1)
# Issue #3002: copyfile and copytree block indefinitely on named pipes
@unittest.skipUnless(hasattr(os, "mkfifo"), 'requires os.mkfifo()')
@os_helper.skip_unless_symlink
@unittest.skipIf(sys.platform == "vxworks",
"fifo requires special path on VxWorks")
def test_copytree_named_pipe(self):
os.mkdir(TESTFN)
try:
subdir = os.path.join(TESTFN, "subdir")
os.mkdir(subdir)
pipe = os.path.join(subdir, "mypipe")
try:
os.mkfifo(pipe)
except PermissionError as e:
self.skipTest('os.mkfifo(): %s' % e)
try:
shutil.copytree(TESTFN, TESTFN2)
except shutil.Error as e:
errors = e.args[0]
self.assertEqual(len(errors), 1)
src, dst, error_msg = errors[0]
self.assertEqual("`%s` is a named pipe" % pipe, error_msg)
else:
self.fail("shutil.Error should have been raised")
finally:
shutil.rmtree(TESTFN, ignore_errors=True)
shutil.rmtree(TESTFN2, ignore_errors=True)
def test_copytree_special_func(self):
src_dir = self.mkdtemp()
dst_dir = os.path.join(self.mkdtemp(), 'destination')
write_file((src_dir, 'test.txt'), '123')
os.mkdir(os.path.join(src_dir, 'test_dir'))
write_file((src_dir, 'test_dir', 'test.txt'), '456')
copied = []
def _copy(src, dst):
copied.append((src, dst))
shutil.copytree(src_dir, dst_dir, copy_function=_copy)
self.assertEqual(len(copied), 2)
@os_helper.skip_unless_symlink
def test_copytree_dangling_symlinks(self):
# a dangling symlink raises an error at the end
src_dir = self.mkdtemp()
dst_dir = os.path.join(self.mkdtemp(), 'destination')
os.symlink('IDONTEXIST', os.path.join(src_dir, 'test.txt'))
os.mkdir(os.path.join(src_dir, 'test_dir'))
write_file((src_dir, 'test_dir', 'test.txt'), '456')
self.assertRaises(Error, shutil.copytree, src_dir, dst_dir)
# a dangling symlink is ignored with the proper flag
dst_dir = os.path.join(self.mkdtemp(), 'destination2')
shutil.copytree(src_dir, dst_dir, ignore_dangling_symlinks=True)
self.assertNotIn('test.txt', os.listdir(dst_dir))
# a dangling symlink is copied if symlinks=True
dst_dir = os.path.join(self.mkdtemp(), 'destination3')
shutil.copytree(src_dir, dst_dir, symlinks=True)
self.assertIn('test.txt', os.listdir(dst_dir))
@os_helper.skip_unless_symlink
def test_copytree_symlink_dir(self):
src_dir = self.mkdtemp()
dst_dir = os.path.join(self.mkdtemp(), 'destination')
os.mkdir(os.path.join(src_dir, 'real_dir'))
with open(os.path.join(src_dir, 'real_dir', 'test.txt'), 'wb'):
pass
os.symlink(os.path.join(src_dir, 'real_dir'),
os.path.join(src_dir, 'link_to_dir'),
target_is_directory=True)
shutil.copytree(src_dir, dst_dir, symlinks=False)
self.assertFalse(os.path.islink(os.path.join(dst_dir, 'link_to_dir')))
self.assertIn('test.txt', os.listdir(os.path.join(dst_dir, 'link_to_dir')))
dst_dir = os.path.join(self.mkdtemp(), 'destination2')
shutil.copytree(src_dir, dst_dir, symlinks=True)
self.assertTrue(os.path.islink(os.path.join(dst_dir, 'link_to_dir')))
self.assertIn('test.txt', os.listdir(os.path.join(dst_dir, 'link_to_dir')))
def test_copytree_return_value(self):
# copytree returns its destination path.
src_dir = self.mkdtemp()
dst_dir = src_dir + "dest"
self.addCleanup(shutil.rmtree, dst_dir, True)
src = os.path.join(src_dir, 'foo')
write_file(src, 'foo')
rv = shutil.copytree(src_dir, dst_dir)
self.assertEqual(['foo'], os.listdir(rv))
def test_copytree_subdirectory(self):
# copytree where dst is a subdirectory of src, see Issue 38688
base_dir = self.mkdtemp()
self.addCleanup(shutil.rmtree, base_dir, ignore_errors=True)
src_dir = os.path.join(base_dir, "t", "pg")
dst_dir = os.path.join(src_dir, "somevendor", "1.0")
os.makedirs(src_dir)
src = os.path.join(src_dir, 'pol')
write_file(src, 'pol')
rv = shutil.copytree(src_dir, dst_dir)
self.assertEqual(['pol'], os.listdir(rv))
class TestCopy(BaseTest, unittest.TestCase):
### shutil.copymode
@os_helper.skip_unless_symlink
def test_copymode_follow_symlinks(self):
tmp_dir = self.mkdtemp()
src = os.path.join(tmp_dir, 'foo')
dst = os.path.join(tmp_dir, 'bar')
src_link = os.path.join(tmp_dir, 'baz')
dst_link = os.path.join(tmp_dir, 'quux')
write_file(src, 'foo')
write_file(dst, 'foo')
os.symlink(src, src_link)
os.symlink(dst, dst_link)
os.chmod(src, stat.S_IRWXU|stat.S_IRWXG)
# file to file
os.chmod(dst, stat.S_IRWXO)
self.assertNotEqual(os.stat(src).st_mode, os.stat(dst).st_mode)
shutil.copymode(src, dst)
self.assertEqual(os.stat(src).st_mode, os.stat(dst).st_mode)
# On Windows, os.chmod does not follow symlinks (issue #15411)
if os.name != 'nt':
# follow src link
os.chmod(dst, stat.S_IRWXO)
shutil.copymode(src_link, dst)
self.assertEqual(os.stat(src).st_mode, os.stat(dst).st_mode)
# follow dst link
os.chmod(dst, stat.S_IRWXO)
shutil.copymode(src, dst_link)
self.assertEqual(os.stat(src).st_mode, os.stat(dst).st_mode)
# follow both links
os.chmod(dst, stat.S_IRWXO)
shutil.copymode(src_link, dst_link)
self.assertEqual(os.stat(src).st_mode, os.stat(dst).st_mode)
@unittest.skipUnless(hasattr(os, 'lchmod'), 'requires os.lchmod')
@os_helper.skip_unless_symlink
def test_copymode_symlink_to_symlink(self):
tmp_dir = self.mkdtemp()
src = os.path.join(tmp_dir, 'foo')
dst = os.path.join(tmp_dir, 'bar')
src_link = os.path.join(tmp_dir, 'baz')
dst_link = os.path.join(tmp_dir, 'quux')
write_file(src, 'foo')
write_file(dst, 'foo')
os.symlink(src, src_link)
os.symlink(dst, dst_link)
os.chmod(src, stat.S_IRWXU|stat.S_IRWXG)
os.chmod(dst, stat.S_IRWXU)
os.lchmod(src_link, stat.S_IRWXO|stat.S_IRWXG)
# link to link
os.lchmod(dst_link, stat.S_IRWXO)
shutil.copymode(src_link, dst_link, follow_symlinks=False)
self.assertEqual(os.lstat(src_link).st_mode,
os.lstat(dst_link).st_mode)
self.assertNotEqual(os.stat(src).st_mode, os.stat(dst).st_mode)
# src link - use chmod
os.lchmod(dst_link, stat.S_IRWXO)
shutil.copymode(src_link, dst, follow_symlinks=False)
self.assertEqual(os.stat(src).st_mode, os.stat(dst).st_mode)
# dst link - use chmod
os.lchmod(dst_link, stat.S_IRWXO)
shutil.copymode(src, dst_link, follow_symlinks=False)
self.assertEqual(os.stat(src).st_mode, os.stat(dst).st_mode)
@unittest.skipIf(hasattr(os, 'lchmod'), 'requires os.lchmod to be missing')
@os_helper.skip_unless_symlink
def test_copymode_symlink_to_symlink_wo_lchmod(self):
tmp_dir = self.mkdtemp()
src = os.path.join(tmp_dir, 'foo')
dst = os.path.join(tmp_dir, 'bar')
src_link = os.path.join(tmp_dir, 'baz')
dst_link = os.path.join(tmp_dir, 'quux')
write_file(src, 'foo')
write_file(dst, 'foo')
os.symlink(src, src_link)
os.symlink(dst, dst_link)
shutil.copymode(src_link, dst_link, follow_symlinks=False) # silent fail
### shutil.copystat
@os_helper.skip_unless_symlink
def test_copystat_symlinks(self):
tmp_dir = self.mkdtemp()
src = os.path.join(tmp_dir, 'foo')
dst = os.path.join(tmp_dir, 'bar')
src_link = os.path.join(tmp_dir, 'baz')
dst_link = os.path.join(tmp_dir, 'qux')
write_file(src, 'foo')
src_stat = os.stat(src)
os.utime(src, (src_stat.st_atime,
src_stat.st_mtime - 42.0)) # ensure different mtimes
write_file(dst, 'bar')
self.assertNotEqual(os.stat(src).st_mtime, os.stat(dst).st_mtime)
os.symlink(src, src_link)
os.symlink(dst, dst_link)
if hasattr(os, 'lchmod'):
os.lchmod(src_link, stat.S_IRWXO)
if hasattr(os, 'lchflags') and hasattr(stat, 'UF_NODUMP'):
os.lchflags(src_link, stat.UF_NODUMP)
src_link_stat = os.lstat(src_link)
# follow
if hasattr(os, 'lchmod'):
shutil.copystat(src_link, dst_link, follow_symlinks=True)
self.assertNotEqual(src_link_stat.st_mode, os.stat(dst).st_mode)
# don't follow
shutil.copystat(src_link, dst_link, follow_symlinks=False)
dst_link_stat = os.lstat(dst_link)
if os.utime in os.supports_follow_symlinks:
for attr in 'st_atime', 'st_mtime':
# The modification times may be truncated in the new file.
self.assertLessEqual(getattr(src_link_stat, attr),
getattr(dst_link_stat, attr) + 1)
if hasattr(os, 'lchmod'):
self.assertEqual(src_link_stat.st_mode, dst_link_stat.st_mode)
if hasattr(os, 'lchflags') and hasattr(src_link_stat, 'st_flags'):
self.assertEqual(src_link_stat.st_flags, dst_link_stat.st_flags)
# tell to follow but dst is not a link
shutil.copystat(src_link, dst, follow_symlinks=False)
self.assertTrue(abs(os.stat(src).st_mtime - os.stat(dst).st_mtime) <
00000.1)
@unittest.skipUnless(hasattr(os, 'chflags') and
hasattr(errno, 'EOPNOTSUPP') and
hasattr(errno, 'ENOTSUP'),
"requires os.chflags, EOPNOTSUPP & ENOTSUP")
def test_copystat_handles_harmless_chflags_errors(self):
tmpdir = self.mkdtemp()
file1 = os.path.join(tmpdir, 'file1')
file2 = os.path.join(tmpdir, 'file2')
write_file(file1, 'xxx')
write_file(file2, 'xxx')
def make_chflags_raiser(err):
ex = OSError()
def _chflags_raiser(path, flags, *, follow_symlinks=True):
ex.errno = err
raise ex
return _chflags_raiser
old_chflags = os.chflags
try:
for err in errno.EOPNOTSUPP, errno.ENOTSUP:
os.chflags = make_chflags_raiser(err)
shutil.copystat(file1, file2)
# assert others errors break it
os.chflags = make_chflags_raiser(errno.EOPNOTSUPP + errno.ENOTSUP)
self.assertRaises(OSError, shutil.copystat, file1, file2)
finally:
os.chflags = old_chflags
### shutil.copyxattr
@os_helper.skip_unless_xattr
def test_copyxattr(self):
tmp_dir = self.mkdtemp()
src = os.path.join(tmp_dir, 'foo')
write_file(src, 'foo')
dst = os.path.join(tmp_dir, 'bar')
write_file(dst, 'bar')
# no xattr == no problem
shutil._copyxattr(src, dst)
# common case
os.setxattr(src, 'user.foo', b'42')
os.setxattr(src, 'user.bar', b'43')
shutil._copyxattr(src, dst)
self.assertEqual(sorted(os.listxattr(src)), sorted(os.listxattr(dst)))
self.assertEqual(
os.getxattr(src, 'user.foo'),
os.getxattr(dst, 'user.foo'))
# check errors don't affect other attrs
os.remove(dst)
write_file(dst, 'bar')
os_error = OSError(errno.EPERM, 'EPERM')
def _raise_on_user_foo(fname, attr, val, **kwargs):
if attr == 'user.foo':
raise os_error
else:
orig_setxattr(fname, attr, val, **kwargs)
try:
orig_setxattr = os.setxattr
os.setxattr = _raise_on_user_foo
shutil._copyxattr(src, dst)
self.assertIn('user.bar', os.listxattr(dst))
finally:
os.setxattr = orig_setxattr
# the source filesystem not supporting xattrs should be ok, too.
def _raise_on_src(fname, *, follow_symlinks=True):
if fname == src:
raise OSError(errno.ENOTSUP, 'Operation not supported')
return orig_listxattr(fname, follow_symlinks=follow_symlinks)
try:
orig_listxattr = os.listxattr
os.listxattr = _raise_on_src
shutil._copyxattr(src, dst)
finally:
os.listxattr = orig_listxattr
# test that shutil.copystat copies xattrs
src = os.path.join(tmp_dir, 'the_original')
srcro = os.path.join(tmp_dir, 'the_original_ro')
write_file(src, src)
write_file(srcro, srcro)
os.setxattr(src, 'user.the_value', b'fiddly')
os.setxattr(srcro, 'user.the_value', b'fiddly')
os.chmod(srcro, 0o444)
dst = os.path.join(tmp_dir, 'the_copy')
dstro = os.path.join(tmp_dir, 'the_copy_ro')
write_file(dst, dst)
write_file(dstro, dstro)
shutil.copystat(src, dst)
shutil.copystat(srcro, dstro)
self.assertEqual(os.getxattr(dst, 'user.the_value'), b'fiddly')
self.assertEqual(os.getxattr(dstro, 'user.the_value'), b'fiddly')
@os_helper.skip_unless_symlink
@os_helper.skip_unless_xattr
@unittest.skipUnless(hasattr(os, 'geteuid') and os.geteuid() == 0,
'root privileges required')
def test_copyxattr_symlinks(self):
# On Linux, it's only possible to access non-user xattr for symlinks;
# which in turn require root privileges. This test should be expanded
# as soon as other platforms gain support for extended attributes.
tmp_dir = self.mkdtemp()
src = os.path.join(tmp_dir, 'foo')
src_link = os.path.join(tmp_dir, 'baz')
write_file(src, 'foo')
os.symlink(src, src_link)
os.setxattr(src, 'trusted.foo', b'42')
os.setxattr(src_link, 'trusted.foo', b'43', follow_symlinks=False)
dst = os.path.join(tmp_dir, 'bar')
dst_link = os.path.join(tmp_dir, 'qux')
write_file(dst, 'bar')
os.symlink(dst, dst_link)
shutil._copyxattr(src_link, dst_link, follow_symlinks=False)
self.assertEqual(os.getxattr(dst_link, 'trusted.foo', follow_symlinks=False), b'43')
self.assertRaises(OSError, os.getxattr, dst, 'trusted.foo')
shutil._copyxattr(src_link, dst, follow_symlinks=False)
self.assertEqual(os.getxattr(dst, 'trusted.foo'), b'43')
### shutil.copy
def _copy_file(self, method):
fname = 'test.txt'
tmpdir = self.mkdtemp()
write_file((tmpdir, fname), 'xxx')
file1 = os.path.join(tmpdir, fname)
tmpdir2 = self.mkdtemp()
method(file1, tmpdir2)
file2 = os.path.join(tmpdir2, fname)
return (file1, file2)
def test_copy(self):
# Ensure that the copied file exists and has the same mode bits.
file1, file2 = self._copy_file(shutil.copy)
self.assertTrue(os.path.exists(file2))
self.assertEqual(os.stat(file1).st_mode, os.stat(file2).st_mode)
@os_helper.skip_unless_symlink
def test_copy_symlinks(self):
tmp_dir = self.mkdtemp()
src = os.path.join(tmp_dir, 'foo')
dst = os.path.join(tmp_dir, 'bar')
src_link = os.path.join(tmp_dir, 'baz')
write_file(src, 'foo')
os.symlink(src, src_link)
if hasattr(os, 'lchmod'):
os.lchmod(src_link, stat.S_IRWXU | stat.S_IRWXO)
# don't follow
shutil.copy(src_link, dst, follow_symlinks=True)
self.assertFalse(os.path.islink(dst))
self.assertEqual(read_file(src), read_file(dst))
os.remove(dst)
# follow
shutil.copy(src_link, dst, follow_symlinks=False)
self.assertTrue(os.path.islink(dst))
self.assertEqual(os.readlink(dst), os.readlink(src_link))
if hasattr(os, 'lchmod'):
self.assertEqual(os.lstat(src_link).st_mode,
os.lstat(dst).st_mode)
### shutil.copy2
@unittest.skipUnless(hasattr(os, 'utime'), 'requires os.utime')
def test_copy2(self):
# Ensure that the copied file exists and has the same mode and
# modification time bits.
file1, file2 = self._copy_file(shutil.copy2)
self.assertTrue(os.path.exists(file2))
file1_stat = os.stat(file1)
file2_stat = os.stat(file2)
self.assertEqual(file1_stat.st_mode, file2_stat.st_mode)
for attr in 'st_atime', 'st_mtime':
# The modification times may be truncated in the new file.
self.assertLessEqual(getattr(file1_stat, attr),
getattr(file2_stat, attr) + 1)
if hasattr(os, 'chflags') and hasattr(file1_stat, 'st_flags'):
self.assertEqual(getattr(file1_stat, 'st_flags'),
getattr(file2_stat, 'st_flags'))
@os_helper.skip_unless_symlink
def test_copy2_symlinks(self):
tmp_dir = self.mkdtemp()
src = os.path.join(tmp_dir, 'foo')
dst = os.path.join(tmp_dir, 'bar')
src_link = os.path.join(tmp_dir, 'baz')
write_file(src, 'foo')
os.symlink(src, src_link)
if hasattr(os, 'lchmod'):
os.lchmod(src_link, stat.S_IRWXU | stat.S_IRWXO)
if hasattr(os, 'lchflags') and hasattr(stat, 'UF_NODUMP'):
os.lchflags(src_link, stat.UF_NODUMP)
src_stat = os.stat(src)
src_link_stat = os.lstat(src_link)
# follow
shutil.copy2(src_link, dst, follow_symlinks=True)
self.assertFalse(os.path.islink(dst))
self.assertEqual(read_file(src), read_file(dst))
os.remove(dst)
# don't follow
shutil.copy2(src_link, dst, follow_symlinks=False)
self.assertTrue(os.path.islink(dst))
self.assertEqual(os.readlink(dst), os.readlink(src_link))
dst_stat = os.lstat(dst)
if os.utime in os.supports_follow_symlinks:
for attr in 'st_atime', 'st_mtime':
# The modification times may be truncated in the new file.
self.assertLessEqual(getattr(src_link_stat, attr),
getattr(dst_stat, attr) + 1)
if hasattr(os, 'lchmod'):
self.assertEqual(src_link_stat.st_mode, dst_stat.st_mode)
self.assertNotEqual(src_stat.st_mode, dst_stat.st_mode)
if hasattr(os, 'lchflags') and hasattr(src_link_stat, 'st_flags'):
self.assertEqual(src_link_stat.st_flags, dst_stat.st_flags)
@os_helper.skip_unless_xattr
def test_copy2_xattr(self):
tmp_dir = self.mkdtemp()
src = os.path.join(tmp_dir, 'foo')
dst = os.path.join(tmp_dir, 'bar')
write_file(src, 'foo')
os.setxattr(src, 'user.foo', b'42')
shutil.copy2(src, dst)
self.assertEqual(
os.getxattr(src, 'user.foo'),
os.getxattr(dst, 'user.foo'))
os.remove(dst)
def test_copy_return_value(self):
# copy and copy2 both return their destination path.
for fn in (shutil.copy, shutil.copy2):
src_dir = self.mkdtemp()
dst_dir = self.mkdtemp()
src = os.path.join(src_dir, 'foo')
write_file(src, 'foo')
rv = fn(src, dst_dir)
self.assertEqual(rv, os.path.join(dst_dir, 'foo'))
rv = fn(src, os.path.join(dst_dir, 'bar'))
self.assertEqual(rv, os.path.join(dst_dir, 'bar'))
def test_copy_dir(self):
self._test_copy_dir(shutil.copy)
def test_copy2_dir(self):
self._test_copy_dir(shutil.copy2)
def _test_copy_dir(self, copy_func):
src_dir = self.mkdtemp()
src_file = os.path.join(src_dir, 'foo')
dir2 = self.mkdtemp()
dst = os.path.join(src_dir, 'does_not_exist/')
write_file(src_file, 'foo')
if sys.platform == "win32":
err = PermissionError
else:
err = IsADirectoryError
self.assertRaises(err, copy_func, dir2, src_dir)
# raise *err* because of src rather than FileNotFoundError because of dst
self.assertRaises(err, copy_func, dir2, dst)
copy_func(src_file, dir2) # should not raise exceptions
### shutil.copyfile
@os_helper.skip_unless_symlink
def test_copyfile_symlinks(self):
tmp_dir = self.mkdtemp()
src = os.path.join(tmp_dir, 'src')
dst = os.path.join(tmp_dir, 'dst')
dst_link = os.path.join(tmp_dir, 'dst_link')
link = os.path.join(tmp_dir, 'link')
write_file(src, 'foo')
os.symlink(src, link)
# don't follow
shutil.copyfile(link, dst_link, follow_symlinks=False)
self.assertTrue(os.path.islink(dst_link))
self.assertEqual(os.readlink(link), os.readlink(dst_link))
# follow
shutil.copyfile(link, dst)
self.assertFalse(os.path.islink(dst))
@unittest.skipUnless(hasattr(os, 'link'), 'requires os.link')
def test_dont_copy_file_onto_link_to_itself(self):
# bug 851123.
os.mkdir(TESTFN)
src = os.path.join(TESTFN, 'cheese')
dst = os.path.join(TESTFN, 'shop')
try:
with open(src, 'w', encoding='utf-8') as f:
f.write('cheddar')
try:
os.link(src, dst)
except PermissionError as e:
self.skipTest('os.link(): %s' % e)
self.assertRaises(shutil.SameFileError, shutil.copyfile, src, dst)
with open(src, 'r', encoding='utf-8') as f:
self.assertEqual(f.read(), 'cheddar')
os.remove(dst)
finally:
shutil.rmtree(TESTFN, ignore_errors=True)
@os_helper.skip_unless_symlink
def test_dont_copy_file_onto_symlink_to_itself(self):
# bug 851123.
os.mkdir(TESTFN)
src = os.path.join(TESTFN, 'cheese')
dst = os.path.join(TESTFN, 'shop')
try:
with open(src, 'w', encoding='utf-8') as f:
f.write('cheddar')
# Using `src` here would mean we end up with a symlink pointing
# to TESTFN/TESTFN/cheese, while it should point at
# TESTFN/cheese.
os.symlink('cheese', dst)
self.assertRaises(shutil.SameFileError, shutil.copyfile, src, dst)
with open(src, 'r', encoding='utf-8') as f:
self.assertEqual(f.read(), 'cheddar')
os.remove(dst)
finally:
shutil.rmtree(TESTFN, ignore_errors=True)
# Issue #3002: copyfile and copytree block indefinitely on named pipes
@unittest.skipUnless(hasattr(os, "mkfifo"), 'requires os.mkfifo()')
@unittest.skipIf(sys.platform == "vxworks",
"fifo requires special path on VxWorks")
def test_copyfile_named_pipe(self):
try:
os.mkfifo(TESTFN)
except PermissionError as e:
self.skipTest('os.mkfifo(): %s' % e)
try:
self.assertRaises(shutil.SpecialFileError,
shutil.copyfile, TESTFN, TESTFN2)
self.assertRaises(shutil.SpecialFileError,
shutil.copyfile, __file__, TESTFN)
finally:
os.remove(TESTFN)
def test_copyfile_return_value(self):
# copytree returns its destination path.
src_dir = self.mkdtemp()
dst_dir = self.mkdtemp()
dst_file = os.path.join(dst_dir, 'bar')
src_file = os.path.join(src_dir, 'foo')
write_file(src_file, 'foo')
rv = shutil.copyfile(src_file, dst_file)
self.assertTrue(os.path.exists(rv))
self.assertEqual(read_file(src_file), read_file(dst_file))
def test_copyfile_same_file(self):
# copyfile() should raise SameFileError if the source and destination
# are the same.
src_dir = self.mkdtemp()
src_file = os.path.join(src_dir, 'foo')
write_file(src_file, 'foo')
self.assertRaises(SameFileError, shutil.copyfile, src_file, src_file)
# But Error should work too, to stay backward compatible.
self.assertRaises(Error, shutil.copyfile, src_file, src_file)
# Make sure file is not corrupted.
self.assertEqual(read_file(src_file), 'foo')
# If the "ignore_same_file" is set the "SameFileError" exception shouldn't be raised.
dst_file = shutil.copyfile(src_file, src_file, ignore_same_file=True)
# The return value should be the dst
self.assertEqual(dst_file, src_file)
# Make sure file is not corrupted.
self.assertEqual(read_file(dst_file), 'foo')
@unittest.skipIf(MACOS or SOLARIS or _winapi, 'On MACOS, Solaris and Windows the errors are not confusing (though different)')
def test_copyfile_nonexistent_dir(self):
# Issue 43219
src_dir = self.mkdtemp()
src_file = os.path.join(src_dir, 'foo')
dst = os.path.join(src_dir, 'does_not_exist/')
write_file(src_file, 'foo')
self.assertRaises(FileNotFoundError, shutil.copyfile, src_file, dst)
def test_copyfile_copy_dir(self):
# Issue 45234
# test copy() and copyfile() raising proper exceptions when src and/or
# dst are directories
src_dir = self.mkdtemp()
src_file = os.path.join(src_dir, 'foo')
dir2 = self.mkdtemp()
dst = os.path.join(src_dir, 'does_not_exist/')
write_file(src_file, 'foo')
if sys.platform == "win32":
err = PermissionError
else:
err = IsADirectoryError
self.assertRaises(err, shutil.copyfile, src_dir, dst)
self.assertRaises(err, shutil.copyfile, src_file, src_dir)
self.assertRaises(err, shutil.copyfile, dir2, src_dir)
class TestArchives(BaseTest, unittest.TestCase):
### shutil.make_archive
@support.requires_zlib()
def test_make_tarball(self):
# creating something to tar
root_dir, base_dir = self._create_files('')
tmpdir2 = self.mkdtemp()
# force shutil to create the directory
os.rmdir(tmpdir2)
# working with relative paths
work_dir = os.path.dirname(tmpdir2)
rel_base_name = os.path.join(os.path.basename(tmpdir2), 'archive')
with os_helper.change_cwd(work_dir):
base_name = os.path.abspath(rel_base_name)
tarball = make_archive(rel_base_name, 'gztar', root_dir, '.')
# check if the compressed tarball was created
self.assertEqual(tarball, base_name + '.tar.gz')
self.assertTrue(os.path.isfile(tarball))
self.assertTrue(tarfile.is_tarfile(tarball))
with tarfile.open(tarball, 'r:gz') as tf:
self.assertCountEqual(tf.getnames(),
['.', './sub', './sub2',
'./file1', './file2', './sub/file3'])
# trying an uncompressed one
with os_helper.change_cwd(work_dir):
tarball = make_archive(rel_base_name, 'tar', root_dir, '.')
self.assertEqual(tarball, base_name + '.tar')
self.assertTrue(os.path.isfile(tarball))
self.assertTrue(tarfile.is_tarfile(tarball))
with tarfile.open(tarball, 'r') as tf:
self.assertCountEqual(tf.getnames(),
['.', './sub', './sub2',
'./file1', './file2', './sub/file3'])
def _tarinfo(self, path):
with tarfile.open(path) as tar:
names = tar.getnames()
names.sort()
return tuple(names)
def _create_files(self, base_dir='dist'):
# creating something to tar
root_dir = self.mkdtemp()
dist = os.path.join(root_dir, base_dir)
os.makedirs(dist, exist_ok=True)
write_file((dist, 'file1'), 'xxx')
write_file((dist, 'file2'), 'xxx')
os.mkdir(os.path.join(dist, 'sub'))
write_file((dist, 'sub', 'file3'), 'xxx')
os.mkdir(os.path.join(dist, 'sub2'))
if base_dir:
write_file((root_dir, 'outer'), 'xxx')
return root_dir, base_dir
@support.requires_zlib()
@unittest.skipUnless(shutil.which('tar'),
'Need the tar command to run')
def test_tarfile_vs_tar(self):
root_dir, base_dir = self._create_files()
base_name = os.path.join(self.mkdtemp(), 'archive')
tarball = make_archive(base_name, 'gztar', root_dir, base_dir)
# check if the compressed tarball was created
self.assertEqual(tarball, base_name + '.tar.gz')
self.assertTrue(os.path.isfile(tarball))
# now create another tarball using `tar`
tarball2 = os.path.join(root_dir, 'archive2.tar')
tar_cmd = ['tar', '-cf', 'archive2.tar', base_dir]
subprocess.check_call(tar_cmd, cwd=root_dir,
stdout=subprocess.DEVNULL)
self.assertTrue(os.path.isfile(tarball2))
# let's compare both tarballs
self.assertEqual(self._tarinfo(tarball), self._tarinfo(tarball2))
# trying an uncompressed one
tarball = make_archive(base_name, 'tar', root_dir, base_dir)
self.assertEqual(tarball, base_name + '.tar')
self.assertTrue(os.path.isfile(tarball))
# now for a dry_run
tarball = make_archive(base_name, 'tar', root_dir, base_dir,
dry_run=True)
self.assertEqual(tarball, base_name + '.tar')
self.assertTrue(os.path.isfile(tarball))
@support.requires_zlib()
def test_make_zipfile(self):
# creating something to zip
root_dir, base_dir = self._create_files()
tmpdir2 = self.mkdtemp()
# force shutil to create the directory
os.rmdir(tmpdir2)
# working with relative paths
work_dir = os.path.dirname(tmpdir2)
rel_base_name = os.path.join(os.path.basename(tmpdir2), 'archive')
with os_helper.change_cwd(work_dir):
base_name = os.path.abspath(rel_base_name)
res = make_archive(rel_base_name, 'zip', root_dir)
self.assertEqual(res, base_name + '.zip')
self.assertTrue(os.path.isfile(res))
self.assertTrue(zipfile.is_zipfile(res))
with zipfile.ZipFile(res) as zf:
self.assertCountEqual(zf.namelist(),
['dist/', 'dist/sub/', 'dist/sub2/',
'dist/file1', 'dist/file2', 'dist/sub/file3',
'outer'])
with os_helper.change_cwd(work_dir):
base_name = os.path.abspath(rel_base_name)
res = make_archive(rel_base_name, 'zip', root_dir, base_dir)
self.assertEqual(res, base_name + '.zip')
self.assertTrue(os.path.isfile(res))
self.assertTrue(zipfile.is_zipfile(res))
with zipfile.ZipFile(res) as zf:
self.assertCountEqual(zf.namelist(),
['dist/', 'dist/sub/', 'dist/sub2/',
'dist/file1', 'dist/file2', 'dist/sub/file3'])
@support.requires_zlib()
@unittest.skipUnless(shutil.which('zip'),
'Need the zip command to run')
def test_zipfile_vs_zip(self):
root_dir, base_dir = self._create_files()
base_name = os.path.join(self.mkdtemp(), 'archive')
archive = make_archive(base_name, 'zip', root_dir, base_dir)
# check if ZIP file was created
self.assertEqual(archive, base_name + '.zip')
self.assertTrue(os.path.isfile(archive))
# now create another ZIP file using `zip`
archive2 = os.path.join(root_dir, 'archive2.zip')
zip_cmd = ['zip', '-q', '-r', 'archive2.zip', base_dir]
subprocess.check_call(zip_cmd, cwd=root_dir,
stdout=subprocess.DEVNULL)
self.assertTrue(os.path.isfile(archive2))
# let's compare both ZIP files
with zipfile.ZipFile(archive) as zf:
names = zf.namelist()
with zipfile.ZipFile(archive2) as zf:
names2 = zf.namelist()
self.assertEqual(sorted(names), sorted(names2))
@support.requires_zlib()
@unittest.skipUnless(shutil.which('unzip'),
'Need the unzip command to run')
def test_unzip_zipfile(self):
root_dir, base_dir = self._create_files()
base_name = os.path.join(self.mkdtemp(), 'archive')
archive = make_archive(base_name, 'zip', root_dir, base_dir)
# check if ZIP file was created
self.assertEqual(archive, base_name + '.zip')
self.assertTrue(os.path.isfile(archive))
# now check the ZIP file using `unzip -t`
zip_cmd = ['unzip', '-t', archive]
with os_helper.change_cwd(root_dir):
try:
subprocess.check_output(zip_cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as exc:
details = exc.output.decode(errors="replace")
if 'unrecognized option: t' in details:
self.skipTest("unzip doesn't support -t")
msg = "{}\n\n**Unzip Output**\n{}"
self.fail(msg.format(exc, details))
def test_make_archive(self):
tmpdir = self.mkdtemp()
base_name = os.path.join(tmpdir, 'archive')
self.assertRaises(ValueError, make_archive, base_name, 'xxx')
@support.requires_zlib()
def test_make_archive_owner_group(self):
# testing make_archive with owner and group, with various combinations
        # this works even if there's no gid/uid support
if UID_GID_SUPPORT:
group = grp.getgrgid(0)[0]
owner = pwd.getpwuid(0)[0]
else:
group = owner = 'root'
root_dir, base_dir = self._create_files()
base_name = os.path.join(self.mkdtemp(), 'archive')
res = make_archive(base_name, 'zip', root_dir, base_dir, owner=owner,
group=group)
self.assertTrue(os.path.isfile(res))
res = make_archive(base_name, 'zip', root_dir, base_dir)
self.assertTrue(os.path.isfile(res))
res = make_archive(base_name, 'tar', root_dir, base_dir,
owner=owner, group=group)
self.assertTrue(os.path.isfile(res))
res = make_archive(base_name, 'tar', root_dir, base_dir,
owner='kjhkjhkjg', group='oihohoh')
self.assertTrue(os.path.isfile(res))
@support.requires_zlib()
@unittest.skipUnless(UID_GID_SUPPORT, "Requires grp and pwd support")
def test_tarfile_root_owner(self):
root_dir, base_dir = self._create_files()
base_name = os.path.join(self.mkdtemp(), 'archive')
group = grp.getgrgid(0)[0]
owner = pwd.getpwuid(0)[0]
with os_helper.change_cwd(root_dir):
archive_name = make_archive(base_name, 'gztar', root_dir, 'dist',
owner=owner, group=group)
# check if the compressed tarball was created
self.assertTrue(os.path.isfile(archive_name))
# now checks the rights
archive = tarfile.open(archive_name)
try:
for member in archive.getmembers():
self.assertEqual(member.uid, 0)
self.assertEqual(member.gid, 0)
finally:
archive.close()
def test_make_archive_cwd(self):
current_dir = os.getcwd()
def _breaks(*args, **kw):
raise RuntimeError()
register_archive_format('xxx', _breaks, [], 'xxx file')
try:
try:
make_archive('xxx', 'xxx', root_dir=self.mkdtemp())
except Exception:
pass
self.assertEqual(os.getcwd(), current_dir)
finally:
unregister_archive_format('xxx')
def test_make_tarfile_in_curdir(self):
# Issue #21280
root_dir = self.mkdtemp()
with os_helper.change_cwd(root_dir):
self.assertEqual(make_archive('test', 'tar'), 'test.tar')
self.assertTrue(os.path.isfile('test.tar'))
@support.requires_zlib()
def test_make_zipfile_in_curdir(self):
# Issue #21280
root_dir = self.mkdtemp()
with os_helper.change_cwd(root_dir):
self.assertEqual(make_archive('test', 'zip'), 'test.zip')
self.assertTrue(os.path.isfile('test.zip'))
def test_register_archive_format(self):
self.assertRaises(TypeError, register_archive_format, 'xxx', 1)
self.assertRaises(TypeError, register_archive_format, 'xxx', lambda: x,
1)
self.assertRaises(TypeError, register_archive_format, 'xxx', lambda: x,
[(1, 2), (1, 2, 3)])
register_archive_format('xxx', lambda: x, [(1, 2)], 'xxx file')
formats = [name for name, params in get_archive_formats()]
self.assertIn('xxx', formats)
unregister_archive_format('xxx')
formats = [name for name, params in get_archive_formats()]
self.assertNotIn('xxx', formats)
### shutil.unpack_archive
def check_unpack_archive(self, format):
self.check_unpack_archive_with_converter(format, lambda path: path)
self.check_unpack_archive_with_converter(format, pathlib.Path)
self.check_unpack_archive_with_converter(format, FakePath)
def check_unpack_archive_with_converter(self, format, converter):
root_dir, base_dir = self._create_files()
expected = rlistdir(root_dir)
expected.remove('outer')
base_name = os.path.join(self.mkdtemp(), 'archive')
filename = make_archive(base_name, format, root_dir, base_dir)
# let's try to unpack it now
tmpdir2 = self.mkdtemp()
unpack_archive(converter(filename), converter(tmpdir2))
self.assertEqual(rlistdir(tmpdir2), expected)
# and again, this time with the format specified
tmpdir3 = self.mkdtemp()
unpack_archive(converter(filename), converter(tmpdir3), format=format)
self.assertEqual(rlistdir(tmpdir3), expected)
self.assertRaises(shutil.ReadError, unpack_archive, converter(TESTFN))
self.assertRaises(ValueError, unpack_archive, converter(TESTFN), format='xxx')
def test_unpack_archive_tar(self):
self.check_unpack_archive('tar')
@support.requires_zlib()
def test_unpack_archive_gztar(self):
self.check_unpack_archive('gztar')
@support.requires_bz2()
def test_unpack_archive_bztar(self):
self.check_unpack_archive('bztar')
@support.requires_lzma()
@unittest.skipIf(AIX and not _maxdataOK(), "AIX MAXDATA must be 0x20000000 or larger")
def test_unpack_archive_xztar(self):
self.check_unpack_archive('xztar')
@support.requires_zlib()
def test_unpack_archive_zip(self):
self.check_unpack_archive('zip')
def test_unpack_registry(self):
formats = get_unpack_formats()
def _boo(filename, extract_dir, extra):
self.assertEqual(extra, 1)
self.assertEqual(filename, 'stuff.boo')
self.assertEqual(extract_dir, 'xx')
register_unpack_format('Boo', ['.boo', '.b2'], _boo, [('extra', 1)])
unpack_archive('stuff.boo', 'xx')
# trying to register a .boo unpacker again
self.assertRaises(RegistryError, register_unpack_format, 'Boo2',
['.boo'], _boo)
# should work now
unregister_unpack_format('Boo')
register_unpack_format('Boo2', ['.boo'], _boo)
self.assertIn(('Boo2', ['.boo'], ''), get_unpack_formats())
self.assertNotIn(('Boo', ['.boo'], ''), get_unpack_formats())
# let's leave a clean state
unregister_unpack_format('Boo2')
self.assertEqual(get_unpack_formats(), formats)
class TestMisc(BaseTest, unittest.TestCase):
@unittest.skipUnless(hasattr(shutil, 'disk_usage'),
"disk_usage not available on this platform")
def test_disk_usage(self):
usage = shutil.disk_usage(os.path.dirname(__file__))
for attr in ('total', 'used', 'free'):
self.assertIsInstance(getattr(usage, attr), int)
self.assertGreater(usage.total, 0)
self.assertGreater(usage.used, 0)
self.assertGreaterEqual(usage.free, 0)
self.assertGreaterEqual(usage.total, usage.used)
self.assertGreater(usage.total, usage.free)
# bpo-32557: Check that disk_usage() also accepts a filename
shutil.disk_usage(__file__)
@unittest.skipUnless(UID_GID_SUPPORT, "Requires grp and pwd support")
@unittest.skipUnless(hasattr(os, 'chown'), 'requires os.chown')
def test_chown(self):
dirname = self.mkdtemp()
filename = tempfile.mktemp(dir=dirname)
write_file(filename, 'testing chown function')
with self.assertRaises(ValueError):
shutil.chown(filename)
with self.assertRaises(LookupError):
shutil.chown(filename, user='non-existing username')
with self.assertRaises(LookupError):
shutil.chown(filename, group='non-existing groupname')
with self.assertRaises(TypeError):
shutil.chown(filename, b'spam')
with self.assertRaises(TypeError):
shutil.chown(filename, 3.14)
uid = os.getuid()
gid = os.getgid()
def check_chown(path, uid=None, gid=None):
s = os.stat(filename)
if uid is not None:
self.assertEqual(uid, s.st_uid)
if gid is not None:
self.assertEqual(gid, s.st_gid)
shutil.chown(filename, uid, gid)
check_chown(filename, uid, gid)
shutil.chown(filename, uid)
check_chown(filename, uid)
shutil.chown(filename, user=uid)
check_chown(filename, uid)
shutil.chown(filename, group=gid)
check_chown(filename, gid=gid)
shutil.chown(dirname, uid, gid)
check_chown(dirname, uid, gid)
shutil.chown(dirname, uid)
check_chown(dirname, uid)
shutil.chown(dirname, user=uid)
check_chown(dirname, uid)
shutil.chown(dirname, group=gid)
check_chown(dirname, gid=gid)
try:
user = pwd.getpwuid(uid)[0]
group = grp.getgrgid(gid)[0]
except KeyError:
# On some systems uid/gid cannot be resolved.
pass
else:
shutil.chown(filename, user, group)
check_chown(filename, uid, gid)
shutil.chown(dirname, user, group)
check_chown(dirname, uid, gid)
class TestWhich(BaseTest, unittest.TestCase):
def setUp(self):
self.temp_dir = self.mkdtemp(prefix="Tmp")
# Give the temp_file an ".exe" suffix for all.
# It's needed on Windows and not harmful on other platforms.
self.temp_file = tempfile.NamedTemporaryFile(dir=self.temp_dir,
prefix="Tmp",
suffix=".Exe")
os.chmod(self.temp_file.name, stat.S_IXUSR)
self.addCleanup(self.temp_file.close)
self.dir, self.file = os.path.split(self.temp_file.name)
self.env_path = self.dir
self.curdir = os.curdir
self.ext = ".EXE"
def test_basic(self):
# Given an EXE in a directory, it should be returned.
rv = shutil.which(self.file, path=self.dir)
self.assertEqual(rv, self.temp_file.name)
def test_absolute_cmd(self):
# When given the fully qualified path to an executable that exists,
# it should be returned.
rv = shutil.which(self.temp_file.name, path=self.temp_dir)
self.assertEqual(rv, self.temp_file.name)
def test_relative_cmd(self):
# When given the relative path with a directory part to an executable
# that exists, it should be returned.
base_dir, tail_dir = os.path.split(self.dir)
relpath = os.path.join(tail_dir, self.file)
with os_helper.change_cwd(path=base_dir):
rv = shutil.which(relpath, path=self.temp_dir)
self.assertEqual(rv, relpath)
# But it shouldn't be searched in PATH directories (issue #16957).
with os_helper.change_cwd(path=self.dir):
rv = shutil.which(relpath, path=base_dir)
self.assertIsNone(rv)
def test_cwd(self):
# Issue #16957
base_dir = os.path.dirname(self.dir)
with os_helper.change_cwd(path=self.dir):
rv = shutil.which(self.file, path=base_dir)
if sys.platform == "win32":
# Windows: current directory implicitly on PATH
self.assertEqual(rv, os.path.join(self.curdir, self.file))
else:
# Other platforms: shouldn't match in the current directory.
self.assertIsNone(rv)
@unittest.skipIf(hasattr(os, 'geteuid') and os.geteuid() == 0,
'non-root user required')
def test_non_matching_mode(self):
# Set the file read-only and ask for writeable files.
os.chmod(self.temp_file.name, stat.S_IREAD)
if os.access(self.temp_file.name, os.W_OK):
self.skipTest("can't set the file read-only")
rv = shutil.which(self.file, path=self.dir, mode=os.W_OK)
self.assertIsNone(rv)
def test_relative_path(self):
base_dir, tail_dir = os.path.split(self.dir)
with os_helper.change_cwd(path=base_dir):
rv = shutil.which(self.file, path=tail_dir)
self.assertEqual(rv, os.path.join(tail_dir, self.file))
def test_nonexistent_file(self):
# Return None when no matching executable file is found on the path.
rv = shutil.which("foo.exe", path=self.dir)
self.assertIsNone(rv)
@unittest.skipUnless(sys.platform == "win32",
"pathext check is Windows-only")
def test_pathext_checking(self):
# Ask for the file without the ".exe" extension, then ensure that
# it gets found properly with the extension.
rv = shutil.which(self.file[:-4], path=self.dir)
self.assertEqual(rv, self.temp_file.name[:-4] + self.ext)
def test_environ_path(self):
with os_helper.EnvironmentVarGuard() as env:
env['PATH'] = self.env_path
rv = shutil.which(self.file)
self.assertEqual(rv, self.temp_file.name)
def test_environ_path_empty(self):
# PATH='': no match
with os_helper.EnvironmentVarGuard() as env:
env['PATH'] = ''
with unittest.mock.patch('os.confstr', return_value=self.dir, \
create=True), \
support.swap_attr(os, 'defpath', self.dir), \
os_helper.change_cwd(self.dir):
rv = shutil.which(self.file)
self.assertIsNone(rv)
def test_environ_path_cwd(self):
expected_cwd = os.path.basename(self.temp_file.name)
if sys.platform == "win32":
curdir = os.curdir
if isinstance(expected_cwd, bytes):
curdir = os.fsencode(curdir)
expected_cwd = os.path.join(curdir, expected_cwd)
# PATH=':': explicitly looks in the current directory
with os_helper.EnvironmentVarGuard() as env:
env['PATH'] = os.pathsep
with unittest.mock.patch('os.confstr', return_value=self.dir, \
create=True), \
support.swap_attr(os, 'defpath', self.dir):
rv = shutil.which(self.file)
self.assertIsNone(rv)
# look in current directory
with os_helper.change_cwd(self.dir):
rv = shutil.which(self.file)
self.assertEqual(rv, expected_cwd)
def test_environ_path_missing(self):
with os_helper.EnvironmentVarGuard() as env:
env.pop('PATH', None)
# without confstr
with unittest.mock.patch('os.confstr', side_effect=ValueError, \
create=True), \
support.swap_attr(os, 'defpath', self.dir):
rv = shutil.which(self.file)
self.assertEqual(rv, self.temp_file.name)
# with confstr
with unittest.mock.patch('os.confstr', return_value=self.dir, \
create=True), \
support.swap_attr(os, 'defpath', ''):
rv = shutil.which(self.file)
self.assertEqual(rv, self.temp_file.name)
def test_empty_path(self):
base_dir = os.path.dirname(self.dir)
with os_helper.change_cwd(path=self.dir), \
os_helper.EnvironmentVarGuard() as env:
env['PATH'] = self.env_path
rv = shutil.which(self.file, path='')
self.assertIsNone(rv)
def test_empty_path_no_PATH(self):
with os_helper.EnvironmentVarGuard() as env:
env.pop('PATH', None)
rv = shutil.which(self.file)
self.assertIsNone(rv)
@unittest.skipUnless(sys.platform == "win32", 'test specific to Windows')
def test_pathext(self):
ext = ".xyz"
temp_filexyz = tempfile.NamedTemporaryFile(dir=self.temp_dir,
prefix="Tmp2", suffix=ext)
os.chmod(temp_filexyz.name, stat.S_IXUSR)
self.addCleanup(temp_filexyz.close)
# strip path and extension
program = os.path.basename(temp_filexyz.name)
program = os.path.splitext(program)[0]
with os_helper.EnvironmentVarGuard() as env:
env['PATHEXT'] = ext
rv = shutil.which(program, path=self.temp_dir)
self.assertEqual(rv, temp_filexyz.name)
# Issue 40592: See https://bugs.python.org/issue40592
@unittest.skipUnless(sys.platform == "win32", 'test specific to Windows')
def test_pathext_with_empty_str(self):
ext = ".xyz"
temp_filexyz = tempfile.NamedTemporaryFile(dir=self.temp_dir,
prefix="Tmp2", suffix=ext)
self.addCleanup(temp_filexyz.close)
# strip path and extension
program = os.path.basename(temp_filexyz.name)
program = os.path.splitext(program)[0]
with os_helper.EnvironmentVarGuard() as env:
env['PATHEXT'] = f"{ext};" # note the ;
rv = shutil.which(program, path=self.temp_dir)
self.assertEqual(rv, temp_filexyz.name)
class TestWhichBytes(TestWhich):
def setUp(self):
TestWhich.setUp(self)
self.dir = os.fsencode(self.dir)
self.file = os.fsencode(self.file)
self.temp_file.name = os.fsencode(self.temp_file.name)
self.curdir = os.fsencode(self.curdir)
self.ext = os.fsencode(self.ext)
class TestMove(BaseTest, unittest.TestCase):
def setUp(self):
filename = "foo"
self.src_dir = self.mkdtemp()
self.dst_dir = self.mkdtemp()
self.src_file = os.path.join(self.src_dir, filename)
self.dst_file = os.path.join(self.dst_dir, filename)
with open(self.src_file, "wb") as f:
f.write(b"spam")
def _check_move_file(self, src, dst, real_dst):
with open(src, "rb") as f:
contents = f.read()
shutil.move(src, dst)
with open(real_dst, "rb") as f:
self.assertEqual(contents, f.read())
self.assertFalse(os.path.exists(src))
def _check_move_dir(self, src, dst, real_dst):
contents = sorted(os.listdir(src))
shutil.move(src, dst)
self.assertEqual(contents, sorted(os.listdir(real_dst)))
self.assertFalse(os.path.exists(src))
def test_move_file(self):
# Move a file to another location on the same filesystem.
self._check_move_file(self.src_file, self.dst_file, self.dst_file)
def test_move_file_to_dir(self):
# Move a file inside an existing dir on the same filesystem.
self._check_move_file(self.src_file, self.dst_dir, self.dst_file)
def test_move_file_to_dir_pathlike_src(self):
# Move a pathlike file to another location on the same filesystem.
src = pathlib.Path(self.src_file)
self._check_move_file(src, self.dst_dir, self.dst_file)
def test_move_file_to_dir_pathlike_dst(self):
# Move a file to another pathlike location on the same filesystem.
dst = pathlib.Path(self.dst_dir)
self._check_move_file(self.src_file, dst, self.dst_file)
@mock_rename
def test_move_file_other_fs(self):
# Move a file to an existing dir on another filesystem.
self.test_move_file()
@mock_rename
def test_move_file_to_dir_other_fs(self):
# Move a file to another location on another filesystem.
self.test_move_file_to_dir()
def test_move_dir(self):
# Move a dir to another location on the same filesystem.
dst_dir = tempfile.mktemp(dir=self.mkdtemp())
try:
self._check_move_dir(self.src_dir, dst_dir, dst_dir)
finally:
os_helper.rmtree(dst_dir)
@mock_rename
def test_move_dir_other_fs(self):
# Move a dir to another location on another filesystem.
self.test_move_dir()
def test_move_dir_to_dir(self):
# Move a dir inside an existing dir on the same filesystem.
self._check_move_dir(self.src_dir, self.dst_dir,
os.path.join(self.dst_dir, os.path.basename(self.src_dir)))
@mock_rename
def test_move_dir_to_dir_other_fs(self):
# Move a dir inside an existing dir on another filesystem.
self.test_move_dir_to_dir()
def test_move_dir_sep_to_dir(self):
self._check_move_dir(self.src_dir + os.path.sep, self.dst_dir,
os.path.join(self.dst_dir, os.path.basename(self.src_dir)))
@unittest.skipUnless(os.path.altsep, 'requires os.path.altsep')
def test_move_dir_altsep_to_dir(self):
self._check_move_dir(self.src_dir + os.path.altsep, self.dst_dir,
os.path.join(self.dst_dir, os.path.basename(self.src_dir)))
def test_existing_file_inside_dest_dir(self):
# A file with the same name inside the destination dir already exists.
with open(self.dst_file, "wb"):
pass
self.assertRaises(shutil.Error, shutil.move, self.src_file, self.dst_dir)
def test_dont_move_dir_in_itself(self):
# Moving a dir inside itself raises an Error.
dst = os.path.join(self.src_dir, "bar")
self.assertRaises(shutil.Error, shutil.move, self.src_dir, dst)
def test_destinsrc_false_negative(self):
os.mkdir(TESTFN)
try:
for src, dst in [('srcdir', 'srcdir/dest')]:
src = os.path.join(TESTFN, src)
dst = os.path.join(TESTFN, dst)
self.assertTrue(shutil._destinsrc(src, dst),
msg='_destinsrc() wrongly concluded that '
'dst (%s) is not in src (%s)' % (dst, src))
finally:
os_helper.rmtree(TESTFN)
def test_destinsrc_false_positive(self):
os.mkdir(TESTFN)
try:
for src, dst in [('srcdir', 'src/dest'), ('srcdir', 'srcdir.new')]:
src = os.path.join(TESTFN, src)
dst = os.path.join(TESTFN, dst)
self.assertFalse(shutil._destinsrc(src, dst),
msg='_destinsrc() wrongly concluded that '
'dst (%s) is in src (%s)' % (dst, src))
finally:
os_helper.rmtree(TESTFN)
@os_helper.skip_unless_symlink
@mock_rename
def test_move_file_symlink(self):
dst = os.path.join(self.src_dir, 'bar')
os.symlink(self.src_file, dst)
shutil.move(dst, self.dst_file)
self.assertTrue(os.path.islink(self.dst_file))
self.assertTrue(os.path.samefile(self.src_file, self.dst_file))
@os_helper.skip_unless_symlink
@mock_rename
def test_move_file_symlink_to_dir(self):
filename = "bar"
dst = os.path.join(self.src_dir, filename)
os.symlink(self.src_file, dst)
shutil.move(dst, self.dst_dir)
final_link = os.path.join(self.dst_dir, filename)
self.assertTrue(os.path.islink(final_link))
self.assertTrue(os.path.samefile(self.src_file, final_link))
@os_helper.skip_unless_symlink
@mock_rename
def test_move_dangling_symlink(self):
src = os.path.join(self.src_dir, 'baz')
dst = os.path.join(self.src_dir, 'bar')
os.symlink(src, dst)
dst_link = os.path.join(self.dst_dir, 'quux')
shutil.move(dst, dst_link)
self.assertTrue(os.path.islink(dst_link))
self.assertEqual(os.path.realpath(src), os.path.realpath(dst_link))
@os_helper.skip_unless_symlink
@mock_rename
def test_move_dir_symlink(self):
src = os.path.join(self.src_dir, 'baz')
dst = os.path.join(self.src_dir, 'bar')
os.mkdir(src)
os.symlink(src, dst)
dst_link = os.path.join(self.dst_dir, 'quux')
shutil.move(dst, dst_link)
self.assertTrue(os.path.islink(dst_link))
self.assertTrue(os.path.samefile(src, dst_link))
def test_move_return_value(self):
rv = shutil.move(self.src_file, self.dst_dir)
self.assertEqual(rv,
os.path.join(self.dst_dir, os.path.basename(self.src_file)))
def test_move_as_rename_return_value(self):
rv = shutil.move(self.src_file, os.path.join(self.dst_dir, 'bar'))
self.assertEqual(rv, os.path.join(self.dst_dir, 'bar'))
@mock_rename
def test_move_file_special_function(self):
moved = []
def _copy(src, dst):
moved.append((src, dst))
shutil.move(self.src_file, self.dst_dir, copy_function=_copy)
self.assertEqual(len(moved), 1)
@mock_rename
def test_move_dir_special_function(self):
moved = []
def _copy(src, dst):
moved.append((src, dst))
os_helper.create_empty_file(os.path.join(self.src_dir, 'child'))
os_helper.create_empty_file(os.path.join(self.src_dir, 'child1'))
shutil.move(self.src_dir, self.dst_dir, copy_function=_copy)
self.assertEqual(len(moved), 3)
def test_move_dir_caseinsensitive(self):
# Renames a folder to the same name
# but a different case.
self.src_dir = self.mkdtemp()
dst_dir = os.path.join(
os.path.dirname(self.src_dir),
os.path.basename(self.src_dir).upper())
self.assertNotEqual(self.src_dir, dst_dir)
try:
shutil.move(self.src_dir, dst_dir)
self.assertTrue(os.path.isdir(dst_dir))
finally:
os.rmdir(dst_dir)
@unittest.skipUnless(hasattr(os, 'geteuid') and os.geteuid() == 0
and hasattr(os, 'lchflags')
and hasattr(stat, 'SF_IMMUTABLE')
and hasattr(stat, 'UF_OPAQUE'),
'root privileges required')
def test_move_dir_permission_denied(self):
# bpo-42782: shutil.move should not create destination directories
# if the source directory cannot be removed.
try:
os.mkdir(TESTFN_SRC)
os.lchflags(TESTFN_SRC, stat.SF_IMMUTABLE)
# Testing on an empty immutable directory
# TESTFN_DST should not exist if shutil.move failed
self.assertRaises(PermissionError, shutil.move, TESTFN_SRC, TESTFN_DST)
self.assertFalse(TESTFN_DST in os.listdir())
# Create a file and keep the directory immutable
os.lchflags(TESTFN_SRC, stat.UF_OPAQUE)
os_helper.create_empty_file(os.path.join(TESTFN_SRC, 'child'))
os.lchflags(TESTFN_SRC, stat.SF_IMMUTABLE)
# Testing on a non-empty immutable directory
# TESTFN_DST should not exist if shutil.move failed
self.assertRaises(PermissionError, shutil.move, TESTFN_SRC, TESTFN_DST)
self.assertFalse(TESTFN_DST in os.listdir())
finally:
if os.path.exists(TESTFN_SRC):
os.lchflags(TESTFN_SRC, stat.UF_OPAQUE)
os_helper.rmtree(TESTFN_SRC)
if os.path.exists(TESTFN_DST):
os.lchflags(TESTFN_DST, stat.UF_OPAQUE)
os_helper.rmtree(TESTFN_DST)
class TestCopyFile(unittest.TestCase):
class Faux(object):
_entered = False
_exited_with = None
_raised = False
def __init__(self, raise_in_exit=False, suppress_at_exit=True):
self._raise_in_exit = raise_in_exit
self._suppress_at_exit = suppress_at_exit
def read(self, *args):
return ''
def __enter__(self):
self._entered = True
def __exit__(self, exc_type, exc_val, exc_tb):
self._exited_with = exc_type, exc_val, exc_tb
if self._raise_in_exit:
self._raised = True
raise OSError("Cannot close")
return self._suppress_at_exit
def test_w_source_open_fails(self):
def _open(filename, mode='r'):
if filename == 'srcfile':
raise OSError('Cannot open "srcfile"')
assert 0 # shouldn't reach here.
with support.swap_attr(shutil, 'open', _open):
with self.assertRaises(OSError):
shutil.copyfile('srcfile', 'destfile')
@unittest.skipIf(MACOS, "skipped on macOS")
def test_w_dest_open_fails(self):
srcfile = self.Faux()
def _open(filename, mode='r'):
if filename == 'srcfile':
return srcfile
if filename == 'destfile':
raise OSError('Cannot open "destfile"')
assert 0 # shouldn't reach here.
with support.swap_attr(shutil, 'open', _open):
shutil.copyfile('srcfile', 'destfile')
self.assertTrue(srcfile._entered)
self.assertTrue(srcfile._exited_with[0] is OSError)
self.assertEqual(srcfile._exited_with[1].args,
('Cannot open "destfile"',))
@unittest.skipIf(MACOS, "skipped on macOS")
def test_w_dest_close_fails(self):
srcfile = self.Faux()
destfile = self.Faux(True)
def _open(filename, mode='r'):
if filename == 'srcfile':
return srcfile
if filename == 'destfile':
return destfile
assert 0 # shouldn't reach here.
with support.swap_attr(shutil, 'open', _open):
shutil.copyfile('srcfile', 'destfile')
self.assertTrue(srcfile._entered)
self.assertTrue(destfile._entered)
self.assertTrue(destfile._raised)
self.assertTrue(srcfile._exited_with[0] is OSError)
self.assertEqual(srcfile._exited_with[1].args,
('Cannot close',))
@unittest.skipIf(MACOS, "skipped on macOS")
def test_w_source_close_fails(self):
srcfile = self.Faux(True)
destfile = self.Faux()
def _open(filename, mode='r'):
if filename == 'srcfile':
return srcfile
if filename == 'destfile':
return destfile
assert 0 # shouldn't reach here.
with support.swap_attr(shutil, 'open', _open):
with self.assertRaises(OSError):
shutil.copyfile('srcfile', 'destfile')
self.assertTrue(srcfile._entered)
self.assertTrue(destfile._entered)
self.assertFalse(destfile._raised)
self.assertTrue(srcfile._exited_with[0] is None)
self.assertTrue(srcfile._raised)
class TestCopyFileObj(unittest.TestCase):
FILESIZE = 2 * 1024 * 1024
@classmethod
def setUpClass(cls):
write_test_file(TESTFN, cls.FILESIZE)
@classmethod
def tearDownClass(cls):
os_helper.unlink(TESTFN)
os_helper.unlink(TESTFN2)
def tearDown(self):
os_helper.unlink(TESTFN2)
@contextlib.contextmanager
def get_files(self):
with open(TESTFN, "rb") as src:
with open(TESTFN2, "wb") as dst:
yield (src, dst)
def assert_files_eq(self, src, dst):
with open(src, 'rb') as fsrc:
with open(dst, 'rb') as fdst:
self.assertEqual(fsrc.read(), fdst.read())
def test_content(self):
with self.get_files() as (src, dst):
shutil.copyfileobj(src, dst)
self.assert_files_eq(TESTFN, TESTFN2)
def test_file_not_closed(self):
with self.get_files() as (src, dst):
shutil.copyfileobj(src, dst)
assert not src.closed
assert not dst.closed
def test_file_offset(self):
with self.get_files() as (src, dst):
shutil.copyfileobj(src, dst)
self.assertEqual(src.tell(), self.FILESIZE)
self.assertEqual(dst.tell(), self.FILESIZE)
@unittest.skipIf(os.name != 'nt', "Windows only")
def test_win_impl(self):
# Make sure alternate Windows implementation is called.
with unittest.mock.patch("shutil._copyfileobj_readinto") as m:
shutil.copyfile(TESTFN, TESTFN2)
assert m.called
# File size is 2 MiB but max buf size should be 1 MiB.
self.assertEqual(m.call_args[0][2], 1 * 1024 * 1024)
# If file size < 1 MiB memoryview() length must be equal to
# the actual file size.
with tempfile.NamedTemporaryFile(dir=os.getcwd(), delete=False) as f:
f.write(b'foo')
fname = f.name
self.addCleanup(os_helper.unlink, fname)
with unittest.mock.patch("shutil._copyfileobj_readinto") as m:
shutil.copyfile(fname, TESTFN2)
self.assertEqual(m.call_args[0][2], 3)
# Empty files should not rely on readinto() variant.
with tempfile.NamedTemporaryFile(dir=os.getcwd(), delete=False) as f:
pass
fname = f.name
self.addCleanup(os_helper.unlink, fname)
with unittest.mock.patch("shutil._copyfileobj_readinto") as m:
shutil.copyfile(fname, TESTFN2)
assert not m.called
self.assert_files_eq(fname, TESTFN2)
class _ZeroCopyFileTest(object):
"""Tests common to all zero-copy APIs."""
FILESIZE = (10 * 1024 * 1024) # 10 MiB
FILEDATA = b""
PATCHPOINT = ""
@classmethod
def setUpClass(cls):
write_test_file(TESTFN, cls.FILESIZE)
with open(TESTFN, 'rb') as f:
cls.FILEDATA = f.read()
assert len(cls.FILEDATA) == cls.FILESIZE
@classmethod
def tearDownClass(cls):
os_helper.unlink(TESTFN)
def tearDown(self):
os_helper.unlink(TESTFN2)
@contextlib.contextmanager
def get_files(self):
with open(TESTFN, "rb") as src:
with open(TESTFN2, "wb") as dst:
yield (src, dst)
def zerocopy_fun(self, *args, **kwargs):
raise NotImplementedError("must be implemented in subclass")
def reset(self):
self.tearDown()
self.tearDownClass()
self.setUpClass()
self.setUp()
# ---
def test_regular_copy(self):
with self.get_files() as (src, dst):
self.zerocopy_fun(src, dst)
self.assertEqual(read_file(TESTFN2, binary=True), self.FILEDATA)
# Make sure the fallback function is not called.
with self.get_files() as (src, dst):
with unittest.mock.patch('shutil.copyfileobj') as m:
shutil.copyfile(TESTFN, TESTFN2)
assert not m.called
def test_same_file(self):
self.addCleanup(self.reset)
with self.get_files() as (src, dst):
with self.assertRaises(Exception):
self.zerocopy_fun(src, src)
# Make sure src file is not corrupted.
self.assertEqual(read_file(TESTFN, binary=True), self.FILEDATA)
def test_non_existent_src(self):
name = tempfile.mktemp(dir=os.getcwd())
with self.assertRaises(FileNotFoundError) as cm:
shutil.copyfile(name, "new")
self.assertEqual(cm.exception.filename, name)
def test_empty_file(self):
srcname = TESTFN + 'src'
dstname = TESTFN + 'dst'
self.addCleanup(lambda: os_helper.unlink(srcname))
self.addCleanup(lambda: os_helper.unlink(dstname))
with open(srcname, "wb"):
pass
with open(srcname, "rb") as src:
with open(dstname, "wb") as dst:
self.zerocopy_fun(src, dst)
self.assertEqual(read_file(dstname, binary=True), b"")
def test_unhandled_exception(self):
with unittest.mock.patch(self.PATCHPOINT,
side_effect=ZeroDivisionError):
self.assertRaises(ZeroDivisionError,
shutil.copyfile, TESTFN, TESTFN2)
def test_exception_on_first_call(self):
# Emulate a case where the first call to the zero-copy
# function raises an exception in which case the function is
# supposed to give up immediately.
with unittest.mock.patch(self.PATCHPOINT,
side_effect=OSError(errno.EINVAL, "yo")):
with self.get_files() as (src, dst):
with self.assertRaises(_GiveupOnFastCopy):
self.zerocopy_fun(src, dst)
def test_filesystem_full(self):
# Emulate a case where filesystem is full and sendfile() fails
# on first call.
with unittest.mock.patch(self.PATCHPOINT,
side_effect=OSError(errno.ENOSPC, "yo")):
with self.get_files() as (src, dst):
self.assertRaises(OSError, self.zerocopy_fun, src, dst)
@unittest.skipIf(not SUPPORTS_SENDFILE, 'os.sendfile() not supported')
class TestZeroCopySendfile(_ZeroCopyFileTest, unittest.TestCase):
PATCHPOINT = "os.sendfile"
def zerocopy_fun(self, fsrc, fdst):
return shutil._fastcopy_sendfile(fsrc, fdst)
def test_non_regular_file_src(self):
with io.BytesIO(self.FILEDATA) as src:
with open(TESTFN2, "wb") as dst:
with self.assertRaises(_GiveupOnFastCopy):
self.zerocopy_fun(src, dst)
shutil.copyfileobj(src, dst)
self.assertEqual(read_file(TESTFN2, binary=True), self.FILEDATA)
def test_non_regular_file_dst(self):
with open(TESTFN, "rb") as src:
with io.BytesIO() as dst:
with self.assertRaises(_GiveupOnFastCopy):
self.zerocopy_fun(src, dst)
shutil.copyfileobj(src, dst)
dst.seek(0)
self.assertEqual(dst.read(), self.FILEDATA)
def test_exception_on_second_call(self):
def sendfile(*args, **kwargs):
if not flag:
flag.append(None)
return orig_sendfile(*args, **kwargs)
else:
raise OSError(errno.EBADF, "yo")
flag = []
orig_sendfile = os.sendfile
with unittest.mock.patch('os.sendfile', create=True,
side_effect=sendfile):
with self.get_files() as (src, dst):
with self.assertRaises(OSError) as cm:
shutil._fastcopy_sendfile(src, dst)
assert flag
self.assertEqual(cm.exception.errno, errno.EBADF)
def test_cant_get_size(self):
# Emulate a case where src file size cannot be determined.
# Internally bufsize will be set to a small value and
# sendfile() will be called repeatedly.
with unittest.mock.patch('os.fstat', side_effect=OSError) as m:
with self.get_files() as (src, dst):
shutil._fastcopy_sendfile(src, dst)
assert m.called
self.assertEqual(read_file(TESTFN2, binary=True), self.FILEDATA)
def test_small_chunks(self):
# Force internal file size detection to be smaller than the
# actual file size. We want to force sendfile() to be called
# multiple times, also in order to emulate a src fd which gets
# bigger while it is being copied.
mock = unittest.mock.Mock()
mock.st_size = 65536 + 1
with unittest.mock.patch('os.fstat', return_value=mock) as m:
with self.get_files() as (src, dst):
shutil._fastcopy_sendfile(src, dst)
assert m.called
self.assertEqual(read_file(TESTFN2, binary=True), self.FILEDATA)
def test_big_chunk(self):
# Force internal file size detection to be +100MB bigger than
# the actual file size. Make sure sendfile() does not rely on
# file size value except for (maybe) a better throughput /
# performance.
mock = unittest.mock.Mock()
mock.st_size = self.FILESIZE + (100 * 1024 * 1024)
with unittest.mock.patch('os.fstat', return_value=mock) as m:
with self.get_files() as (src, dst):
shutil._fastcopy_sendfile(src, dst)
assert m.called
self.assertEqual(read_file(TESTFN2, binary=True), self.FILEDATA)
def test_blocksize_arg(self):
with unittest.mock.patch('os.sendfile',
side_effect=ZeroDivisionError) as m:
self.assertRaises(ZeroDivisionError,
shutil.copyfile, TESTFN, TESTFN2)
blocksize = m.call_args[0][3]
# Make sure file size and the block size arg passed to
# sendfile() are the same.
self.assertEqual(blocksize, os.path.getsize(TESTFN))
# ...unless we're dealing with a small file.
os_helper.unlink(TESTFN2)
write_file(TESTFN2, b"hello", binary=True)
self.addCleanup(os_helper.unlink, TESTFN2 + '3')
self.assertRaises(ZeroDivisionError,
shutil.copyfile, TESTFN2, TESTFN2 + '3')
blocksize = m.call_args[0][3]
self.assertEqual(blocksize, 2 ** 23)
def test_file2file_not_supported(self):
# Emulate a case where sendfile() only support file->socket
# fds. In such a case copyfile() is supposed to skip the
# fast-copy attempt from then on.
assert shutil._USE_CP_SENDFILE
try:
with unittest.mock.patch(
self.PATCHPOINT,
side_effect=OSError(errno.ENOTSOCK, "yo")) as m:
with self.get_files() as (src, dst):
with self.assertRaises(_GiveupOnFastCopy):
shutil._fastcopy_sendfile(src, dst)
assert m.called
assert not shutil._USE_CP_SENDFILE
with unittest.mock.patch(self.PATCHPOINT) as m:
shutil.copyfile(TESTFN, TESTFN2)
assert not m.called
finally:
shutil._USE_CP_SENDFILE = True
@unittest.skipIf(not MACOS, 'macOS only')
class TestZeroCopyMACOS(_ZeroCopyFileTest, unittest.TestCase):
PATCHPOINT = "posix._fcopyfile"
def zerocopy_fun(self, src, dst):
return shutil._fastcopy_fcopyfile(src, dst, posix._COPYFILE_DATA)
class TestGetTerminalSize(unittest.TestCase):
def test_does_not_crash(self):
"""Check if get_terminal_size() returns a meaningful value.
There's no easy portable way to actually check the size of the
terminal, so let's check if it returns something sensible instead.
"""
size = shutil.get_terminal_size()
self.assertGreaterEqual(size.columns, 0)
self.assertGreaterEqual(size.lines, 0)
def test_os_environ_first(self):
"Check if environment variables have precedence"
with os_helper.EnvironmentVarGuard() as env:
env['COLUMNS'] = '777'
del env['LINES']
size = shutil.get_terminal_size()
self.assertEqual(size.columns, 777)
with os_helper.EnvironmentVarGuard() as env:
del env['COLUMNS']
env['LINES'] = '888'
size = shutil.get_terminal_size()
self.assertEqual(size.lines, 888)
def test_bad_environ(self):
with os_helper.EnvironmentVarGuard() as env:
env['COLUMNS'] = 'xxx'
env['LINES'] = 'yyy'
size = shutil.get_terminal_size()
self.assertGreaterEqual(size.columns, 0)
self.assertGreaterEqual(size.lines, 0)
@unittest.skipUnless(os.isatty(sys.__stdout__.fileno()), "not on tty")
@unittest.skipUnless(hasattr(os, 'get_terminal_size'),
'need os.get_terminal_size()')
def test_stty_match(self):
"""Check if stty returns the same results ignoring env
This test will fail if stdin and stdout are connected to
different terminals with different sizes. Nevertheless, such
situations should be pretty rare.
"""
try:
size = subprocess.check_output(['stty', 'size']).decode().split()
except (FileNotFoundError, PermissionError,
subprocess.CalledProcessError):
self.skipTest("stty invocation failed")
expected = (int(size[1]), int(size[0])) # reversed order
with os_helper.EnvironmentVarGuard() as env:
del env['LINES']
del env['COLUMNS']
actual = shutil.get_terminal_size()
self.assertEqual(expected, actual)
def test_fallback(self):
with os_helper.EnvironmentVarGuard() as env:
del env['LINES']
del env['COLUMNS']
# sys.__stdout__ has no fileno()
with support.swap_attr(sys, '__stdout__', None):
size = shutil.get_terminal_size(fallback=(10, 20))
self.assertEqual(size.columns, 10)
self.assertEqual(size.lines, 20)
# sys.__stdout__ is not a terminal on Unix
# or fileno() not in (0, 1, 2) on Windows
with open(os.devnull, 'w', encoding='utf-8') as f, \
support.swap_attr(sys, '__stdout__', f):
size = shutil.get_terminal_size(fallback=(30, 40))
self.assertEqual(size.columns, 30)
self.assertEqual(size.lines, 40)
class PublicAPITests(unittest.TestCase):
"""Ensures that the correct values are exposed in the public API."""
def test_module_all_attribute(self):
self.assertTrue(hasattr(shutil, '__all__'))
target_api = ['copyfileobj', 'copyfile', 'copymode', 'copystat',
'copy', 'copy2', 'copytree', 'move', 'rmtree', 'Error',
'SpecialFileError', 'ExecError', 'make_archive',
'get_archive_formats', 'register_archive_format',
'unregister_archive_format', 'get_unpack_formats',
'register_unpack_format', 'unregister_unpack_format',
'unpack_archive', 'ignore_patterns', 'chown', 'which',
'get_terminal_size', 'SameFileError']
if hasattr(os, 'statvfs') or os.name == 'nt':
target_api.append('disk_usage')
self.assertEqual(set(shutil.__all__), set(target_api))
if __name__ == '__main__':
unittest.main()
| 39.988785
| 130
| 0.6056
|
3f0f7b1e59728f5f7e58549899b60632e4a02b7e
| 3,766
|
py
|
Python
|
gaps/edge/image_analysis.py
|
weiyuhan/gaps
|
47582d34a6d6395a0b0b75dffca6c94839d4abf1
|
[
"MIT"
] | null | null | null |
gaps/edge/image_analysis.py
|
weiyuhan/gaps
|
47582d34a6d6395a0b0b75dffca6c94839d4abf1
|
[
"MIT"
] | null | null | null |
gaps/edge/image_analysis.py
|
weiyuhan/gaps
|
47582d34a6d6395a0b0b75dffca6c94839d4abf1
|
[
"MIT"
] | 1
|
2020-05-20T12:14:56.000Z
|
2020-05-20T12:14:56.000Z
|
from gaps.edge.fitness import dissimilarity_measure
from gaps.progress_bar import print_progress
class ImageAnalysis(object):
"""Cache for dissimilarity measures of individuals
    The class has a static lookup table whose keys are pieces' ids.
    For each pair of puzzle pieces there is a map with values representing the
    dissimilarity measure between them. Each successive generation has a greater
    chance of reusing a cached value instead of recalculating the measure.
Attributes:
dissimilarity_measures Dictionary with cached dissimilarity measures for puzzle pieces
best_match_table Dictionary with best matching piece for each edge and each piece
"""
dissimilarity_measures = {}
best_match_table = {}
@classmethod
def analyze_image(cls, pieces):
for piece in pieces:
# For each edge we keep best matches as a sorted list.
# Edges with lower dissimilarity_measure have higher priority.
cls.best_match_table[piece.id] = {
"T": [],
"R": [],
"D": [],
"L": []
}
def update_best_match_table(first_piece, second_piece):
measure = dissimilarity_measure(first_piece, second_piece, orientation)
cls.put_dissimilarity((first_piece.id, second_piece.id), orientation, measure)
cls.best_match_table[second_piece.id][orientation[0]].append((first_piece.id, measure))
cls.best_match_table[first_piece.id][orientation[1]].append((second_piece.id, measure))
# Calculate dissimilarity measures and best matches for each piece.
iterations = len(pieces) - 1
for first in range(iterations):
print_progress(first, iterations - 1, prefix="=== Analyzing image:")
for second in range(first + 1, len(pieces)):
for orientation in ["LR", "TD"]:
update_best_match_table(pieces[first], pieces[second])
update_best_match_table(pieces[second], pieces[first])
for piece in pieces:
for orientation in ["T", "L", "R", "D"]:
cls.best_match_table[piece.id][orientation].sort(key=lambda x: x[1])
@classmethod
def put_dissimilarity(cls, ids, orientation, value):
"""Puts a new value in lookup table for given pieces
        :params ids: Identifiers of puzzle pieces
:params orientation: Orientation of puzzle pieces. Possible values are:
'LR' => 'Left-Right'
'TD' => 'Top-Down'
:params value: Value of dissimilarity measure
Usage::
>>> from gaps.image_analysis import ImageAnalysis
>>> ImageAnalysis.put_dissimilarity([1, 2], "TD", 42)
"""
if ids not in cls.dissimilarity_measures:
cls.dissimilarity_measures[ids] = {}
cls.dissimilarity_measures[ids][orientation] = value
@classmethod
def get_dissimilarity(cls, ids, orientation):
"""Returns previously cached dissimilarity measure for input pieces
        :params ids: Identifiers of puzzle pieces
:params orientation: Orientation of puzzle pieces. Possible values are:
'LR' => 'Left-Right'
'TD' => 'Top-Down'
Usage::
>>> from gaps.image_analysis import ImageAnalysis
>>> ImageAnalysis.get_dissimilarity([1, 2], "TD")
"""
return cls.dissimilarity_measures[ids][orientation]
@classmethod
    def best_match(cls, piece, orientation):
        """Returns the best matching piece for the given piece and orientation."""
return cls.best_match_table[piece][orientation][0][0]
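# ---------------------------------------------------------------------------
# Hedged usage sketch (illustrative only, not part of the original module).
# It assumes `pieces` is a list of Piece objects exposing a unique `id`
# attribute, as analyze_image() above expects:
#
#     ImageAnalysis.analyze_image(pieces)            # build both lookup tables
#     neighbour_id = ImageAnalysis.best_match(pieces[0].id, "R")
#     # -> id of the best candidate to place to the right of pieces[0]
# ---------------------------------------------------------------------------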
| 40.934783
| 99
| 0.624004
|
4d0f8765c2efe7296100a5da83646e63ae082611
| 18,419
|
py
|
Python
|
hypernets/tabular/dataframe_mapper.py
|
DataCanvasIO/Hypernets
|
4dd8b9846a3e205f03a23a770169cd1d14a98f03
|
[
"Apache-2.0"
] | 1,080
|
2020-06-22T07:44:22.000Z
|
2022-03-22T07:46:48.000Z
|
hypernets/tabular/dataframe_mapper.py
|
DataCanvasIO/Hypernets
|
4dd8b9846a3e205f03a23a770169cd1d14a98f03
|
[
"Apache-2.0"
] | 24
|
2020-08-06T02:06:37.000Z
|
2022-03-31T03:34:35.000Z
|
hypernets/tabular/dataframe_mapper.py
|
DataCanvasIO/Hypernets
|
4dd8b9846a3e205f03a23a770169cd1d14a98f03
|
[
"Apache-2.0"
] | 170
|
2020-08-14T08:39:18.000Z
|
2022-03-23T12:58:17.000Z
|
# -*- coding:utf-8 -*-
"""
Adapted from: https://github.com/scikit-learn-contrib/sklearn-pandas
1. Fixes the problem of column-name confusion
2. Supports `columns` being a callable object
"""
import contextlib
import numpy as np
import pandas as pd
from scipy import sparse as _sparse
from sklearn.base import BaseEstimator
from sklearn.pipeline import _name_estimators, Pipeline
from sklearn.utils import tosequence
from hypernets.utils import logging
logger = logging.get_logger(__name__)
def _call_fit(fit_method, X, y=None, **kwargs):
"""
helper function, calls the fit or fit_transform method with the correct
number of parameters
fit_method: fit or fit_transform method of the transformer
X: the data to fit
y: the target vector relative to X, optional
kwargs: any keyword arguments to the fit method
return: the result of the fit or fit_transform method
    WARNING: if this function raises a TypeError exception, test the fit
    or fit_transform method passed to it in isolation, as _call_fit cannot
    distinguish a TypeError caused by an incorrect number of arguments from
    any other TypeError
"""
try:
return fit_method(X, y, **kwargs)
except TypeError:
# fit takes only one argument
return fit_method(X, **kwargs)
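# Hedged illustration (not part of the original source): _call_fit lets the
# mapper treat y-aware and y-agnostic transformers uniformly, e.g.
#
#     from sklearn.preprocessing import StandardScaler
#     _call_fit(StandardScaler().fit, X_subset, y)
#
# A transformer whose fit() accepts only X is retried without y after the
# TypeError, as handled above; `X_subset` and `y` are hypothetical arrays.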
class TransformerPipeline(Pipeline):
"""
Pipeline that expects all steps to be transformers taking a single X
argument, an optional y argument, and having fit and transform methods.
Code is copied from sklearn's Pipeline
"""
def __init__(self, steps):
names, estimators = zip(*steps)
if len(dict(steps)) != len(steps):
raise ValueError(
"Provided step names are not unique: %s" % (names,))
# shallow copy of steps
self.steps = tosequence(steps)
estimator = estimators[-1]
for e in estimators:
if (not (hasattr(e, "fit") or hasattr(e, "fit_transform")) or not
hasattr(e, "transform")):
raise TypeError("All steps of the chain should "
"be transforms and implement fit and transform"
" '%s' (type %s) doesn't)" % (e, type(e)))
if not hasattr(estimator, "fit"):
raise TypeError("Last step of chain should implement fit "
"'%s' (type %s) doesn't)"
% (estimator, type(estimator)))
def _pre_transform(self, X, y=None, **fit_params):
fit_params_steps = dict((step, {}) for step, _ in self.steps)
for pname, pval in fit_params.items():
step, param = pname.split('__', 1)
fit_params_steps[step][param] = pval
Xt = X
for name, transform in self.steps[:-1]:
if hasattr(transform, "fit_transform"):
Xt = _call_fit(transform.fit_transform,
Xt, y, **fit_params_steps[name])
else:
Xt = _call_fit(transform.fit,
Xt, y, **fit_params_steps[name]).transform(Xt)
return Xt, fit_params_steps[self.steps[-1][0]]
def fit(self, X, y=None, **fit_params):
Xt, fit_params = self._pre_transform(X, y, **fit_params)
_call_fit(self.steps[-1][-1].fit, Xt, y, **fit_params)
return self
def fit_transform(self, X, y=None, **fit_params):
Xt, fit_params = self._pre_transform(X, y, **fit_params)
if hasattr(self.steps[-1][-1], 'fit_transform'):
return _call_fit(self.steps[-1][-1].fit_transform,
Xt, y, **fit_params)
else:
return _call_fit(self.steps[-1][-1].fit,
Xt, y, **fit_params).transform(Xt)
def make_transformer_pipeline(*steps):
"""Construct a TransformerPipeline from the given estimators.
"""
return TransformerPipeline(_name_estimators(steps))
def _build_transformer(transformers):
if isinstance(transformers, list):
transformers = make_transformer_pipeline(*transformers)
return transformers
def _build_feature(columns, transformers, options={}):
return (columns, _build_transformer(transformers), options)
def _get_feature_names(estimator, columns=None):
"""
Attempt to extract feature names based on a given estimator
"""
if hasattr(estimator, 'get_feature_names'):
return estimator.get_feature_names(columns)
if hasattr(estimator, 'classes_'):
return estimator.classes_
return None
@contextlib.contextmanager
def add_column_names_to_exception(column_names):
# Stolen from https://stackoverflow.com/a/17677938/356729
try:
yield
except Exception as ex:
if ex.args:
msg = u'{}: {}'.format(column_names, ex.args[0])
else:
msg = str(column_names)
ex.args = (msg,) + ex.args[1:]
raise
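# Hedged illustration (not part of the original source): wrapping a failing
# fit in this context manager prefixes the offending column names, e.g.
#
#     with add_column_names_to_exception(['age']):
#         raise ValueError("could not convert string to float")
#
# re-raises as ValueError("['age']: could not convert string to float");
# the column name 'age' is hypothetical.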
class DataFrameMapper(BaseEstimator):
"""
Map Pandas data frame column subsets to their own
sklearn transformation.
"""
def __init__(self, features, default=False, df_out=False, input_df=False, df_out_dtype_transforms=None):
"""
Params:
features a list of tuples with features definitions.
The first element is the pandas column selector. This can
be a string (for one column) or a list of strings.
The second element is an object that supports
sklearn's transform interface, or a list of such objects.
The third element is optional and, if present, must be
a dictionary with the options to apply to the
transformation. Example: {'alias': 'day_of_week'}
default default transformer to apply to the columns not
explicitly selected in the mapper. If False (default),
discard them. If None, pass them through untouched. Any
other transformer will be applied to all the unselected
columns as a whole, taken as a 2d-array.
df_out return a pandas data frame, with each column named using
the pandas column that created it (if there's only one
input and output) or the input columns joined with '_'
if there's multiple inputs, and the name concatenated with
'_1', '_2' etc if there's multiple outputs.
        input_df    If ``True`` pass the selected columns to the transformers
                    as a pandas DataFrame or Series. Otherwise pass them as a
                    numpy array. Defaults to ``False``.

        df_out_dtype_transforms
                    Optional list of (columns, dtype) pairs applied to the
                    output data frame when ``df_out`` is ``True``; ``columns``
                    may also be a callable taking the output frame.
        """
self.features = features
self.default = default
self.df_out = df_out
self.input_df = input_df
self.df_out_dtype_transforms = df_out_dtype_transforms
# fitted
self.fitted_features_ = None
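    # Hedged illustration (not from the original source) of the `features`
    # argument described above; the column names and `df` are hypothetical:
    #
    #     from sklearn.preprocessing import StandardScaler, OrdinalEncoder
    #     mapper = DataFrameMapper(
    #         features=[
    #             (['age', 'income'], StandardScaler()),                 # 2d numeric block
    #             ('city', [OrdinalEncoder()], {'alias': 'city_code'}),  # optional options dict
    #         ],
    #         df_out=True,
    #     )
    #     X_out = mapper.fit_transform(df)   # returns a pandas DataFrame when df_out=True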
@staticmethod
def _build(features, default):
if isinstance(features, list):
built_features = [_build_feature(*f) for f in features]
else:
built_features = features
built_default = _build_transformer(default)
return built_features, built_default
def fit(self, X, y=None):
built_features, built_default = self._build(self.features, self.default)
fitted_features = []
selected_columns = []
for columns_def, transformers, options in built_features:
logger.debug(f'columns:({columns_def}), transformers:({transformers}), options:({options})')
if callable(columns_def):
columns = columns_def(X)
elif isinstance(columns_def, str):
columns = [columns_def]
else:
columns = columns_def
if isinstance(columns, (list, tuple)):
columns = [c for c in columns if c not in selected_columns]
fitted_features.append((columns, transformers, options))
if columns is None or len(columns) <= 0:
continue
selected_columns += columns
if transformers is not None:
input_df = options.get('input_df', self.input_df)
with add_column_names_to_exception(columns):
Xt = self._get_col_subset(X, columns, input_df)
_call_fit(transformers.fit, Xt, y)
# print(f'{transformers}:{Xt.dtypes}')
# handle features not explicitly selected
if built_default is not False and len(X.columns) > len(selected_columns):
unselected_columns = [c for c in X.columns.to_list() if c not in selected_columns]
if built_default is not None:
with add_column_names_to_exception(unselected_columns):
Xt = self._get_col_subset(X, unselected_columns, self.input_df)
_call_fit(built_default.fit, Xt, y)
fitted_features.append((unselected_columns, built_default, {}))
self.fitted_features_ = fitted_features
return self
def transform(self, X):
selected_columns = []
transformed_columns = []
extracted = []
for columns, transformers, options in self.fitted_features_:
if columns is None or len(columns) < 1:
continue
selected_columns += columns
input_df = options.get('input_df', self.input_df)
alias = options.get('alias')
Xt = self._get_col_subset(X, columns, input_df)
if transformers is not None:
with add_column_names_to_exception(columns):
# print(f'before ---- {transformers}:{Xt.dtypes}')
Xt = transformers.transform(Xt)
# print(f'after ---- {transformers}:{pd.DataFrame(Xt).dtypes}')
extracted.append(self._fix_feature(Xt))
transformed_columns += self._get_names(columns, transformers, Xt, alias)
return self._to_transform_result(X, extracted, transformed_columns)
def fit_transform(self, X, y=None, *fit_args):
fitted_features = []
selected_columns = []
transformed_columns = []
extracted = []
built_features, built_default = self._build(self.features, self.default)
for columns_def, transformers, options in built_features:
if callable(columns_def):
columns = columns_def(X)
elif isinstance(columns_def, str):
columns = [columns_def]
else:
columns = columns_def
if isinstance(columns, (list, tuple)) and len(set(selected_columns).intersection(set(columns))) > 0:
columns = [c for c in columns if c not in selected_columns]
if columns is None or len(columns) < 1:
continue
fitted_features.append((columns, transformers, options))
selected_columns += columns
if logger.is_debug_enabled():
logger.debug(f'fit_transform {len(columns)} columns with:\n{transformers}')
input_df = options.get('input_df', self.input_df)
alias = options.get('alias')
Xt = self._get_col_subset(X, columns, input_df)
if transformers is not None:
with add_column_names_to_exception(columns):
if hasattr(transformers, 'fit_transform'):
Xt = _call_fit(transformers.fit_transform, Xt, y)
else:
_call_fit(transformers.fit, Xt, y)
Xt = transformers.transform(Xt)
extracted.append(self._fix_feature(Xt))
if logger.is_debug_enabled():
logger.debug(f'columns:{len(columns)}')
transformed_columns += self._get_names(columns, transformers, Xt, alias)
if logger.is_debug_enabled():
logger.debug(f'transformed_names_:{len(transformed_columns)}')
# handle features not explicitly selected
if built_default is not False and len(X.columns) > len(selected_columns):
unselected_columns = [c for c in X.columns.to_list() if c not in selected_columns]
Xt = self._get_col_subset(X, unselected_columns, self.input_df)
if built_default is not None:
with add_column_names_to_exception(unselected_columns):
if hasattr(built_default, 'fit_transform'):
Xt = _call_fit(built_default.fit_transform, Xt, y)
else:
_call_fit(built_default.fit, Xt, y)
Xt = built_default.transform(Xt)
transformed_columns += self._get_names(unselected_columns, built_default, Xt)
else:
# if not applying a default transformer, keep column names unmodified
transformed_columns += unselected_columns
extracted.append(self._fix_feature(Xt))
fitted_features.append((unselected_columns, built_default, {}))
self.fitted_features_ = fitted_features
return self._to_transform_result(X, extracted, transformed_columns)
@staticmethod
def _get_col_subset(X, cols, input_df=False):
t = X[cols]
if input_df:
return t
else:
return t.values
def _get_names(self, columns, transformer, x, alias=None):
"""
Return verbose names for the transformed columns.
columns name (or list of names) of the original column(s)
transformer transformer - can be a TransformerPipeline
x transformed columns (numpy.ndarray)
alias base name to use for the selected columns
"""
# logger.debug(
# f'get_names: {isinstance(columns, list)}, len(columns):{len(columns)} columns:{columns}, alias:{alias}')
if alias is not None:
name = alias
elif isinstance(columns, list):
name = '_'.join(map(str, columns))
else:
name = columns
num_cols = x.shape[1] if len(x.shape) > 1 else 1
if num_cols > 1:
# If there are as many columns as classes in the transformer,
# infer column names from classes names.
# If we are dealing with multiple transformers for these columns
# attempt to extract the names from each of them, starting from the
# last one
# logger.debug(f'transformer:{transformer}')
if isinstance(transformer, (TransformerPipeline, Pipeline)):
inverse_steps = transformer.steps[::-1]
# estimators = (estimator for _, estimator in inverse_steps)
# names_steps = (_get_feature_names(e, columns) for e in estimators)
# names = next((n for n in names_steps if n is not None), None)
names = None
for _, estimator in inverse_steps:
names = _get_feature_names(estimator, columns)
if names is not None and len(names) == num_cols:
break
else: # Otherwise use the only estimator present
names = _get_feature_names(transformer, columns)
if names is None and len(columns) == num_cols:
names = list(columns)
if logger.is_debug_enabled():
# logger.debug(f'names:{names}')
logger.debug(f'transformed names:{len(names)}')
if names is not None and len(names) == num_cols:
return list(names) # ['%s_%s' % (name, o) for o in names]
else: # otherwise, return name concatenated with '_1', '_2', etc.
return [name + '_' + str(o) for o in range(num_cols)]
else:
return [name]
@staticmethod
def _fix_feature(fea):
if _sparse.issparse(fea):
fea = fea.toarray()
if len(fea.shape) == 1:
"""
Convert 1-dimensional arrays to 2-dimensional column vectors.
"""
fea = np.array([fea]).T
return fea
def _to_transform_result(self, X, extracted, transformed_columns):
if extracted is None or len(extracted) == 0:
            raise ValueError("No data output: the list of extracted features is empty.")
if self.df_out:
df = self._to_df(X, extracted, transformed_columns)
df = self._dtype_transform(df)
return df
else:
return self._hstack_array(extracted)
@staticmethod
def _hstack_array(extracted):
stacked = np.hstack(extracted)
return stacked
def _to_df(self, X, extracted, columns):
# stacked = self._hstack_array(extracted)
#
# if isinstance(X, dd.DataFrame):
# df = dd.from_dask_array(stacked, columns=columns, index=None)
# else:
# # if no rows were dropped preserve the original index, otherwise use a new integer one
# no_rows_dropped = len(X) == len(stacked)
# index = X.index if no_rows_dropped else None
# df = pd.DataFrame(stacked, columns=columns, index=index)
#
# # output different data types, if appropriate
# dtypes = self._get_dtypes(extracted)
# for col, dtype, stype in zip(columns, dtypes, df.dtypes.tolist()):
# if dtype != stype:
# if logger.is_debug_enabled():
# logger.debug(f'convert {col} as {dtype} from {stype}')
# df[col] = df[col].astype(dtype)
dfs = [pd.DataFrame(arr, index=None) for arr in extracted]
df = pd.concat(dfs, axis=1, ignore_index=True) if len(dfs) > 1 else dfs[0]
df.columns = columns
if len(X) == len(df):
df.index = X.index # reuse the original index
return df
def _dtype_transform(self, df_out):
if self.df_out_dtype_transforms is not None:
for columns, dtype in self.df_out_dtype_transforms:
if callable(columns):
columns = columns(df_out)
if isinstance(columns, list) and len(columns) <= 0:
continue
df_out[columns] = df_out[columns].astype(dtype)
return df_out
| 40.216157
| 118
| 0.596069
|
cbcedb694b5d42b85316e4662f0e96cec99d0ddd
| 2,826
|
py
|
Python
|
instagram/views.py
|
Nancy-Muthinzi/ip2-instagram
|
cdfeb72b02dae35d5b6089386b5e61402a4b288b
|
[
"MIT"
] | null | null | null |
instagram/views.py
|
Nancy-Muthinzi/ip2-instagram
|
cdfeb72b02dae35d5b6089386b5e61402a4b288b
|
[
"MIT"
] | null | null | null |
instagram/views.py
|
Nancy-Muthinzi/ip2-instagram
|
cdfeb72b02dae35d5b6089386b5e61402a4b288b
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render, redirect, get_object_or_404
from django.http import HttpResponse, Http404, HttpResponseRedirect
import datetime as dt
from .models import Image, Profile, Comment, User
from django.contrib.auth.decorators import login_required
from .forms import CommentForm
from .email import send_welcome_email
# Create your views here.
def login(request):
'''
This is where a user logs in after signing up to the app
'''
return render(request, 'registration/login.html')
def registration_form(request):
return render(request, 'registration/registration_form.html')
@login_required(login_url='/accounts/login/')
def home(request):
'''
    This is the home feed shown to the logged-in user
'''
images = Image.objects.all()
date = dt.date.today()
profiles = Profile.objects.all()
comments = Comment.objects.all()
users = User.objects.all()
profile_photo = User.objects.all()
current_user = request.user
if request.method == 'POST':
form = CommentForm(request.POST)
if form.is_valid():
comment = form.save(commit=False)
comment.user = current_user
comment.save()
return redirect('home')
else:
form = CommentForm()
    return render(request, 'home.html', {'images': images, 'commentForm': form, 'date': date, 'profiles': profiles, 'comments': comments, 'users': users})
@login_required(login_url='/accounts/login/')
def profile(request, id):
current_user = request.user
profiles = Profile.objects.get(user=current_user)
images = Image.objects.filter(user=current_user)
comments = Comment.objects.all()
return render(request, 'profile.html', {'profile': profiles, "images": images})
@login_required(login_url='/accounts/login/')
def user(request):
users = User.objects.all()
images = Image.objects.all()
return render(request, 'home.html')
@login_required(login_url='/accounts/login/')
def search_results(request):
if 'user' in request.GET and request.GET["user"]:
search_term = request.GET.get("user")
searched_users = User.search_by_user_name(search_term)
message = f"{search_term}"
return render(request, 'search.html', {"message": message, "users": searched_users})
else:
message = "You haven't made any searches"
return render(request, 'search.html', {"message": message})
def comment(request, post_id):
post = get_object_or_404(Image, pk=post_id)
if request.method == 'POST':
form = CommentForm(request.POST)
if form.is_valid():
comment = form.save(commit=False)
comment.user = request.user
comment.image = post
comment.save()
return redirect('home')
return render(request, 'home.html')
| 29.747368
| 153
| 0.672682
|
afff92c5ef42dbd22f67a003b845a8c695d93677
| 967
|
py
|
Python
|
app/user/views.py
|
beingvikasagain/recipe-app-api
|
057771c5ae9103044d67785cf38d293a462dbc9e
|
[
"MIT"
] | null | null | null |
app/user/views.py
|
beingvikasagain/recipe-app-api
|
057771c5ae9103044d67785cf38d293a462dbc9e
|
[
"MIT"
] | null | null | null |
app/user/views.py
|
beingvikasagain/recipe-app-api
|
057771c5ae9103044d67785cf38d293a462dbc9e
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
from user.serializers import UserSerializer, AuthTokenSerializer
from rest_framework import generics, authentication, permissions
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.settings import api_settings
class CreateUserView(generics.CreateAPIView):
"""Create a new user in the system"""
serializer_class = UserSerializer
class CreateTokenView(ObtainAuthToken):
"""Create auth token for users"""
serializer_class = AuthTokenSerializer
renderer_classes = api_settings.DEFAULT_RENDERER_CLASSES
class ManageUserView(generics.RetrieveUpdateAPIView):
    """Manage the authenticated user"""
serializer_class = UserSerializer
authentication_classes = (authentication.TokenAuthentication,)
permission_classes = (permissions.IsAuthenticated,)
    def get_object(self):
        """Retrieve and return the authenticated user"""
return self.request.user
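# Hedged wiring sketch (not part of the original file): these views would
# typically be exposed through a urls.py along the lines of
#
#     from django.urls import path
#     from user import views
#
#     urlpatterns = [
#         path('create/', views.CreateUserView.as_view(), name='create'),
#         path('token/', views.CreateTokenView.as_view(), name='token'),
#         path('me/', views.ManageUserView.as_view(), name='me'),
#     ]
#
# The route paths and names are hypothetical.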
| 35.814815
| 66
| 0.783868
|
ee4a1d2cc9b1fb5868f9a2ae622437a37f0baf27
| 21,137
|
py
|
Python
|
python/src/wslink/launcher.py
|
yasushi-saito/wslink
|
25c7d3b2f6da25b100bc69efa4e06b1defe22ca1
|
[
"BSD-3-Clause"
] | 51
|
2017-06-01T20:12:48.000Z
|
2022-03-28T08:18:00.000Z
|
python/src/wslink/launcher.py
|
yasushi-saito/wslink
|
25c7d3b2f6da25b100bc69efa4e06b1defe22ca1
|
[
"BSD-3-Clause"
] | 70
|
2017-07-17T23:37:40.000Z
|
2022-02-27T01:58:47.000Z
|
python/src/wslink/launcher.py
|
yasushi-saito/wslink
|
25c7d3b2f6da25b100bc69efa4e06b1defe22ca1
|
[
"BSD-3-Clause"
] | 19
|
2017-05-17T12:47:54.000Z
|
2021-11-22T03:37:47.000Z
|
import argparse
import asyncio
import datetime
import io
import json
import logging
import os
import re
import string
import subprocess
import sys
import time
import uuid
from random import choice
from wslink import backends
STATUS_OK = 200
STATUS_BAD_REQUEST = 400
STATUS_NOT_FOUND = 404
STATUS_SERVICE_UNAVAILABLE = 503
sample_config_file = """
Here is a sample of what a configuration file could look like:
{
// ===============================
// General launcher configuration
// ===============================
"configuration": {
"host" : "localhost",
"port" : 8080,
"endpoint": "paraview", // SessionManager Endpoint
"content": "/.../www", // Optional: Directory shared over HTTP
"proxy_file" : "/.../proxy-mapping.txt", // Proxy-Mapping file for Apache
"sessionURL" : "ws://${host}:${port}/ws", // ws url used by the client to connect to the started process
"timeout" : 25, // Wait time in second after process start
"log_dir" : "/.../viz-logs", // Directory for log files
"fields" : ["file", "host", "port"] // List of fields that should be send back to client
// include "secret" if you provide it as an --authKey to the app
"sanitize": { // Check information coming from the client
"cmd": {
"type": "inList", // 'cmd' must be one of the strings in 'list'
"list": [
"me", "you", "something/else/altogether", "nothing-to-do"
],
"default": "nothing-to-do" // If the string doesn't match, replace it with the default.
// Include the default in your list
},
"cmd2": { // 'cmd2' must match the regexp provided, example: not a quote
"type": "regexp",
"regexp": "^[^\"]*$", // Make sure to include '^' and '$' to match the entire string!
"default": "nothing"
}
}
},
// ===============================
// Useful session vars for client
// ===============================
"sessionData" : { "key": "value" }, // Dictionary of values interesting to the client
// ===============================
// Resources list for applications
// ===============================
"resources" : [ { "host" : "localhost", "port_range" : [9001, 9003] } ],
// ===============================
// Set of properties for cmd line
// ===============================
"properties" : {
"vtkpython" : "/.../VTK/build/bin/vtkpython",
"pvpython" : "/.../ParaView/build/bin/pvpython",
"vtk_python_path": "/.../VTK/build/Wrapping/Python/vtk/web",
"pv_python_path": "/.../ParaView/build/lib/site-packages/paraview/web",
"plugins_path": "/.../ParaView/build/lib",
"dataDir": "/.../path/to/data/directory"
},
// ===============================
// Application list with cmd lines
// ===============================
"apps" : {
"cone" : {
"cmd" : [
"${vtkpython}", "${vtk_python_path}/vtk_web_cone.py", "--port", "$port" ],
"ready_line" : "Starting factory"
},
"graph" : {
"cmd" : [
"${vtkpython}", "${vtk_python_path}/vtk_web_graph.py", "--port", "$port",
"--vertices", "${numberOfVertices}", "--edges", "${numberOfEdges}" ],
"ready_line" : "Starting factory"
},
"phylotree" : {
"cmd" : [
"${vtkpython}", "${vtk_python_path}/vtk_web_phylogenetic_tree.py", "--port", "$port",
"--tree", "${dataDir}/visomics/${treeFile}", "--table", "${dataDir}/visomics/${tableFile}" ],
"ready_line" : "Starting factory"
},
"filebrowser" : {
"cmd" : [
"${vtkpython}", "${vtk_python_path}/vtk_web_filebrowser.py",
"--port", "${port}", "--data-dir", "${dataDir}" ],
"ready_line" : "Starting factory"
},
"data_prober": {
"cmd": [
"${pvpython}", "-dr", "${pv_python_path}/pv_web_data_prober.py",
"--port", "${port}", "--data-dir", "${dataDir}", "-f" ],
"ready_line" : "Starting factory"
},
"visualizer": {
"cmd": [
"${pvpython}", "-dr", "${pv_python_path}/pv_web_visualizer.py",
"--plugins", "${plugins_path}/libPointSprite_Plugin.so", "--port", "${port}",
"--data-dir", "${dataDir}", "--load-file", "${dataDir}/${fileToLoad}",
"--authKey", "${secret}", "-f" ], // Use of ${secret} means it needs to be provided to the client, in "fields", above.
"ready_line" : "Starting factory"
},
"loader": {
"cmd": [
"${pvpython}", "-dr", "${pv_python_path}/pv_web_file_loader.py",
"--port", "${port}", "--data-dir", "${dataDir}",
"--load-file", "${dataDir}/${fileToLoad}", "-f" ],
"ready_line" : "Starting factory"
},
"launcher" : {
"cmd": [
"/.../ParaView/Web/Applications/Parallel/server/launcher.sh",
"${port}", "${client}", "${resources}", "${file}" ],
"ready_line" : "Starting factory"
},
"your_app": {
"cmd": [
"your_shell_script.sh", "--resource-host", "${host}", "--resource-port", "${port}",
"--session-id", "${id}", "--generated-password", "${secret}",
"--application-key", "${application}" ],
"ready_line": "Output line from your shell script indicating process is ready"
}
}
"""
# =============================================================================
# Helper module methods
# =============================================================================
def remove_comments(json_like):
"""
Removes C-style comments from *json_like* and returns the result. Example::
>>> test_json = '''\
{
"foo": "bar", // This is a single-line comment
"baz": "blah" /* Multi-line
Comment */
}'''
>>> remove_comments('{"foo":"bar","baz":"blah",}')
'{\n "foo":"bar",\n "baz":"blah"\n}'
From: https://gist.github.com/liftoff/ee7b81659673eca23cd9fc0d8b8e68b7
"""
comments_re = re.compile(
r'//.*?$|/\*.*?\*/|\'(?:\\.|[^\\\'])*\'|"(?:\\.|[^\\"])*"',
re.DOTALL | re.MULTILINE,
)
def replacer(match):
s = match.group(0)
if s[0] == "/":
return ""
return s
return comments_re.sub(replacer, json_like)
def generatePassword():
return "".join(choice(string.ascii_letters + string.digits) for _ in range(16))
# -----------------------------------------------------------------------------
def validateKeySet(obj, expected_keys, object_name):
all_key_found = True
for key in expected_keys:
if not key in obj:
print("ERROR: %s is missing %s key." % (object_name, key))
all_key_found = False
return all_key_found
def checkSanitize(key_pair, sanitize):
if not sanitize:
return
for key in sanitize:
if key in key_pair:
checkItem = sanitize[key]
value = key_pair[key]
if checkItem["type"] == "inList":
if not value in checkItem["list"]:
logging.warning(
"key %s: sanitize %s with default" % (key, key_pair[key])
)
key_pair[key] = checkItem["default"]
elif checkItem["type"] == "regexp":
if not "compiled" in checkItem:
                    # The user is responsible for adding begin- and end-of-string anchors so the entire string is matched.
checkItem["compiled"] = re.compile(checkItem["regexp"])
if checkItem["compiled"].match(value) == None:
logging.warning(
"key %s: sanitize %s with default" % (key, key_pair[key])
)
key_pair[key] = checkItem["default"]
# -----------------------------------------------------------------------------
# guard against malicious clients - make sure substitution is expected, if 'sanitize' is provided
# -----------------------------------------------------------------------------
def replaceVariables(template_str, variable_list, sanitize):
for key_pair in variable_list:
checkSanitize(key_pair, sanitize)
item_template = string.Template(template_str)
template_str = item_template.safe_substitute(key_pair)
if "$" in template_str:
logging.error("Some properties could not be resolved: " + template_str)
return template_str
# -----------------------------------------------------------------------------
def replaceList(template_list, variable_list, sanitize):
result_list = []
for template_str in template_list:
result_list.append(replaceVariables(template_str, variable_list, sanitize))
return result_list
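# -----------------------------------------------------------------------------
# Illustrative sketch (not called by the launcher): shows how replaceVariables
# combines string.Template substitution with the optional 'sanitize' rules.
# The template, keys and regexp below are assumptions made up for this example.
# -----------------------------------------------------------------------------
def _example_replace_variables():
    template = "${pvpython} --port ${port} --data-dir ${dataDir}"
    variables = {"port": 9999, "dataDir": "/tmp/data"}
    properties = {"pvpython": "/opt/paraview/bin/pvpython"}
    # Only accept simple path characters for dataDir, otherwise fall back to
    # the default value (anchors are the caller's responsibility).
    sanitize = {
        "dataDir": {"type": "regexp", "regexp": r"^[\w/\-.]+$", "default": "/tmp/data"}
    }
    # -> "/opt/paraview/bin/pvpython --port 9999 --data-dir /tmp/data"
    return replaceVariables(template, [variables, properties], sanitize)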
# -----------------------------------------------------------------------------
def filterResponse(obj, public_keys):
public_keys.extend(["id", "sessionURL", "sessionManagerURL"])
filtered_output = {}
for field in obj:
if field in public_keys:
filtered_output[field] = obj[field]
return filtered_output
# -----------------------------------------------------------------------------
def extractSessionId(request):
path = request.path.split("/")
if len(path) < 3:
return None
return str(path[2])
def jsonResponse(payload):
return json.dumps(payload, ensure_ascii=False).encode("utf8")
# =============================================================================
# Session manager
# =============================================================================
class SessionManager(object):
def __init__(self, config, mapping):
self.sessions = {}
self.config = config
self.resources = ResourceManager(config["resources"])
self.mapping = mapping
self.sanitize = config["configuration"]["sanitize"]
def createSession(self, options):
# Assign id and store options
id = str(uuid.uuid1())
# Assign resource to session
host, port = self.resources.getNextResource()
# Do we have resources
if host:
options["id"] = id
options["host"] = host
options["port"] = port
if not "secret" in options:
options["secret"] = generatePassword()
options["sessionURL"] = replaceVariables(
self.config["configuration"]["sessionURL"],
[options, self.config["properties"]],
self.sanitize,
)
options["cmd"] = replaceList(
self.config["apps"][options["application"]]["cmd"],
[options, self.config["properties"]],
self.sanitize,
)
if "sessionData" in self.config:
for key in self.config["sessionData"]:
options[key] = replaceVariables(
self.config["sessionData"][key],
[options, self.config["properties"]],
self.sanitize,
)
self.sessions[id] = options
self.mapping.update(self.sessions)
return options
return None
def deleteSession(self, id):
host = self.sessions[id]["host"]
port = self.sessions[id]["port"]
self.resources.freeResource(host, port)
del self.sessions[id]
self.mapping.update(self.sessions)
def getSession(self, id):
if id in self.sessions:
return self.sessions[id]
return None
# =============================================================================
# Proxy manager
# =============================================================================
class ProxyMappingManager(object):
    def update(self, sessions):
pass
class ProxyMappingManagerTXT(ProxyMappingManager):
def __init__(self, file_path, pattern="%s %s:%d\n"):
self.file_path = file_path
self.pattern = pattern
def update(self, sessions):
with io.open(self.file_path, "w", encoding="utf-8") as map_file:
for id in sessions:
map_file.write(
self.pattern % (id, sessions[id]["host"], sessions[id]["port"])
)
# =============================================================================
# Resource manager
# =============================================================================
class ResourceManager(object):
"""
Class that provides methods to keep track on available resources (host/port)
"""
def __init__(self, resourceList):
self.resources = {}
for resource in resourceList:
host = resource["host"]
portList = list(
range(resource["port_range"][0], resource["port_range"][1] + 1)
)
if host in self.resources:
self.resources[host]["available"].extend(portList)
else:
self.resources[host] = {"available": portList, "used": []}
def getNextResource(self):
"""
Return a (host, port) pair if any available otherwise will return None
"""
        # find host with max availability
winner = None
availibilityCount = 0
for host in self.resources:
if availibilityCount < len(self.resources[host]["available"]):
availibilityCount = len(self.resources[host]["available"])
winner = host
if winner:
port = self.resources[winner]["available"].pop()
self.resources[winner]["used"].append(port)
return (winner, port)
return (None, None)
def freeResource(self, host, port):
"""
Free a previously reserved resource
"""
if host in self.resources and port in self.resources[host]["used"]:
self.resources[host]["used"].remove(port)
self.resources[host]["available"].append(port)
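# -----------------------------------------------------------------------------
# Illustrative sketch (not used by the launcher): typical life cycle of a
# ResourceManager entry. The host name and port range are made up for this
# example only.
# -----------------------------------------------------------------------------
def _example_resource_manager():
    manager = ResourceManager([{"host": "localhost", "port_range": [9001, 9003]}])
    host, port = manager.getNextResource()  # e.g. ("localhost", 9003)
    # ... launch the session process bound to (host, port) here ...
    manager.freeResource(host, port)        # the port becomes available again
    return host, port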
# =============================================================================
# Process manager
# =============================================================================
class ProcessManager(object):
def __init__(self, configuration):
self.config = configuration
self.log_dir = configuration["configuration"]["log_dir"]
self.processes = {}
def __del__(self):
for id in self.processes:
self.processes[id].terminate()
def _getLogFilePath(self, id):
return "%s%s%s.txt" % (self.log_dir, os.sep, id)
def startProcess(self, session):
proc = None
# Create output log file
logFilePath = self._getLogFilePath(session["id"])
with io.open(logFilePath, mode="a+", buffering=1, encoding="utf-8") as log_file:
try:
proc = subprocess.Popen(
session["cmd"], stdout=log_file, stderr=log_file
)
self.processes[session["id"]] = proc
except:
logging.error("The command line failed")
logging.error(" ".join(map(str, session["cmd"])))
return None
return proc
def stopProcess(self, id):
proc = self.processes[id]
del self.processes[id]
try:
proc.terminate()
except:
pass # we tried
def listEndedProcess(self):
session_to_release = []
for id in self.processes:
if self.processes[id].poll() is not None:
session_to_release.append(id)
return session_to_release
def isRunning(self, id):
return self.processes[id].poll() is None
    # ========================================================================
    # Look for the ready line in the process output. Return True if found,
    # False otherwise. If no ready_line is configured and the process is
    # running, return False; the caller then falls back on the timeout.
    # ========================================================================
def isReady(self, session, count=0):
id = session["id"]
# The process has to be running to be ready!
if not self.isRunning(id) and count < 60:
return False
# Give up after 60 seconds if still not running
if not self.isRunning(id):
return True
application = self.config["apps"][session["application"]]
ready_line = application.get("ready_line", None)
        # If no ready_line is configured, keep returning False and rely on the
        # timeout handled by the caller.
if not ready_line:
return False
ready = False
# Check the output for ready_line
logFilePath = self._getLogFilePath(session["id"])
with io.open(logFilePath, "r", 1, encoding="utf-8") as log_file:
for line in log_file.readlines():
if ready_line in line:
ready = True
break
return ready
# =============================================================================
# Parse config file
# =============================================================================
def parseConfig(options):
# Read values from the configuration file
try:
config_comments = remove_comments(
io.open(options.config[0], encoding="utf-8").read()
)
config = json.loads(config_comments)
except:
message = "ERROR: Unable to read config file.\n"
message += str(sys.exc_info()[1]) + "\n" + str(sys.exc_info()[2])
print(message)
print(sample_config_file)
sys.exit(2)
expected_keys = ["configuration", "apps", "properties", "resources"]
if not validateKeySet(config, expected_keys, "Config file"):
print(sample_config_file)
sys.exit(2)
expected_keys = [
"endpoint",
"host",
"port",
"proxy_file",
"sessionURL",
"timeout",
"log_dir",
"fields",
]
if not validateKeySet(config["configuration"], expected_keys, "file.configuration"):
print(sample_config_file)
sys.exit(2)
if not "content" in config["configuration"]:
config["configuration"]["content"] = ""
if not "sanitize" in config["configuration"]:
config["configuration"]["sanitize"] = {}
return config
# =============================================================================
# Setup default arguments to be parsed
#   config              configuration file for the launcher
#   -d, --debug         log debugging messages to stdout
#   --backend           server implementation to use (aiohttp)
# =============================================================================
def add_arguments(parser):
parser.add_argument(
"config", type=str, nargs=1, help="configuration file for the launcher"
)
parser.add_argument(
"-d", "--debug", help="log debugging messages to stdout", action="store_true"
)
parser.add_argument(
"--backend", help="Server implementation to use (aiohttp)", default="aiohttp"
)
return parser
# =============================================================================
# Parse arguments
# =============================================================================
def start(argv=None, description="wslink Web Launcher"):
parser = argparse.ArgumentParser(
description=description,
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=sample_config_file,
)
add_arguments(parser)
args = parser.parse_args(argv)
config = parseConfig(args)
backends.launcher_start(args, config, backend=args.backend)
# Used for backward compatibility
def startWebServer(args, config, backend="aiohttp"):
return backends.launcher_start(args, config, backend=backend)
# =============================================================================
# Main
# =============================================================================
if __name__ == "__main__":
start()
| 35.053068
| 139
| 0.477977
|
88a838bb1dc662f138a4aa70ace30d93b568fbe8
| 2,161
|
py
|
Python
|
app/client.py
|
egregiouss/http-client
|
6a368724f15986906a2a413c9646f0cd7dbaa2c7
|
[
"MIT"
] | null | null | null |
app/client.py
|
egregiouss/http-client
|
6a368724f15986906a2a413c9646f0cd7dbaa2c7
|
[
"MIT"
] | null | null | null |
app/client.py
|
egregiouss/http-client
|
6a368724f15986906a2a413c9646f0cd7dbaa2c7
|
[
"MIT"
] | null | null | null |
import logging
import socket
import sys
import app.parser as parser
from app.request import Request
from app.response import Response
import re
from app.errors import *
cookies = {}
logger = logging.getLogger(__name__)
def parse_uri(uri):
_path = '/'
_scheme = ''
if re.match('http://', uri):
uri = uri[7:]
_scheme = 'http'
elif re.match(r'https://', uri):
uri = uri[8:]
_scheme = 'https'
_host = uri.split('/')[0]
if '/' in uri:
_path += uri[len(_host) + 1:]
return _host, _path, _scheme
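# Illustrative examples (hypothetical URLs, not part of the client):
#   parse_uri('http://example.com/index.html') -> ('example.com', '/index.html', 'http')
#   parse_uri('https://example.com')           -> ('example.com', '/', 'https')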
def send(args):
args = parser.convert_to_list(args)
_host, _path, scheme = parse_uri(args[0])
del args[0]
_request = Request(_host, _path, scheme, *args)
_request.set_cookies(cookies)
try:
_sock = _request.send_data()
except socket.gaierror:
logger.info('bad request')
raise socket.gaierror
return _sock, _host, _path, scheme
def get(_sock, _host, args):
_response = Response(_sock, args.file)
_response.receive(args.file, args.streaming, args.verbose)
return _response
def change_url(addr, host, scheme):
if host in addr:
return addr
else:
return scheme + '://' + host + addr
def main():
try:
arguments = parser.parse_args()
sock, host, path, scheme = send(arguments)
print(sock, host, path, scheme )
response = get(sock, host, arguments)
while re.search(r'3\d\d', response.headers['code'].decode()) and arguments.redirects:
try:
addr = response.headers['location'][:-2]
try:
arguments.uri = change_url(addr, host, scheme)
sock, host, path, scheme = send(arguments)
except ValueError:
continue
response = get(sock, host, arguments)
except KeyError:
                logger.info('there is no address to go to')
if not arguments.streaming:
response.print()
except KeyboardInterrupt:
logger.info('closing connections')
logger.info('client closed')
sys.exit()
| 24.280899
| 93
| 0.58584
|
7b4029e697552b50d4be5cb077b29db14760629c
| 8,174
|
py
|
Python
|
pydre/merge_tool.py
|
tkerwin/pydre
|
a100779801ffdc9855f09328bd28bf79ab6e679b
|
[
"Apache-2.0"
] | null | null | null |
pydre/merge_tool.py
|
tkerwin/pydre
|
a100779801ffdc9855f09328bd28bf79ab6e679b
|
[
"Apache-2.0"
] | null | null | null |
pydre/merge_tool.py
|
tkerwin/pydre
|
a100779801ffdc9855f09328bd28bf79ab6e679b
|
[
"Apache-2.0"
] | null | null | null |
import sys
import os
import math
import glob
import pandas as pd
import re
import logging
import shutil
logger = logging.getLogger(__name__)
regular_expressions = ['(?:.*\\\)?(.*)_Sub_(\d+)_Drive_\d+\.dat', '(?:.*\\\)?([^_]+)_([^_]+)_([^_]+)_(\d+).dat']
regular_expressions_glob = ['*_Sub_*_Drive_*.dat', '*_*_*_*.dat']
regular_expressions_group = ['.*_Sub_(\d+)_Drive_(\d+)\.dat', '.*_(.*)_(.*)_(\d+)\.dat']
# 0 - SimObserver3 data file format
# 1 - SimCreator DX data file format
class MergeTool():
# Creates a dict with an entry for each subject.
# The value of each entry in the dict is a list of
# drives in the order that they are to be merged.
# Currently just orders by driveID from lowest to highest
def groupBySubject(self, file_list, exp_index):
global regular_expressions_group
groups = {}
name_pattern = re.compile(str(regular_expressions_group[exp_index]))
for file_name in file_list:
match = name_pattern.match(file_name)
if (match):
subject = match.group(1)
drive = match.group(2)
if exp_index == 2:
drive = match.group(3)
if (subject in groups):
drive_group = groups[subject]
i = 0
while i < len(drive_group):
other_drive = name_pattern.match(drive_group[i]).group(2)
if (drive > other_drive):
i = i + 1
else:
break
drive_group.insert(i, file_name)
else:
groups[subject] = [file_name]
return groups
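    # Illustrative example (hypothetical file names in the SimObserver3 format,
    # i.e. exp_index=0; not part of the merge logic):
    #   self.groupBySubject(['Study_Sub_1_Drive_2.dat', 'Study_Sub_1_Drive_1.dat',
    #                        'Study_Sub_2_Drive_1.dat'], 0)
    #   -> {'1': ['Study_Sub_1_Drive_1.dat', 'Study_Sub_1_Drive_2.dat'],
    #       '2': ['Study_Sub_2_Drive_1.dat']}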
def sequential_merge(self, input_directory, exp_index):
global regular_expressions
global regular_expressions_glob
out_dir = os.makedirs(os.path.join(input_directory, 'MergedData'), exist_ok=True)
out_dir_name = os.path.join(input_directory, 'MergedData')
file_list = sorted(glob.glob(input_directory + '/' + regular_expressions_glob[exp_index]))
subject_groups = self.groupBySubject(file_list, exp_index)
warning = True
for key in subject_groups:
warning = False
logger.info("merging for subject: ", key)
drive_group = subject_groups[key]
drive_group.sort()
out_frame = pd.read_csv(drive_group[0], sep=' ', na_values='.', engine="c")
name_pattern = re.compile(regular_expressions[exp_index])
out_name = ""
if exp_index == 0:
match = name_pattern.match(drive_group[0])
study = match.group(1)
subject = match.group(2)
out_name = study + "_Sub_" + subject + "_Drive_0.dat"
elif exp_index == 1:
match = name_pattern.match(drive_group[0])
model = match.group(1)
part_id = match.group(2)
scen_name = match.group(3)
unique_id = match.group(4)
out_name = model + "_" + part_id + "_" + scen_name + "_" + str(int(unique_id)+1) + ".dat"
source_dir = drive_group[0]
out_frame['SourceId'] = source_dir[max(source_dir.find('\\'), source_dir.find('/')):]
for drive in drive_group[1:]:
# The latest out_frame's final SimTime. To be added across next_frame's SimTime column as a constant.
# '-1' indices didn't work here, threw a pandas error. But this code produces desired result.
                timejump = out_frame[:]["SimTime"]
                timeconstant = timejump[len(timejump) - 1]
                timejumpdat = out_frame[:]["DatTime"]
                timeconstantdat = timejumpdat[len(timejumpdat) - 1]
next_frame = pd.read_csv(drive, sep=' ', na_values='.', engine="c")
next_frame["SimTime"] += timeconstant
next_frame["DatTime"] += timeconstantdat
source_dir = drive
next_frame['SourceId'] = source_dir[max(source_dir.find('\\'), source_dir.find('/')):]
out_frame = out_frame.append(next_frame, ignore_index=True)
out_frame.to_csv(os.path.join(out_dir_name, out_name),
sep=' ', na_rep=".", index=False)
if warning is True:
logger.warning("No files processed, check merge directory (-d) to ensure there are valid data files present.")
def spatial_merge(self, input_directory, exp_index):
global regular_expressions
os.makedirs(os.path.join(input_directory, 'MergedData'), exist_ok=True)
out_dir_name = os.path.join(input_directory, 'MergedData')
file_list = glob.glob(input_directory + '/' + regular_expressions_glob[exp_index])
subject_groups = self.groupBySubject(file_list, exp_index)
for key in subject_groups:
drive_group = subject_groups[key]
out_frame = pd.read_csv(drive_group[0], sep=' ', na_values='.')
if (len(drive_group) > 1):
i = 0
while i < len(drive_group):
i = i + 1
last_line = out_frame.tail(1)
last_x = last_line.XPos.iloc[0]
last_y = last_line.YPos.iloc[0]
last_time = last_line.SimTime.iloc[0]
next_frame = pd.read_csv(drive_group[i], sep=' ', na_values='.')
min_dist = float('inf')
min_index = 0
for index, row in next_frame.iterrows():
dist = (row.XPos - last_x) ** 2 + (row.YPos - last_y) ** 2
if (dist < min_dist):
min_index = index
min_dist = dist
start_time = next_frame.iloc[min_index].SimTime
next_frame = next_frame[min_index:]
next_frame["SimTime"] += last_time
out_frame = out_frame.append(next_frame, ignore_index=True)
if i + 1 >= len(drive_group):
break
name_pattern = re.compile(regular_expressions[exp_index])
match = name_pattern.match(drive_group[0])
study = match.group(1)
subject = match.group(2)
out_name = ""
if exp_index == 0:
match = name_pattern.match(drive_group[0])
study = match.group(1)
subject = match.group(2)
out_name = study + "_Sub_" + subject + "_Drive_0.dat"
elif exp_index == 1:
match = name_pattern.match(drive_group[0])
model = match.group(1)
part_id = match.group(2)
scen_name = match.group(3)
                    unique_id = match.group(4)
                    out_name = model + "_" + part_id + "_" + scen_name + "_" + str(int(unique_id)+1) + ".dat"
out_frame.to_csv(os.path.join(out_dir_name, out_name), sep=' ')
else:
                name_pattern = re.compile(regular_expressions[exp_index])
match = name_pattern.match(drive_group[0])
base_filename = match.group(1)
out_frame.to_csv(os.path.join(out_dir_name, base_filename), sep=' ')
def __init__(self, input_directory, merge_type="spatial", regular_expression_index=0):
global regular_expressions
logger.warning(regular_expressions[regular_expression_index])
upper_type = merge_type.upper()
if upper_type == "SPATIAL":
self.spatial_merge(input_directory, regular_expression_index)
elif upper_type == "SEQUENTIAL":
self.sequential_merge(input_directory, regular_expression_index)
else:
raise ValueError(
"Merge type \"{}\" not supported. Valid merge types: \"Spatial\" or \"Sequential\"".format(merge_type))
| 45.160221
| 122
| 0.54918
|
cbb6c81dbb455fabce4f5052283272d9d2f9d5a0
| 16,205
|
py
|
Python
|
qiskit/opflow/primitive_ops/pauli_sum_op.py
|
navaneethsdk/qiskit-terra
|
66a029f2a67c14dbf34857d172b088d75d152b55
|
[
"Apache-2.0"
] | null | null | null |
qiskit/opflow/primitive_ops/pauli_sum_op.py
|
navaneethsdk/qiskit-terra
|
66a029f2a67c14dbf34857d172b088d75d152b55
|
[
"Apache-2.0"
] | null | null | null |
qiskit/opflow/primitive_ops/pauli_sum_op.py
|
navaneethsdk/qiskit-terra
|
66a029f2a67c14dbf34857d172b088d75d152b55
|
[
"Apache-2.0"
] | null | null | null |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""PauliSumOp Class """
from collections import defaultdict
from typing import Dict, List, Optional, Set, Tuple, Union, cast
import numpy as np
from scipy.sparse import spmatrix
from qiskit.circuit import Instruction, ParameterExpression
from qiskit.quantum_info import Pauli, SparsePauliOp
from qiskit.quantum_info.operators.symplectic.pauli_table import PauliTable
from qiskit.quantum_info.operators.custom_iterator import CustomIterator
from ..exceptions import OpflowError
from ..list_ops.summed_op import SummedOp
from ..list_ops.tensored_op import TensoredOp
from ..operator_base import OperatorBase
from .primitive_op import PrimitiveOp
class PauliSumOp(PrimitiveOp):
"""Class for Operators backend by Terra's ``SparsePauliOp`` class."""
def __init__(
self,
primitive: SparsePauliOp,
coeff: Union[int, float, complex, ParameterExpression] = 1.0,
grouping_type: str = "None",
) -> None:
"""
Args:
primitive: The SparsePauliOp which defines the behavior of the underlying function.
coeff: A coefficient multiplying the primitive.
grouping_type: The type of grouping. If None, the operator is not grouped.
Raises:
TypeError: invalid parameters.
"""
if not isinstance(primitive, SparsePauliOp):
raise TypeError(
f"PauliSumOp can only be instantiated with SparsePauliOp, not {type(primitive)}"
)
super().__init__(primitive, coeff=coeff)
self._grouping_type = grouping_type
def primitive_strings(self) -> Set[str]:
return {"SparsePauliOp"}
@property
def grouping_type(self) -> str:
"""
Returns: Type of Grouping
"""
return self._grouping_type
@property
def num_qubits(self) -> int:
return self.primitive.num_qubits # type: ignore
@property
def coeffs(self):
"""Return the Pauli coefficients."""
return self.coeff * self.primitive.coeffs
def matrix_iter(self, sparse=False):
"""Return a matrix representation iterator.
This is a lazy iterator that converts each term in the PauliSumOp
into a matrix as it is used. To convert to a single matrix use the
:meth:`to_matrix` method.
Args:
sparse (bool): optionally return sparse CSR matrices if True,
otherwise return Numpy array matrices
(Default: False)
Returns:
MatrixIterator: matrix iterator object for the PauliTable.
"""
class MatrixIterator(CustomIterator):
"""Matrix representation iteration and item access."""
def __repr__(self):
return "<PauliSumOp_matrix_iterator at {}>".format(hex(id(self)))
def __getitem__(self, key):
sumopcoeff = self.obj.coeff * self.obj.primitive.coeffs[key]
mat = PauliTable._to_matrix(self.obj.primitive.table.array[key],
sparse=sparse)
return sumopcoeff * mat
return MatrixIterator(self)
def add(self, other: OperatorBase) -> OperatorBase:
if not self.num_qubits == other.num_qubits:
raise ValueError(
f"Sum of operators with different numbers of qubits, {self.num_qubits} and "
f"{other.num_qubits}, is not well defined"
)
if isinstance(other, PauliSumOp):
return PauliSumOp(
self.coeff * self.primitive + other.coeff * other.primitive, coeff=1 # type: ignore
)
from .pauli_op import PauliOp
if isinstance(other, PauliOp):
return PauliSumOp(
self.coeff * self.primitive # type: ignore
+ other.coeff * SparsePauliOp(other.primitive)
)
return SummedOp([self, other])
def mul(self, scalar: Union[int, float, complex, ParameterExpression]) -> OperatorBase:
if isinstance(scalar, (int, float, complex)) and scalar != 0:
return PauliSumOp(scalar * self.primitive, coeff=self.coeff) # type: ignore
return super().mul(scalar)
def adjoint(self) -> OperatorBase:
return PauliSumOp(
self.primitive.adjoint(), coeff=self.coeff.conjugate()
)
def equals(self, other: OperatorBase) -> bool:
self_reduced, other_reduced = self.reduce(), other.reduce()
if not isinstance(other_reduced, PauliSumOp):
return False
if isinstance(self_reduced.coeff, ParameterExpression) or isinstance(
other_reduced.coeff, ParameterExpression
):
return (
self_reduced.coeff == other_reduced.coeff
and self_reduced.primitive == other_reduced.primitive # type:ignore
)
return (
len(self_reduced) == len(other_reduced)
and self_reduced.primitive == other_reduced.primitive
)
def _expand_dim(self, num_qubits: int) -> "PauliSumOp":
return PauliSumOp(
self.primitive.tensor( # type:ignore
SparsePauliOp(Pauli("I" * num_qubits))
),
coeff=self.coeff,
)
def tensor(self, other: OperatorBase) -> OperatorBase:
if isinstance(other, PauliSumOp):
return PauliSumOp(
self.primitive.tensor(other.primitive), # type:ignore
coeff=self.coeff * other.coeff,
)
return TensoredOp([self, other])
def permute(self, permutation: List[int]) -> "PauliSumOp":
"""Permutes the sequence of ``PauliSumOp``.
Args:
permutation: A list defining where each Pauli should be permuted. The Pauli at index
j of the primitive should be permuted to position permutation[j].
Returns:
A new PauliSumOp representing the permuted operator. For operator (X ^ Y ^ Z) and
indices=[1,2,4], it returns (X ^ I ^ Y ^ Z ^ I).
Raises:
OpflowError: if indices do not define a new index for each qubit.
"""
if len(permutation) != self.num_qubits:
raise OpflowError("List of indices to permute must have the "
"same size as Pauli Operator")
length = max(permutation) + 1
spop = self.primitive.tensor( # type:ignore
SparsePauliOp(Pauli("I" * (length - self.num_qubits)))
)
permutation = [i for i in range(length) if i not in permutation] + permutation
permutation = np.arange(length)[np.argsort(permutation)]
permutation = np.hstack([permutation, permutation + length]) # type: ignore
spop.table.array = spop.table.array[:, permutation]
return PauliSumOp(spop, self.coeff)
def compose(
self,
other: OperatorBase,
permutation: Optional[List[int]] = None,
front: bool = False,
) -> OperatorBase:
new_self, other = self._expand_shorter_operator_and_permute(other, permutation)
new_self = cast(PauliSumOp, new_self)
if front:
return other.compose(new_self)
# If self is identity, just return other.
if not np.any(new_self.primitive.table.array): # type: ignore
return other * new_self.coeff * sum(new_self.primitive.coeffs) # type: ignore
# Both PauliSumOps
if isinstance(other, PauliSumOp):
return PauliSumOp(
new_self.primitive * other.primitive, # type:ignore
coeff=new_self.coeff * other.coeff,
)
# TODO: implement compose with PauliOp
# pylint: disable=cyclic-import,import-outside-toplevel
from ..state_fns.circuit_state_fn import CircuitStateFn
from .circuit_op import CircuitOp
if isinstance(other, (CircuitOp, CircuitStateFn)):
return new_self.to_pauli_op().to_circuit_op().compose(other) # type: ignore
return super(PauliSumOp, new_self).compose(other)
def to_matrix(self, massive: bool = False) -> np.ndarray:
OperatorBase._check_massive("to_matrix", True, self.num_qubits, massive)
if isinstance(self.coeff, ParameterExpression):
return (self.primitive.to_matrix(sparse=True)).toarray() * self.coeff # type: ignore
return (self.primitive.to_matrix(sparse=True) * self.coeff).toarray() # type: ignore
def __str__(self) -> str:
def format_sign(x):
return x.real if np.isreal(x) else x
def format_number(x):
x = format_sign(x)
if isinstance(x, (int, float)) and x < 0:
return f"- {-x}"
return f"+ {x}"
indent = "" if self.coeff == 1 else " "
prim_list = self.primitive.to_list() # type: ignore
if prim_list:
first = prim_list[0]
if isinstance(first[1], (int, float)) and first[1] < 0:
main_string = indent + f"- {-first[1].real} * {first[0]}"
else:
main_string = indent + f"{format_sign(first[1])} * {first[0]}"
main_string += "".join([f"\n{indent}{format_number(c)} * {p}" for p, c in prim_list[1:]])
return f"{main_string}" if self.coeff == 1 else f"{self.coeff} * (\n{main_string}\n)"
def eval(
self,
front: Optional[Union[str, Dict[str, complex], np.ndarray, OperatorBase]] = None,
) -> Union[OperatorBase, float, complex]:
if front is None:
return self.to_matrix_op()
# pylint: disable=import-outside-toplevel,cyclic-import
from ..list_ops.list_op import ListOp
from ..state_fns.circuit_state_fn import CircuitStateFn
from ..state_fns.dict_state_fn import DictStateFn
from ..state_fns.state_fn import StateFn
from .circuit_op import CircuitOp
from .pauli_op import PauliOp
# For now, always do this. If it's not performant, we can be more granular.
if not isinstance(front, OperatorBase):
front = StateFn(front, is_measurement=False)
if isinstance(front, ListOp) and front.distributive:
return front.combo_fn(
[self.eval(front.coeff * front_elem) for front_elem in front.oplist] # type: ignore
)
else:
if self.num_qubits != front.num_qubits:
raise ValueError(
"eval does not support operands with differing numbers of qubits, "
"{} and {}, respectively.".format(self.num_qubits, front.num_qubits)
)
if isinstance(front, DictStateFn):
new_dict = defaultdict(int)
corrected_x_bits = self.primitive.table.X
corrected_z_bits = self.primitive.table.Z
coeffs = self.primitive.coeffs
for bstr, v in front.primitive.items():
bitstr = np.fromiter(bstr, dtype=int).astype(bool)
new_b_str = np.logical_xor(bitstr, corrected_x_bits)
new_str = ["".join([str(b) for b in bs]) for bs in new_b_str.astype(int)]
z_factor = np.product(1 - 2 * np.logical_and(bitstr, corrected_z_bits), axis=1)
y_factor = np.product(
np.sqrt(1 - 2 * np.logical_and(corrected_x_bits, corrected_z_bits) + 0j),
axis=1,
)
for i, n_str in enumerate(new_str):
new_dict[n_str] += v * z_factor[i] * y_factor[i] * coeffs[i]
return DictStateFn(new_dict, coeff=self.coeff * front.coeff)
elif isinstance(front, StateFn) and front.is_measurement:
raise ValueError("Operator composed with a measurement is undefined.")
# Composable types with PauliOp
elif isinstance(front, (PauliSumOp, PauliOp, CircuitOp, CircuitStateFn)):
return self.compose(front).eval()
# Covers VectorStateFn and OperatorStateFn
return self.to_matrix_op().eval(front.to_matrix_op())
def exp_i(self) -> OperatorBase:
""" Return a ``CircuitOp`` equivalent to e^-iH for this operator H. """
# TODO: optimize for some special cases
from ..evolutions.evolved_op import EvolvedOp
return EvolvedOp(self)
def to_instruction(self) -> Instruction:
return self.to_matrix_op().to_circuit().to_instruction() # type: ignore
def to_pauli_op(self, massive: bool = False) -> OperatorBase:
from .pauli_op import PauliOp
def to_real(x):
return x.real if np.isreal(x) else x
def to_native(x):
return x.item() if isinstance(x, np.generic) else x
if len(self.primitive) == 1:
return PauliOp(
Pauli((self.primitive.table.Z[0], self.primitive.table.X[0])), # type: ignore
to_native(to_real(self.primitive.coeffs[0])) * self.coeff, # type: ignore
)
return SummedOp(
[
PauliOp(
Pauli((s.table.Z[0], s.table.X[0])),
to_native(to_real(s.coeffs[0])),
)
for s in self.primitive
],
coeff=self.coeff,
)
def __getitem__(self, offset: Union[int, slice]) -> "PauliSumOp":
"""Allows array-indexing style access to the ``PauliSumOp``.
Args:
offset: The index of ``PauliSumOp``.
Returns:
            The ``PauliSumOp`` at index ``offset``.
"""
return PauliSumOp(self.primitive[offset], self.coeff)
def __len__(self) -> int:
"""Length of ``SparsePauliOp``.
Returns:
An int equal to the length of SparsePauliOp.
"""
return len(self.primitive)
# pylint: disable=arguments-differ
def reduce(self, atol: Optional[float] = None, rtol: Optional[float] = None) -> "PauliSumOp":
"""Simplify the primitive ``SparsePauliOp``.
Args:
atol: Absolute tolerance for checking if coefficients are zero (Default: 1e-8).
rtol: Relative tolerance for checking if coefficients are zero (Default: 1e-5).
Returns:
The simplified ``PauliSumOp``.
"""
if isinstance(self.coeff, (int, float, complex)):
primitive = self.coeff * self.primitive # type: ignore
return PauliSumOp(primitive.simplify(atol=atol, rtol=rtol)) # type: ignore
return PauliSumOp(self.primitive.simplify(atol=atol, rtol=rtol), self.coeff) # type: ignore
def to_spmatrix(self) -> spmatrix:
"""Returns SciPy sparse matrix representation of the ``PauliSumOp``.
Returns:
CSR sparse matrix representation of the ``PauliSumOp``.
Raises:
ValueError: invalid parameters.
"""
return self.primitive.to_matrix(sparse=True) * self.coeff # type: ignore
@classmethod
def from_list(
cls,
pauli_list: List[Tuple[str, Union[int, float, complex]]],
coeff: Union[int, float, complex, ParameterExpression] = 1.0,
) -> "PauliSumOp":
"""Construct from a pauli_list with the form [(pauli_str, coeffs)]
Args:
pauli_list: A list of Tuple of pauli_str and coefficient.
coeff: A coefficient multiplying the primitive.
Returns:
The PauliSumOp constructed from the pauli_list.
"""
return cls(SparsePauliOp.from_list(pauli_list), coeff=coeff)
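# Illustrative usage sketch (not part of this module; the operator below is
# made up for the example):
#
#     op = PauliSumOp.from_list([("XZ", 2.0), ("YY", -1.5), ("XZ", 1.0)])
#     op.num_qubits   # 2
#     op.reduce()     # combines the two XZ terms into a single 3.0 * XZ term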
| 38.860911
| 100
| 0.603394
|
9fb82f4ad6c868c04cb3de6892833d1efd96865a
| 3,259
|
py
|
Python
|
disasm-coe.py
|
smdsbz/mips-disassembler
|
3aacedbf861d7b3628ffb5ba8dab3a1859b7f7ce
|
[
"MIT"
] | null | null | null |
disasm-coe.py
|
smdsbz/mips-disassembler
|
3aacedbf861d7b3628ffb5ba8dab3a1859b7f7ce
|
[
"MIT"
] | null | null | null |
disasm-coe.py
|
smdsbz/mips-disassembler
|
3aacedbf861d7b3628ffb5ba8dab3a1859b7f7ce
|
[
"MIT"
] | 1
|
2021-03-02T09:33:46.000Z
|
2021-03-02T09:33:46.000Z
|
# -*- coding: utf-8 -*-
import capstone as cs
'''
#### `.coe` File Format Example
```text
memory_initialization_radix=16;
memory_initialization_vector=
3C10BFC0 00000000 00000000 00000000
36100000 00000000 00000000 00000000
BE090010 00000000 00000000 00000000
8E110000 00000000 00000000 00000000
00000000 00000000 00000000 00000000
00000000 00000000 00000000 00000000
00000000 00000000 00000000 00000000
00000000 00000000 00000000 00000000
;
```
> Note:
> - The leading two lines are required!
> - One instruction word per line only!
> - Instruction words must be found in the first column!
> - Instruction words are in __BIG ENDIAN__!
'''
######## Configurations - CHANGE ME !!! ########
# path to `.coe` file to be disassemblied
filename = './ram_init_os.coe'
######## Helper Functions ########
def chr_to_num(in_: str):
if len(in_) != 1:
raise ValueError('{}: in_ = \'{}\''.format('chr_to_num', in_))
if in_ in '1234567890':
return ord(in_) - ord('0')
if in_ in 'abcdef':
return ord(in_) - ord('a') + 10
if in_ in 'ABCDEF':
return ord(in_) - ord('A') + 10
raise ValueError()
def str_to_byte(in_: str):
if len(in_) != 2:
raise ValueError('{}: in_ = \'{}\''.format('str_to_byte', in_))
return chr_to_num(in_[0]) * 16 + chr_to_num(in_[1])
def word_to_intlist(in_: str):
if len(in_) != 8:
raise ValueError('{}: in_ = \'{}\''.format('word_to_intlist', in_))
bytelist = []
for plc in range(0, 8, 2):
bytelist.append(str_to_byte(in_[plc:plc+2]))
return bytelist
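# Illustrative example (instruction word taken from the sample .coe above):
#   word_to_intlist('3C10BFC0') -> [0x3C, 0x10, 0xBF, 0xC0] == [60, 16, 191, 192]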
######### Main #########
bytelist = [] # list of int
with open(filename, 'r') as coefile:
# skip file property lines
coefile.readline()
coefile.readline()
for lineno, line in enumerate(coefile):
if line.strip() == ';':
break
try:
bytelist.extend(word_to_intlist(line.split(' ')[0]))
except ValueError as e:
print('Caught ValueError: {}'.format(e))
break
print('')
md = cs.Cs(cs.CS_ARCH_MIPS, cs.CS_MODE_MIPS32 + cs.CS_MODE_BIG_ENDIAN)
md.mnemonic_setup(cs.mips.MIPS_INS_AUI, 'lui')
code = bytes(bytelist)
print('Address Word Instruction')
print('======= ==== ===========')
print('')
__guess_seg_end = False # static variable of `likely_segment_end()`
def likely_segment_end(mne: str):
'''
    It is naively assumed that, on a platform with a delay slot, an
    unconditional branch / jump followed by an empty delay slot (i.e. `nop`)
    likely indicates a `goto` / `return` statement in the disassembly.
    Inserting an empty line in the output makes it much easier to read.
'''
global __guess_seg_end
if (mne[0] in 'j') or (mne in ['b', 'sdbbp', 'eret']):
__guess_seg_end = True
elif __guess_seg_end and mne == 'nop':
__guess_seg_end = False
return True
return False
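# Illustrative trace (hypothetical mnemonics): an unconditional jump arms the
# flag, and the empty delay slot that follows confirms a likely segment end.
#   likely_segment_end('jr')  -> False   (flag set)
#   likely_segment_end('nop') -> True    (flag cleared; the caller prints a blank line)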
for instr in md.disasm(code, 0xbfc00000):
print('0x{:8x}: {:8s} {:<7s}{:s}'.format(
instr.address, instr.bytes.hex(), instr.mnemonic, instr.op_str
))
if likely_segment_end(instr.mnemonic):
print('')
| 29.627273
| 79
| 0.608162
|
53ddd43115a6b208433d405bd42cd3dbada5b331
| 719
|
py
|
Python
|
wildlifecompliance/migrations/0261_inspection_inspection_team_lead.py
|
preranaandure/wildlifecompliance
|
bc19575f7bccf7e19adadbbaf5d3eda1d1aee4b5
|
[
"Apache-2.0"
] | 1
|
2020-12-07T17:12:40.000Z
|
2020-12-07T17:12:40.000Z
|
wildlifecompliance/migrations/0261_inspection_inspection_team_lead.py
|
preranaandure/wildlifecompliance
|
bc19575f7bccf7e19adadbbaf5d3eda1d1aee4b5
|
[
"Apache-2.0"
] | 14
|
2020-01-08T08:08:26.000Z
|
2021-03-19T22:59:46.000Z
|
wildlifecompliance/migrations/0261_inspection_inspection_team_lead.py
|
preranaandure/wildlifecompliance
|
bc19575f7bccf7e19adadbbaf5d3eda1d1aee4b5
|
[
"Apache-2.0"
] | 15
|
2020-01-08T08:02:28.000Z
|
2021-11-03T06:48:32.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2019-07-19 02:47
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('wildlifecompliance', '0260_auto_20190718_1653'),
]
operations = [
migrations.AddField(
model_name='inspection',
name='inspection_team_lead',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='inspection_team_lead', to=settings.AUTH_USER_MODEL),
),
]
| 29.958333
| 158
| 0.703755
|
0acb43da0b65c5305ee13708c93b194ef06a86d4
| 5,139
|
py
|
Python
|
homedisplay/info_air_quality/management/commands/fetch_outside_air_quality.py
|
ojarva/home-info-display
|
873d022308732baff94d0dc2381cf9dc7dce23b7
|
[
"BSD-3-Clause"
] | 1
|
2016-11-28T04:35:06.000Z
|
2016-11-28T04:35:06.000Z
|
homedisplay/info_air_quality/management/commands/fetch_outside_air_quality.py
|
ojarva/home-info-display
|
873d022308732baff94d0dc2381cf9dc7dce23b7
|
[
"BSD-3-Clause"
] | 160
|
2015-01-01T20:59:29.000Z
|
2016-04-25T13:36:52.000Z
|
homedisplay/info_air_quality/management/commands/fetch_outside_air_quality.py
|
ojarva/home-info-display
|
873d022308732baff94d0dc2381cf9dc7dce23b7
|
[
"BSD-3-Clause"
] | 1
|
2015-02-25T21:24:01.000Z
|
2015-02-25T21:24:01.000Z
|
from bs4 import BeautifulSoup
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.utils import timezone
from homedisplay.utils import publish_ws
import datetime
import json
import redis
import requests
class Command(BaseCommand):
args = ''
help = 'Fetches outdoor air quality information'
def __init__(self, *args, **kwargs):
super(self.__class__, self).__init__()
self.redis_instance = redis.StrictRedis()
def save_to_influx(self, datapoints):
self.redis_instance.publish("influx-update-pubsub", json.dumps(datapoints))
def get_data(self, station_id, sensor):
session = requests.Session()
headers = {
"Accept-Encoding": None,
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2272.89 Safari/537.36",
}
date = datetime.date.today().strftime("%d.%m.%Y")
url = "http://www.ilmanlaatu.fi/ilmanyt/nyt/ilmanyt.php?as=Suomi&rs=86&ss={station_id}&p={sensor}&pv={date}&j=23&et=table&tj=3600&ls=suomi".format(
sensor=sensor, date=date, station_id=station_id)
session.get(url)
url_table = "http://www.ilmanlaatu.fi/php/table/observationsInTable.php?step=3600&today=1×equence=23&time={timestamp}&station={station_id}".format(
timestamp=datetime.datetime.now().strftime("%Y%m%d%H"), station_id=station_id)
headers["referer"] = url
return session.get(url_table, headers=headers).text
def process_station(self, station_config):
influx_datapoints = []
latest_values = {}
for quality_item in station_config["sensors"]:
response_text = self.get_data(
station_config["station_id"], quality_item)
soup = BeautifulSoup(response_text, "lxml")
value = None
timestamp = None
for row in soup.table.find_all("tr"):
try:
c = row["class"]
if "sinitausta" in c:
# Header row
continue
except KeyError:
pass
current_hour = None
for item in row.find_all("td"):
if current_hour is None:
current_hour = int(item.string)
if current_hour > 23:
current_hour = False
continue
elif current_hour is not False:
try:
value = float(item.string)
except TypeError:
continue
timestamp = timezone.make_aware((datetime.datetime.combine(datetime.date.today(
), datetime.time(current_hour, 0))), timezone.get_current_timezone())
if station_config["station_name"] == "Kallio":
self.redis_instance.setex(
"outdoor-air-quality-latest-%s" % quality_item, 3600 * 6, value)
influx_datapoints.append({
"measurement": "outside_air_quality",
"tags": {
"measurement": quality_item,
"location": station_config["station_name"],
},
"time": timestamp.isoformat(),
"fields": {
"value": value,
},
})
if value is not None and timestamp is not None and station_config["station_name"] == "Kallio":
latest_values[quality_item] = {
"timestamp": str(timestamp), "value": value}
if len(influx_datapoints) > 0:
self.save_to_influx(influx_datapoints)
if station_config["station_name"] == "Kallio":
publish_ws("outside_air_quality", latest_values)
def handle(self, *args, **options):
stations = [
{
"station_id": "425",
"station_name": "Kallio",
"sensors": ["particulateslt10um", "ozone", "particulateslt2.5um", "sulphurdioxide", "nitrogendioxide"],
},
{
"station_id": "781",
"station_name": "Vartiokyla",
"sensors": ["ozone", "particulateslt2.5um", "nitrogendioxide"],
},
{
"station_id": "564",
"station_name": "Mannerheimintie",
"sensors": ["particulateslt10um", "particulateslt2.5um", "nitrogendioxide"],
},
{
"station_id": "902",
"station_name": "Makelankatu",
"sensors": ["particulateslt10um", "ozone", "particulateslt2.5um", "nitrogendioxide"],
}
]
for station_config in stations:
self.process_station(station_config)
| 41.780488
| 160
| 0.526562
|
7b48d462eb8b9a2bab54c5a105e91563fc8871d3
| 1,922
|
py
|
Python
|
quantum/extensions/flavor.py
|
aristanetworks/arista-ovs-quantum
|
8e7fe17cafa70c3346e2a5d70da2d6e2637c338e
|
[
"Apache-2.0"
] | null | null | null |
quantum/extensions/flavor.py
|
aristanetworks/arista-ovs-quantum
|
8e7fe17cafa70c3346e2a5d70da2d6e2637c338e
|
[
"Apache-2.0"
] | null | null | null |
quantum/extensions/flavor.py
|
aristanetworks/arista-ovs-quantum
|
8e7fe17cafa70c3346e2a5d70da2d6e2637c338e
|
[
"Apache-2.0"
] | null | null | null |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012 Nachi Ueno, NTT MCL, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from quantum.api.v2 import attributes
from quantum.openstack.common import log as logging
LOG = logging.getLogger(__name__)
FLAVOR_NETWORK = 'flavor:network'
FLAVOR_ROUTER = 'flavor:router'
FLAVOR_ATTRIBUTE = {
'networks': {
FLAVOR_NETWORK: {'allow_post': True,
'allow_put': False,
'is_visible': True,
'default': attributes.ATTR_NOT_SPECIFIED}
},
'routers': {
FLAVOR_ROUTER: {'allow_post': True,
'allow_put': False,
'is_visible': True,
'default': attributes.ATTR_NOT_SPECIFIED}
}
}
class Flavor(object):
@classmethod
def get_name(cls):
return "Flavor support for network and router"
@classmethod
def get_alias(cls):
return "flavor"
@classmethod
def get_description(cls):
return "Flavor"
@classmethod
def get_namespace(cls):
return "http://docs.openstack.org/ext/flavor/api/v1.0"
@classmethod
def get_updated(cls):
return "2012-07-20T10:00:00-00:00"
def get_extended_resources(self, version):
if version == "2.0":
return FLAVOR_ATTRIBUTE
else:
return {}
| 28.264706
| 78
| 0.639958
|
0b805bbf8ddac529eb87d35885d2453f03d98cb3
| 9,934
|
py
|
Python
|
api-guide/source/conf.py
|
JohnGarbutt/nova
|
21f6f7b63af920aa3a5501603c3debbcd5ec5bc5
|
[
"Apache-2.0"
] | 2
|
2018-11-18T16:03:18.000Z
|
2019-05-15T04:34:55.000Z
|
api-guide/source/conf.py
|
JohnGarbutt/nova
|
21f6f7b63af920aa3a5501603c3debbcd5ec5bc5
|
[
"Apache-2.0"
] | null | null | null |
api-guide/source/conf.py
|
JohnGarbutt/nova
|
21f6f7b63af920aa3a5501603c3debbcd5ec5bc5
|
[
"Apache-2.0"
] | 2
|
2015-12-28T14:36:29.000Z
|
2018-11-18T16:03:20.000Z
|
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Compute API documentation build configuration file
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# import sys
import os
import openstackdocstheme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Compute API Guide'
bug_tag = u'api-guide'
copyright = u'2015, OpenStack contributors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.1.0'
# The full version, including alpha/beta/rc tags.
release = '2.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# A few variables have to be set for the log-a-bug feature.
# giturl: The location of conf.py on Git. Must be set manually.
# gitsha: The SHA checksum of the bug description. Extracted from git log.
# bug_tag: Tag for categorizing the bug. Must be set manually.
# bug_project: Launchpad project to file bugs against.
# These variables are passed to the logabug code via html_context.
giturl = u'http://git.openstack.org/cgit/openstack/nova/tree/api-guide/source'
git_cmd = "/usr/bin/git log | head -n1 | cut -f2 -d' '"
gitsha = os.popen(git_cmd).read().strip('\n')
# source tree
pwd = os.popen("pwd").read().strip('\n')
# html_context allows us to pass arbitrary values into the html template
html_context = {"pwd": pwd,
"gitsha": gitsha,
"bug_tag": bug_tag,
"giturl": giturl,
"bug_project": "nova"}
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'openstackdocs'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [openstackdocstheme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%Y-%m-%d %H:%M'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'compute-api-guide'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'ComputeAPI.tex', u'Compute API Documentation',
u'OpenStack contributors', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'computeapi', u'Compute API Documentation',
[u'OpenStack contributors'], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'ComputeAPIGuide', u'Compute API Guide',
u'OpenStack contributors', 'APIGuide',
'This guide teaches OpenStack Compute service users concepts about '
'managing resources in an OpenStack cloud with the Compute API.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# -- Options for Internationalization output ------------------------------
locale_dirs = ['locale/']
# -- Options for PDF output --------------------------------------------------
pdf_documents = [
('index', u'ComputeAPIGuide', u'Compute API Guide', u'OpenStack '
'contributors')
]
| 33.33557
| 79
| 0.707369
|
26322fd52c9097cd66e6970a18fff1e4c0d01684
| 3,016
|
py
|
Python
|
DRR-Studies/script_main.py
|
PayamZandiyeh/DRR-Studies
|
8b4f153b989af6b29bcfc517b188d5c132949b34
|
[
"MIT"
] | 13
|
2019-12-14T09:26:53.000Z
|
2022-03-23T02:50:11.000Z
|
DRR-Studies/script_main.py
|
BTAROCKET/DRR-Studies
|
8b4f153b989af6b29bcfc517b188d5c132949b34
|
[
"MIT"
] | null | null | null |
DRR-Studies/script_main.py
|
BTAROCKET/DRR-Studies
|
8b4f153b989af6b29bcfc517b188d5c132949b34
|
[
"MIT"
] | 2
|
2020-01-27T17:35:49.000Z
|
2021-11-02T13:18:31.000Z
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 7 13:55:00 2018
@author: Payam
"""
#%% import libraries
import itk
import numpy as np
from read_image import get_itk_image_type
import main_functions
import os
from StereoFlouroscopyRegistration.io.read_image import get_itk_image_type
input_filename = '/Users/pzandiyeh/Documents/Storage/Projects/Registration/QuickData/knee_ct_volume_identity.nii'
output_filename = ['knee_test_cam1.nii', # output file name 1
'knee_test_cam2.nii'] # output file name 2
verbose = False # verbose details of all steps.
#% -------------------- Reader -------------------------
InputImageType = get_itk_image_type(input_filename)
OutputImageType= InputImageType
inputImage = itk.imread(input_filename)
#%% Set input information
sizeOutput = [1024,1400,1] # The size of output image
threshold = 0.
rot = [0., 0., 0.] # rotation in degrees in x, y, and z direction.
t = [0. ,0. ,0.] # translation in x, y, and z directions.
cor = [0. ,0. ,0.] # offset of the rotation from the center of image (3D)
spaceOutput = [0.167,0.167,1]
delta = sizeOutput[0]*spaceOutput[0]/2
focalPoint = [0.0,0.0,1000.0]
originOutput = [delta,delta,-200.0]
directionOutput = np.matrix([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
#%%
for counter_x in range(0,5,5): # Rotation in x
for counter_y in range(0,5,5): # Rotation in y
for counter_z in range(0,5,5): # Rotation in z
rot = [float(counter_x),float(counter_y),float(counter_z)] # Making the rotation into an array
print(rot)
output_directory = "/Users/pzandiyeh/Desktop/OutputImages" # output directory
if not os.path.exists(output_directory): # If the directory is not existing , create one.
os.mkdir(output_directory) # Make the directory
filetype = '.nii' # type of output image ... it can be nifti or dicom
filename = 'rx_'+str(int(rot[0])) + 'ry_'+str(int(rot[1])) + 'rz_'+str(int(rot[2]))+filetype # makes the complete path
output_filename = os.path.join(output_directory,filename) # creating the output directory where all the images are stored.
main_functions.drr(inputImage,output_filename,rot,t,focalPoint,originOutput,sizeOutput,cor,spaceOutput,directionOutput,threshold,InputImageType,OutputImageType,verbose) # creating drr.
#%% For later.
#import itk_helpers as Functions
##%%
## Transferring the 3D image so that the center of rotation of the image is located at global origin.
#
#Functions.rigid_body_transform3D(input_filename='/Volumes/Storage/Projects/Registration/QuickData/OA-BEADS-CT.nii',\
# output_filename='/Volumes/Storage/Projects/Registration/QuickData/transformed_ct.nii',\
# t =[-1.14648438, 132.85351562, 502.09999385],rot = [-90.,0.,90.])
| 41.315068 | 197 | 0.645889 |
375dbf88bd2b455a8a3652b570bbb62acff8d95e | 222 | py | Python | Strings/strpixel1.py | cyph3r-exe/python-practice-files | d95dd5eb14378c4f08fe4354d66356e3128de54e | ["Apache-2.0"] | null | null | null | Strings/strpixel1.py | cyph3r-exe/python-practice-files | d95dd5eb14378c4f08fe4354d66356e3128de54e | ["Apache-2.0"] | null | null | null | Strings/strpixel1.py | cyph3r-exe/python-practice-files | d95dd5eb14378c4f08fe4354d66356e3128de54e | ["Apache-2.0"] | null | null | null |
#Indexing part 1
# 0123456789012345678
str = "come let's practice"
print(str[4:15])
#part2
print(str[4:15:2])
#part3
print(str[:12])
#part4
print(str[16:11])
#part5
print(str[16:11:-2])
#part6
print(str[::-1])
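For reference, the slice results above can be verified directly; a self-contained sketch with the expected values (start is inclusive, stop is exclusive, and a negative step walks backwards):
s = "come let's practice"
assert s[4:15] == " let's prac"           # characters 4 through 14
assert s[4:15:2] == " e' rc"              # every second character in that range
assert s[:12] == "come let's p"           # omitted start defaults to 0
assert s[16:11] == ""                     # empty: positive step but start > stop
assert s[16:11:-2] == "icr"               # indices 16, 14, 12
assert s[::-1] == "ecitcarp s'tel emoc"   # full reverse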
| 11.684211 | 27 | 0.644144 |
5b94942b8e3ff23c449e1a6a1cd8293924bd3570 | 1,751 | py | Python | trunk/VyPy/optimize/drivers/Driver.py | jiaxu825/VyPy | 47100bad9dea46f12cb8bfa1ba86886e06f5c85d | ["BSD-3-Clause"] | 1 | 2021-12-28T06:39:54.000Z | 2021-12-28T06:39:54.000Z | trunk/VyPy/optimize/drivers/Driver.py | paulcon/VyPy | 5acb40e8d19ea76f3cd45f9cf98f252ca15e23f6 | ["BSD-3-Clause"] | null | null | null | trunk/VyPy/optimize/drivers/Driver.py | paulcon/VyPy | 5acb40e8d19ea76f3cd45f9cf98f252ca15e23f6 | ["BSD-3-Clause"] | null | null | null |
# ----------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------
from VyPy.data import ibunch, obunch
# ----------------------------------------------------------------------
# Driver
# ----------------------------------------------------------------------
class Driver(object):
def __init__(self):
self.verbose = True
self.other_options = obunch()
def run(self,problem):
raise NotImplementedError
def pack_outputs(self,vars_min):
# unpack
objectives = self.problem.objectives
equalities = self.problem.constraints.equalities
inequalities = self.problem.constraints.inequalities
# start the data structure
outputs = ibunch()
outputs.variables = None
outputs.objectives = ibunch()
outputs.equalities = ibunch()
outputs.inequalities = ibunch()
outputs.success = False
outputs.messages = ibunch()
        # variables
outputs.variables = vars_min
# objectives
for tag in objectives.tags():
outputs.objectives[tag] = objectives[tag].evaluator.function(vars_min)[tag]
# equalities
for tag in equalities.tags():
outputs.equalities[tag] = equalities[tag].evaluator.function(vars_min)[tag]
# inequalities
for tag in inequalities.tags():
outputs.inequalities[tag] = inequalities[tag].evaluator.function(vars_min)[tag]
return outputs
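A minimal sketch of how a concrete driver could plug into this base class, assuming only the interface shown above (run receives a problem, and pack_outputs reads self.problem); the subclass name and the find_minimum helper are hypothetical placeholders, not part of VyPy:
class MyDriver(Driver):
    def run(self, problem):
        self.problem = problem                 # pack_outputs reads self.problem
        vars_min = self.find_minimum(problem)  # hypothetical search routine
        return self.pack_outputs(vars_min)

    def find_minimum(self, problem):
        raise NotImplementedError  # replace with an actual search strategy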
| 32.425926 | 92 | 0.459737 |
4ba0aa716a9867b75bea9a0575c6447dced10d2e | 4,182 | py | Python | examples/unsupervised_learning/TSDAE/train_askubuntu_tsdae.py | faezakamran/sentence-transformers | 2158fff3aa96651b10fe367c41fdd5008a33c5c6 | ["Apache-2.0"] | 7,566 | 2019-07-25T07:45:17.000Z | 2022-03-31T22:15:35.000Z | examples/unsupervised_learning/TSDAE/train_askubuntu_tsdae.py | faezakamran/sentence-transformers | 2158fff3aa96651b10fe367c41fdd5008a33c5c6 | ["Apache-2.0"] | 1,444 | 2019-07-25T11:53:48.000Z | 2022-03-31T15:13:32.000Z | examples/unsupervised_learning/TSDAE/train_askubuntu_tsdae.py | faezakamran/sentence-transformers | 2158fff3aa96651b10fe367c41fdd5008a33c5c6 | ["Apache-2.0"] | 1,567 | 2019-07-26T15:19:28.000Z | 2022-03-31T19:57:35.000Z |
from sentence_transformers import SentenceTransformer, LoggingHandler
from sentence_transformers import models, util, datasets, evaluation, losses
import logging
import os
import gzip
from torch.utils.data import DataLoader
from datetime import datetime
import sys
#### Just some code to print debug information to stdout
logging.basicConfig(format='%(asctime)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=logging.INFO,
handlers=[LoggingHandler()])
#### /print debug information to stdout
################# Download AskUbuntu and extract training corpus #################
askubuntu_folder = 'data/askubuntu'
result_folder = 'output/askubuntu-tsdae-'+datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
batch_size = 8
## Download the AskUbuntu dataset from https://github.com/taolei87/askubuntu
for filename in ['text_tokenized.txt.gz', 'dev.txt', 'test.txt', 'train_random.txt']:
filepath = os.path.join(askubuntu_folder, filename)
if not os.path.exists(filepath):
util.http_get('https://github.com/taolei87/askubuntu/raw/master/'+filename, filepath)
# Read the corpus
corpus = {}
dev_test_ids = set()
with gzip.open(os.path.join(askubuntu_folder, 'text_tokenized.txt.gz'), 'rt', encoding='utf8') as fIn:
for line in fIn:
splits = line.strip().split("\t")
id = splits[0]
title = splits[1]
corpus[id] = title
# Read dev & test dataset
def read_eval_dataset(filepath):
dataset = []
with open(filepath) as fIn:
for line in fIn:
query_id, relevant_id, candidate_ids, bm25_scores = line.strip().split("\t")
if len(relevant_id) == 0: #Skip examples without relevant entries
continue
relevant_id = relevant_id.split(" ")
candidate_ids = candidate_ids.split(" ")
negative_ids = set(candidate_ids) - set(relevant_id)
dataset.append({
'query': corpus[query_id],
'positive': [corpus[pid] for pid in relevant_id],
'negative': [corpus[pid] for pid in negative_ids]
})
dev_test_ids.add(query_id)
dev_test_ids.update(candidate_ids)
return dataset
dev_dataset = read_eval_dataset(os.path.join(askubuntu_folder, 'dev.txt'))
test_dataset = read_eval_dataset(os.path.join(askubuntu_folder, 'test.txt'))
## Now we need a list of train sentences.
## In this example we simply use all sentences that don't appear in the dev/test set
train_sentences = []
for id, sentence in corpus.items():
if id not in dev_test_ids:
train_sentences.append(sentence)
logging.info("{} train sentences".format(len(train_sentences)))
################# Initialize an SBERT model #################
model_name = sys.argv[1] if len(sys.argv) >= 2 else 'bert-base-uncased'
word_embedding_model = models.Transformer(model_name)
# Apply **cls** pooling to get one fixed sized sentence vector
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension(), 'cls')
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
################# Train and evaluate the model (it needs about 1 hour for one epoch of AskUbuntu) #################
# We wrap our training sentences in the DenoisingAutoEncoderDataset to add deletion noise on the fly
train_dataset = datasets.DenoisingAutoEncoderDataset(train_sentences)
train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, drop_last=True)
train_loss = losses.DenoisingAutoEncoderLoss(model, decoder_name_or_path=model_name, tie_encoder_decoder=True)
# Create a dev evaluator
dev_evaluator = evaluation.RerankingEvaluator(dev_dataset, name='AskUbuntu dev')
logging.info("Dev performance before training")
dev_evaluator(model)
total_steps = 20000
logging.info("Start training")
model.fit(
train_objectives=[(train_dataloader, train_loss)],
evaluator=dev_evaluator,
evaluation_steps=1000,
epochs=1,
steps_per_epoch=total_steps,
weight_decay=0,
scheduler='constantlr',
optimizer_params={'lr': 3e-5},
output_path=result_folder,
show_progress_bar=True
)
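A small follow-up sketch, assuming only the sentence-transformers APIs already used above: reload the trained model saved to result_folder and score it on the held-out test split with the same RerankingEvaluator.
trained_model = SentenceTransformer(result_folder)
test_evaluator = evaluation.RerankingEvaluator(test_dataset, name='AskUbuntu test')
logging.info("Test performance after training")
test_evaluator(trained_model)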
| 38.018182 | 115 | 0.694165 |
dbcafc279ee5c7f9943ffbe486e50aac3af5cdec | 5,548 | py | Python | sources/skill/annexes/skill_bus_toulouse.py | pbillerot/alexa_pi_ynh | 685a9ded4762f8e5f6aa0d0c812c72e200a46e31 | ["MIT"] | 1 | 2019-09-06T06:49:43.000Z | 2019-09-06T06:49:43.000Z | sources/skill/annexes/skill_bus_toulouse.py | pbillerot/alexa_pi_ynh | 685a9ded4762f8e5f6aa0d0c812c72e200a46e31 | ["MIT"] | null | null | null | sources/skill/annexes/skill_bus_toulouse.py | pbillerot/alexa_pi_ynh | 685a9ded4762f8e5f6aa0d0c812c72e200a46e31 | ["MIT"] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from ask_sdk_core.skill_builder import SkillBuilder
from ask_sdk_core.utils import is_request_type, is_intent_name
from ask_sdk_model.ui import SimpleCard
from os import environ
from tisseo import prochains_passages
from six import PY2
try:
from HTMLParser import HTMLParser
except ImportError:
from html.parser import HTMLParser
################################################
class SSMLStripper(HTMLParser):
def __init__(self):
self.reset()
self.full_str_list = []
if not PY2:
self.strict = False
self.convert_charrefs = True
def handle_data(self, d):
self.full_str_list.append(d)
def get_data(self):
return ''.join(self.full_str_list)
################################################
_TISSEO_API_KEY = environ['TISSEO_API_KEY']
if _TISSEO_API_KEY == "":
raise KeyError("TISSEO_API_KEY environment variable must be set")
skill_name = "Bus Toulouse"
help_text = ("Vous pouvez demander : "
"Quand passe le prochain bus à l'arrêt Moulin Armand ?")
arret_bus_slot = "arret_bus"
destination_slot = "destination"
ligne_slot = "ligne"
sb = SkillBuilder()
@sb.request_handler(can_handle_func=is_request_type("LaunchRequest"))
def launch_request_handler(handler_input):
# Handler for Skill Launch
speech = "Bienvenue dans la skill des bus de Toulouse."
handler_input.response_builder.speak(
speech + " " + help_text).ask(help_text)
return handler_input.response_builder.response
@sb.request_handler(can_handle_func=is_intent_name("AMAZON.HelpIntent"))
def help_intent_handler(handler_input):
# Handler for Help Intent
handler_input.response_builder.speak(help_text).ask(help_text)
return handler_input.response_builder.response
@sb.request_handler(
can_handle_func=lambda input:
is_intent_name("AMAZON.CancelIntent")(input) or
is_intent_name("AMAZON.StopIntent")(input))
def cancel_and_stop_intent_handler(handler_input):
# Single handler for Cancel and Stop Intent
speech_text = "Au revoir!"
return handler_input.response_builder.speak(speech_text).response
@sb.request_handler(can_handle_func=is_request_type("SessionEndedRequest"))
def session_ended_request_handler(handler_input):
# Handler for Session End
return handler_input.response_builder.response
@sb.request_handler(can_handle_func=is_intent_name(
"demande_des_prochains_passages_a_un_arret"))
def demande_des_prochains_passages_a_un_arret(handler_input):
slots = handler_input.request_envelope.request.intent.slots
arret_bus_demande = None
destination = None
ligne = None
if arret_bus_slot in slots:
arret_bus_demande = slots[arret_bus_slot].value
if destination_slot in slots:
destination = slots[destination_slot].value
if ligne_slot in slots:
ligne = slots[ligne_slot].value
liste_p = prochains_passages(stop_area_name=arret_bus_demande,
destination=destination,
line=ligne)
if len(liste_p) < 1:
speech = "Dans les prochaines heures, \
aucun passage prévu à l'arrêt {}.".format(arret_bus_demande)
else:
speech = "A l'arrêt {}, ".format(arret_bus_demande)
for p in liste_p:
if p is None:
speech = "Dans les prochaines heures, \
aucun passage prévu à l'arrêt {}.".format(arret_bus_demande)
else:
if p.ligne == "A":
speech += "Le métro ligne {},".format(p.ligne)
elif p.ligne == "B":
speech += "Le métro ligne {}".format(p.ligne)
# No comma here because it is pronounced "Bi" and not "Bé"
elif p.ligne in ["T1", "T2"]:
speech += "Le tramway {},".format(p.ligne)
else:
speech += "Le bus {},".format(p.ligne)
speech += " à destination de {}, passera dans {}. ".format(
p.destination, p.timedelta_str)
handler_input.response_builder.speak(speech)
return handler_input.response_builder.response
def convert_speech_to_text(ssml_speech):
# convert ssml speech to text, by removing html tags
s = SSMLStripper()
s.feed(ssml_speech)
return s.get_data()
@sb.global_response_interceptor()
def add_card(handler_input, response):
# Add a card by translating ssml text to card content
response.card = SimpleCard(
title=skill_name,
content=convert_speech_to_text(response.output_speech.ssml))
@sb.global_response_interceptor()
def log_response(handler_input, response):
# Log response from alexa service
print("Alexa Response: {}\n".format(response))
@sb.global_request_interceptor()
def log_request(handler_input):
# Log request to alexa service
print("Alexa Request: {}\n".format(handler_input.request_envelope.request))
@sb.exception_handler(can_handle_func=lambda i, e: True)
def all_exception_handler(handler_input, exception):
# Catch all exception handler, log exception and
# respond with custom message
print("Encountered following exception: {}".format(exception))
speech = "Désolé, je n'ai pas compris. \
Dite aide pour obtenir des exemples d'utilisation."
handler_input.response_builder.speak(speech).ask(speech)
return handler_input.response_builder.response
# Handler to be provided in lambda console.
handler = sb.lambda_handler()
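A small usage sketch of the SSML-stripping helper defined above; the sample SSML string is made up for illustration:
sample_ssml = "<speak>Le bus 14 passera dans 5 minutes.</speak>"
print(convert_speech_to_text(sample_ssml))  # prints the sentence without the <speak> tags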
| 31.702857 | 79 | 0.682769 |
ddf51ee7ab73c924d4ac0eab9eb341db1075b8d0 | 807 | py | Python | add_mentions.py | UoA-eResearch/pivotonline_tweets | ce88efeb8eccbd0c2e6df5759638de47ce3f6065 | ["MIT"] | null | null | null | add_mentions.py | UoA-eResearch/pivotonline_tweets | ce88efeb8eccbd0c2e6df5759638de47ce3f6065 | ["MIT"] | null | null | null | add_mentions.py | UoA-eResearch/pivotonline_tweets | ce88efeb8eccbd0c2e6df5759638de47ce3f6065 | ["MIT"] | null | null | null |
#!/usr/bin/env python3
import json
from tqdm.auto import tqdm
with open("pivotonline_tweets_2020_twarc.geojson") as f:
tweets = json.load(f)
for i, tweet in enumerate(tqdm(tweets["features"])):
for other_tweet in tweets["features"]:
screen_name = other_tweet["properties"]["screen_name"]
if screen_name == tweet["properties"]["screen_name"]: # self mention
continue
if "@" + screen_name in tweet["properties"]["text"]:
if "mentions" not in tweets["features"][i]["properties"]:
tweets["features"][i]["properties"]["mentions"] = []
if screen_name not in tweets["features"][i]["properties"]["mentions"]:
tweets["features"][i]["properties"]["mentions"].append(screen_name)
print(json.dumps(tweets,indent=2))
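The pairwise loop above is quadratic in the number of tweets. A sketch of an alternative pass over the same tweets structure that matches whole @handle tokens against a precomputed set of screen names (note this token-based match is slightly stricter than the raw substring test used above):
import re

screen_names = {t["properties"]["screen_name"] for t in tweets["features"]}
for feature in tweets["features"]:
    props = feature["properties"]
    handles = set(re.findall(r"@(\w+)", props["text"]))
    for name in (handles & screen_names) - {props["screen_name"]}:
        props.setdefault("mentions", [])
        if name not in props["mentions"]:
            props["mentions"].append(name)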
| 40.35 | 83 | 0.636927 |
e9d629888b01d7055bf17c1cdd04f17fc8817029 | 5,279 | py | Python | sm-bbox/sm-bbox-annotation/sm-draw-bbox-annotation.py | RichardScottOZ/pyutil | 2ec893f5e24816382bb10a6987fed66363392631 | ["Apache-2.0"] | 2 | 2020-11-30T08:43:17.000Z | 2021-07-28T02:53:03.000Z | sm-bbox/sm-bbox-annotation/sm-draw-bbox-annotation.py | RichardScottOZ/pyutil | 2ec893f5e24816382bb10a6987fed66363392631 | ["Apache-2.0"] | 5 | 2020-07-19T16:13:11.000Z | 2021-07-22T05:23:42.000Z | sm-bbox/sm-bbox-annotation/sm-draw-bbox-annotation.py | RichardScottOZ/pyutil | 2ec893f5e24816382bb10a6987fed66363392631 | ["Apache-2.0"] | 6 | 2019-10-29T08:07:09.000Z | 2021-07-21T08:26:12.000Z |
#!/usr/bin/env python3
# Author: Verdi March
"""Usage: ./sm-draw-bbox-annotation.py --ann-input-dir $IMG_ROOT/annotations --img-input-dir $IMG_ROOT/images $IMG_ROOT/bboxed_images"""
import argparse
import cv2
import json
import logging
import numpy as np
import os
from pathlib import Path
from typing import Any, Dict, List, Tuple
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# For Python type annotations
Annotation_t = Dict[Any, Any]
Image_t = np.ndarray
class Bboxer(object):
DEFAULT = {
'ann_input_dir': './train_annotation',
'img_input_dir': './train',
'img_output_dir': None,
}
def __init__(self, **kwargs):
'''Create a new bboxer to draw bboxes from annotations to their
corresponding images.
Parameters: see Bboxer.DEFAULT
'''
for k,v in kwargs.items():
setattr(self, k,v)
# Convert string to Path object
self.ann_input_dir = Path(self.ann_input_dir)
self.img_input_dir = Path(self.img_input_dir)
# File names of all input annotations.
self.ann_input_fnames: List[str] = [str(p) for p in Path(self.ann_input_dir).glob('**/*.json')]
def apply_all(self):
'''Draw bboxes on all images.'''
for ann_fname in self.ann_input_fnames:
bboxed_img, obasename = self.apply(ann_fname)
# Save output image
ofname = os.path.join(self.img_output_dir, obasename)
cv2.imwrite(ofname, bboxed_img)
logger.debug(f'Wrote {ofname}')
def apply(self, ann_fname: str) -> Tuple[Image_t, str]:
'''Draw bboxes on an image.
:returns: (image, output_basename)
'''
logger.info(f'Draw bbox on image: {ann_fname}')
# Load JSON annotation and image of piece from local filesystem.
ann: Annotation_t = json.load(open(ann_fname))
image: Image_t = self.load_image(ann)
logger.debug(f'annotation (h x w) = {ann["image_size"][0]["height"]} x {ann["image_size"][0]["width"]}')
logger.debug(f'image (h x w) = {image.shape}')
# Draw bbox image on a copy, and return it + img filename.
bboxed_img = plot_img_with_bbox(image, ann)
obasename = ann['file'].rsplit('/', 1)[-1]
return bboxed_img, obasename
def load_image(self, ann: Annotation_t) -> Image_t:
'''Load a 3-channel image from local filesystem. This method will
map the S3 path of each image piece to local dir indicated in
        `img_input_dir`.
:param ann: piece annotation.
'''
local_img_fname: Path = self.img_input_dir / ann['file']
image: Image_t = cv2.imread(str(local_img_fname), 1)
return image
def plot_img_with_bbox(img: Image_t, d: Annotation_t):
'''Draw bboxes on the copy of the original image.'''
bboxed_img = img.copy()
for i,bbox in enumerate(d['annotations']):
# Get bbox coordinates
x_min, y_min = bbox['left'], bbox['top']
x_max, y_max = x_min + bbox['width'], y_min + bbox['height']
# Workaround broken annotation and use red color. Otherwise, use green
# color for good bboxes.
if bbox['width'] < 0 or bbox['height'] < 0:
logger.debug(f'Fixing bbox {i}...')
ori_xmin, ori_ymin, ori_xmax, ori_ymax = x_min, y_min, x_max, y_max
x_min, y_min = min(ori_xmin, ori_xmax), min(ori_ymin, ori_ymax)
x_max, y_max = max(ori_xmin, ori_xmax), max(ori_ymin, ori_ymax)
color = [0,0,255] # Red bboxes
else:
color = [0,255,0] # Green bboxes
# Log info
logger.debug(f'Bbox {i}: ({x_min}, {y_min}); ({x_max}, {y_max})')
if (y_min > img.shape[0]
or y_max > img.shape[0]
or x_min > img.shape[1]
or x_max > img.shape[1]):
logger.warning(f'Bbox {i} is partially or wholly outside image.')
        # Draw the rectangles. Note that bboxes wholly outside the image
        # are invisible.
cv2.rectangle(bboxed_img, pt1=(x_min, y_min), pt2=(x_max, y_max),
color=color, thickness=2)
cv2.putText(bboxed_img, f'{bbox["class_id"]}', (x_min+1,y_min+10), 1, 1, 255, 2)
return bboxed_img
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('img_output_dir', metavar='IMG_OUTPUT_DIR', help='Path to output images')
parser.add_argument('-i', '--ann-input-dir', help='Path to input annotations', default='./train_annotation')
parser.add_argument('-g', '--img-input-dir', help='Path to input images', default='./train')
parser.add_argument('-v', '--verbose', help='Verbose/debug mode', default=False, action='store_true')
args = parser.parse_args()
# Display selected configurations
logger.info(f'Annotation input dir: {args.ann_input_dir}')
logger.info(f'Image input dir: {args.img_input_dir}')
logger.info(f'Image output dir: {args.img_output_dir}')
# Set verbosity of logs
if args.verbose:
logger.setLevel(logging.DEBUG)
# Start grouping
opts = vars(args)
opts.pop('verbose')
p = Bboxer(**opts)
p.apply_all()
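Besides the CLI entry point above, the Bboxer class can be driven programmatically; a minimal sketch with placeholder paths (the output directory must already exist, since cv2.imwrite does not create it):
import os

os.makedirs('./bboxed_images', exist_ok=True)
bboxer = Bboxer(ann_input_dir='./train_annotation',
                img_input_dir='./train',
                img_output_dir='./bboxed_images')
bboxer.apply_all()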
| 34.503268 | 136 | 0.626255 |
e3bc54f86e5db496421279ffd5c11d28d7c31618 | 389 | py | Python | apps/users/migrations/0010_profile_rod.py | lucasjaroszewski/incremental-game | bae8823f986be0fd046bd50195d43fbc548fad90 | ["MIT"] | null | null | null | apps/users/migrations/0010_profile_rod.py | lucasjaroszewski/incremental-game | bae8823f986be0fd046bd50195d43fbc548fad90 | ["MIT"] | 5 | 2021-06-09T17:54:51.000Z | 2022-03-12T00:46:49.000Z | apps/users/migrations/0010_profile_rod.py | lucasjaroszewski/incremental-game | bae8823f986be0fd046bd50195d43fbc548fad90 | ["MIT"] | 1 | 2020-09-27T18:26:15.000Z | 2020-09-27T18:26:15.000Z |
# Generated by Django 3.0.6 on 2020-07-30 20:57
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0009_auto_20200730_1732'),
]
operations = [
migrations.AddField(
model_name='profile',
name='rod',
field=models.PositiveIntegerField(default='1'),
),
]
| 20.473684 | 59 | 0.596401 |