| idx (int64, 0–63k) | question (string, 53–5.28k chars) | target (string, 5–805 chars) |
|---|---|---|
900
|
def gpu_iuwt_recomposition ( in1 , scale_adjust , store_on_gpu , smoothed_array ) : wavelet_filter = ( 1. / 16 ) * np . array ( [ 1 , 4 , 6 , 4 , 1 ] , dtype = np . float32 ) wavelet_filter = gpuarray . to_gpu_async ( wavelet_filter ) max_scale = in1 . shape [ 0 ] + scale_adjust if smoothed_array is None : recomposition = gpuarray . zeros ( [ in1 . shape [ 1 ] , in1 . shape [ 2 ] ] , np . float32 ) else : recomposition = gpuarray . to_gpu ( smoothed_array . astype ( np . float32 ) ) try : gpu_in1 = gpuarray . to_gpu_async ( in1 . astype ( np . float32 ) ) except : gpu_in1 = in1 gpu_tmp = gpuarray . empty_like ( recomposition ) gpu_scale = gpuarray . zeros ( [ 1 ] , np . int32 ) gpu_scale += max_scale - 1 gpu_a_trous_row_kernel , gpu_a_trous_col_kernel = gpu_a_trous ( ) grid_rows = int ( in1 . shape [ 1 ] // 32 ) grid_cols = int ( in1 . shape [ 2 ] // 32 ) for i in range ( max_scale - 1 , scale_adjust - 1 , - 1 ) : gpu_a_trous_row_kernel ( recomposition , gpu_tmp , wavelet_filter , gpu_scale , block = ( 32 , 32 , 1 ) , grid = ( grid_cols , grid_rows ) ) gpu_a_trous_col_kernel ( gpu_tmp , recomposition , wavelet_filter , gpu_scale , block = ( 32 , 32 , 1 ) , grid = ( grid_cols , grid_rows ) ) recomposition = recomposition [ : , : ] + gpu_in1 [ i - scale_adjust , : , : ] gpu_scale -= 1 if scale_adjust > 0 : for i in range ( scale_adjust - 1 , - 1 , - 1 ) : gpu_a_trous_row_kernel ( recomposition , gpu_tmp , wavelet_filter , gpu_scale , block = ( 32 , 32 , 1 ) , grid = ( grid_cols , grid_rows ) ) gpu_a_trous_col_kernel ( gpu_tmp , recomposition , wavelet_filter , gpu_scale , block = ( 32 , 32 , 1 ) , grid = ( grid_cols , grid_rows ) ) gpu_scale -= 1 if store_on_gpu : return recomposition else : return recomposition . get ( )
|
This function calls the à trous algorithm code to recompose the input into a single array. It is the GPU implementation of the isotropic undecimated wavelet transform (IUWT) recomposition.
|
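Row 900's GPU path depends on PyCUDA kernels. As a point of reference, here is a minimal CPU sketch of the same recomposition idea, assuming a 3D stack of detail coefficients and the standard B3-spline filter; `iuwt_recompose_cpu` is an illustrative helper, not part of the library:

```python
import numpy as np
from scipy.ndimage import convolve1d

def iuwt_recompose_cpu(detail_coeffs, smoothed=None):
    # Illustrative IUWT recomposition: starting from the coarsest scale,
    # smooth with the scale-dilated filter and add back the detail plane.
    filt = (1.0 / 16) * np.array([1, 4, 6, 4, 1])
    out = (np.zeros(detail_coeffs.shape[1:]) if smoothed is None
           else smoothed.astype(np.float64))
    for i in range(detail_coeffs.shape[0] - 1, -1, -1):
        step = 2 ** i
        dilated = np.zeros(4 * step + 1)
        dilated[::step] = filt            # a trous: zeros between filter taps
        out = convolve1d(out, dilated, axis=0, mode='mirror')
        out = convolve1d(out, dilated, axis=1, mode='mirror')
        out = out + detail_coeffs[i]
    return out
```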
901
|
def unauth ( request ) : if check_key ( request ) : api = get_api ( request ) request . session . clear ( ) logout ( request ) return HttpResponseRedirect ( reverse ( 'main' ) )
|
logout and remove all session data
|
902
|
def info ( request ) : if check_key ( request ) : api = get_api ( request ) user = api . users ( id = 'self' ) print ( dir ( user ) ) return render_to_response ( 'djfoursquare/info.html' , { 'user' : user } ) else : return HttpResponseRedirect ( reverse ( 'main' ) )
|
display some user info to show we have authenticated successfully
|
903
|
def check_key ( request ) : try : access_key = request . session . get ( 'oauth_token' , None ) if not access_key : return False except KeyError : return False return True
|
Check to see if we already have an access_key stored. If we do, then we have already gone through OAuth. If not, then we haven't, and we probably need to.
|
904
|
def stream_timeout ( stream , timeout , timeout_msg = None ) : timed_out = threading . Event ( ) def timeout_func ( ) : timed_out . set ( ) stream . close ( ) timer = threading . Timer ( timeout , timeout_func ) try : timer . start ( ) for item in stream : yield item if timed_out . is_set ( ) : raise TimeoutError ( timeout_msg ) finally : timer . cancel ( ) if hasattr ( stream , '_response' ) : stream . _response . close ( )
|
Iterate over items in a streaming response from the Docker client within a timeout .
|
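A brief usage sketch for row 904, assuming a `docker` client and a running container; the names are illustrative:

```python
import docker

client = docker.from_env()
container = client.containers.get('web')  # hypothetical container name

# Iterate over streamed log lines, aborting after 5 seconds in total.
try:
    for line in stream_timeout(container.logs(stream=True), 5.0,
                               'Timed out streaming logs'):
        print(line.decode('utf-8'), end='')
except TimeoutError as exc:
    print('Stream ended early:', exc)
```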
905
|
def get_state ( self , caller ) : if caller in self . state : return self . state [ caller ] else : rv = self . state [ caller ] = DictObject ( ) return rv
|
Get per-program state.
|
906
|
def name_to_system_object ( self , value ) : if not self . system : raise SystemNotReady if isinstance ( value , ( str , Object ) ) : rv = self . system . name_to_system_object ( value ) return rv if rv else value else : return value
|
Return object for given name registered in System namespace .
|
907
|
def cancel ( self , caller ) : for o in { i for i in self . children if isinstance ( i , AbstractCallable ) } : o . cancel ( caller )
|
Recursively cancel all threaded background processes of this Callable. This is called automatically for actions if the program deactivates.
|
908
|
def give_str ( self ) : args = self . _args [ : ] kwargs = self . _kwargs return self . _give_str ( args , kwargs )
|
Give string representation of the callable .
|
909
|
def _make_request ( self , endpoint , params ) : full_params = self . _base_params . copy ( ) full_params . update ( params ) try : r = requests . get ( endpoint , full_params ) data = r . json ( ) if r . status_code == 401 and not endpoint . endswith ( 'lookup' ) : raise exceptions . UnauthorizedKeyError elif r . status_code == 400 and not endpoint . endswith ( 'shorten' ) : raise exceptions . BadApiRequest elif r . status_code == 500 : raise exceptions . ServerOrConnectionError return data , r except ValueError as e : raise exceptions . BadApiResponse ( e ) except requests . RequestException : raise exceptions . ServerOrConnectionError
|
Prepares the request, catches common errors, and returns a tuple of the data and the request response.
|
910
|
def shorten ( self , long_url , custom_ending = None , is_secret = False ) : params = { 'url' : long_url , 'is_secret' : 'true' if is_secret else 'false' , 'custom_ending' : custom_ending } data , r = self . _make_request ( self . api_shorten_endpoint , params ) if r . status_code == 400 : if custom_ending is not None : raise exceptions . CustomEndingUnavailable ( custom_ending ) raise exceptions . BadApiRequest elif r . status_code == 403 : raise exceptions . QuotaExceededError action = data . get ( 'action' ) short_url = data . get ( 'result' ) if action == 'shorten' and short_url is not None : return short_url raise exceptions . DebugTempWarning
|
Creates a short url if valid
|
911
|
def _get_ending ( self , lookup_url ) : if lookup_url . startswith ( self . api_server ) : return lookup_url [ len ( self . api_server ) + 1 : ] return lookup_url
|
Returns the short URL ending from a short URL or a short URL ending.
|
912
|
def lookup ( self , lookup_url , url_key = None ) : url_ending = self . _get_ending ( lookup_url ) params = { 'url_ending' : url_ending , 'url_key' : url_key } data , r = self . _make_request ( self . api_lookup_endpoint , params ) if r . status_code == 401 : if url_key is not None : raise exceptions . UnauthorizedKeyError ( 'given url_key is not valid for secret lookup.' ) raise exceptions . UnauthorizedKeyError elif r . status_code == 404 : return False action = data . get ( 'action' ) full_url = data . get ( 'result' ) if action == 'lookup' and full_url is not None : return full_url raise exceptions . DebugTempWarning
|
Looks up the url_ending to obtain information about the short url .
|
913
|
def make_argparser ( ) : parser = argparse . ArgumentParser ( prog = 'mypolr' , description = "Interacts with the Polr Project's API.\n\n" "User Guide and documentation: https://mypolr.readthedocs.io" , formatter_class = argparse . ArgumentDefaultsHelpFormatter , epilog = "NOTE: if configurations are saved, they are stored as plain text on disk, " "and can be read by anyone with access to the file." ) parser . add_argument ( "-v" , "--version" , action = "store_true" , help = "Print version and exit." ) parser . add_argument ( "url" , nargs = '?' , default = None , help = "The url to process." ) api_group = parser . add_argument_group ( 'API server arguments' , 'Use these to configure the API. Can be stored locally with --save.' ) api_group . add_argument ( "-s" , "--server" , default = None , help = "Server hosting the API." ) api_group . add_argument ( "-k" , "--key" , default = None , help = "API_KEY to authenticate against server." ) api_group . add_argument ( "--api-root" , default = DEFAULT_API_ROOT , help = "API endpoint root." ) option_group = parser . add_argument_group ( 'Action options' , 'Configure the API action to use.' ) option_group . add_argument ( "-c" , "--custom" , default = None , help = "Custom short url ending." ) option_group . add_argument ( "--secret" , action = "store_true" , help = "Set option if using secret url." ) option_group . add_argument ( "-l" , "--lookup" , action = "store_true" , help = "Perform lookup action instead of shorten action." ) manage_group = parser . add_argument_group ( 'Manage credentials' , 'Use these to save, delete or update SERVER, KEY and/or ' 'API_ROOT locally in ~/.mypolr/config.ini.' ) manage_group . add_argument ( "--save" , action = "store_true" , help = "Save configuration (including credentials) in plaintext(!)." ) manage_group . add_argument ( "--clear" , action = "store_true" , help = "Clear configuration." ) return parser
|
Set up the argparse arguments.
|
914
|
def estimate_threshold ( in1 , edge_excl = 0 , int_excl = 0 ) : out1 = np . empty ( [ in1 . shape [ 0 ] ] ) mid = in1 . shape [ 1 ] // 2 if ( edge_excl != 0 ) | ( int_excl != 0 ) : if edge_excl != 0 : mask = np . zeros ( [ in1 . shape [ 1 ] , in1 . shape [ 2 ] ] ) mask [ edge_excl : - edge_excl , edge_excl : - edge_excl ] = 1 else : mask = np . ones ( [ in1 . shape [ 1 ] , in1 . shape [ 2 ] ] ) if int_excl != 0 : mask [ mid - int_excl : mid + int_excl , mid - int_excl : mid + int_excl ] = 0 else : mask = np . ones ( [ in1 . shape [ 1 ] , in1 . shape [ 2 ] ] ) for i in range ( in1 . shape [ 0 ] ) : out1 [ i ] = np . median ( np . abs ( in1 [ i , mask == 1 ] ) ) / 0.6745 return out1
|
This function estimates the noise using the MAD estimator .
|
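The 0.6745 divisor in row 914 converts the median absolute deviation of zero-median Gaussian data into a standard-deviation estimate (0.6745 is the 75th percentile of the standard normal distribution). A quick self-contained check:

```python
import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(0.0, 2.5, size=100_000)     # known sigma = 2.5
sigma_mad = np.median(np.abs(x)) / 0.6745  # MAD-based estimate for zero-mean data
print(round(sigma_mad, 2))                 # ~2.5, and robust to outliers
```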
915
|
def source_extraction ( in1 , tolerance , mode = "cpu" , store_on_gpu = False , neg_comp = False ) : if mode == "cpu" : return cpu_source_extraction ( in1 , tolerance , neg_comp ) elif mode == "gpu" : return gpu_source_extraction ( in1 , tolerance , store_on_gpu , neg_comp )
|
Convenience function for allocating work to the CPU or GPU depending on the selected mode.
|
916
|
def cpu_source_extraction ( in1 , tolerance , neg_comp ) : scale_maxima = np . empty ( [ in1 . shape [ 0 ] , 1 ] ) objects = np . empty_like ( in1 , dtype = np . int32 ) object_count = np . empty ( [ in1 . shape [ 0 ] , 1 ] , dtype = np . int32 ) for i in range ( in1 . shape [ 0 ] ) : if neg_comp : scale_maxima [ i ] = np . max ( abs ( in1 [ i , : , : ] ) ) else : scale_maxima [ i ] = np . max ( in1 [ i , : , : ] ) objects [ i , : , : ] , object_count [ i ] = ndimage . label ( in1 [ i , : , : ] , structure = [ [ 1 , 1 , 1 ] , [ 1 , 1 , 1 ] , [ 1 , 1 , 1 ] ] ) for i in range ( - 1 , - in1 . shape [ 0 ] - 1 , - 1 ) : if neg_comp : if i == ( - 1 ) : tmp = ( abs ( in1 [ i , : , : ] ) >= ( tolerance * scale_maxima [ i ] ) ) * objects [ i , : , : ] else : tmp = ( abs ( in1 [ i , : , : ] ) >= ( tolerance * scale_maxima [ i ] ) ) * objects [ i , : , : ] * objects [ i + 1 , : , : ] else : if i == ( - 1 ) : tmp = ( in1 [ i , : , : ] >= ( tolerance * scale_maxima [ i ] ) ) * objects [ i , : , : ] else : tmp = ( in1 [ i , : , : ] >= ( tolerance * scale_maxima [ i ] ) ) * objects [ i , : , : ] * objects [ i + 1 , : , : ] labels = np . unique ( tmp [ tmp > 0 ] ) for j in labels : objects [ i , ( objects [ i , : , : ] == j ) ] = - 1 objects [ i , ( objects [ i , : , : ] > 0 ) ] = 0 objects [ i , : , : ] = - ( objects [ i , : , : ] ) return objects * in1 , objects
|
The following function determines connectivity within a given wavelet decomposition. These connected and labelled structures are thresholded to within some tolerance of the maximum coefficient at the scale. This determines whether or not an object is to be considered significant. Significant objects are extracted and factored into a mask which is finally multiplied by the wavelet coefficients to return only wavelet coefficients belonging to significant objects across all scales.
|
917
|
def snr_ratio ( in1 , in2 ) : out1 = 20 * ( np . log10 ( np . linalg . norm ( in1 ) / np . linalg . norm ( in1 - in2 ) ) ) return out1
|
The following function simply calculates the signal to noise ratio between two signals .
|
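Row 917 computes SNR = 20 log10(||in1|| / ||in1 - in2||) in decibels, with in1 the reference and in2 the estimate. A small sanity check:

```python
import numpy as np

signal = np.ones(100)
noise = np.random.default_rng(1).normal(0.0, 0.1, 100)
# The noise norm is roughly a tenth of the signal norm, so expect ~20 dB.
print(20 * np.log10(np.linalg.norm(signal) / np.linalg.norm(noise)))
```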
918
|
def wait_for_start ( self ) : er = self . exec_rabbitmqctl ( 'wait' , [ '--pid' , '1' , '--timeout' , str ( int ( self . wait_timeout ) ) ] ) output_lines ( er , error_exc = TimeoutError )
|
Wait for the RabbitMQ process to come up.
|
919
|
def exec_rabbitmqctl ( self , command , args = [ ] , rabbitmqctl_opts = [ '-q' ] ) : cmd = [ 'rabbitmqctl' ] + rabbitmqctl_opts + [ command ] + args return self . inner ( ) . exec_run ( cmd )
|
Execute a rabbitmqctl command inside a running container .
|
920
|
def exec_rabbitmqctl_list ( self , resources , args = [ ] , rabbitmq_opts = [ '-q' , '--no-table-headers' ] ) : command = 'list_{}' . format ( resources ) return self . exec_rabbitmqctl ( command , args , rabbitmq_opts )
|
Execute a rabbitmqctl command to list the given resources .
|
921
|
def list_users ( self ) : lines = output_lines ( self . exec_rabbitmqctl_list ( 'users' ) ) return [ _parse_rabbitmq_user ( line ) for line in lines ]
|
Run the list_users command and return a list of tuples describing the users .
|
922
|
def broker_url ( self ) : return 'amqp://{}:{}@{}/{}' . format ( self . user , self . password , self . name , self . vhost )
|
Returns a broker URL for use with Celery .
|
923
|
def exec_pg_success ( self , cmd ) : result = self . inner ( ) . exec_run ( cmd , user = 'postgres' ) assert result . exit_code == 0 , result . output . decode ( 'utf-8' ) return result
|
Execute a command inside a running container as the postgres user asserting success .
|
924
|
def clean ( self ) : self . exec_pg_success ( [ 'dropdb' , '-U' , self . user , self . database ] ) self . exec_pg_success ( [ 'createdb' , '-U' , self . user , self . database ] )
|
Remove all data by dropping and recreating the configured database .
|
925
|
def exec_psql ( self , command , psql_opts = [ '-qtA' ] ) : cmd = [ 'psql' ] + psql_opts + [ '--dbname' , self . database , '-U' , self . user , '-c' , command , ] return self . inner ( ) . exec_run ( cmd , user = 'postgres' )
|
Execute a psql command inside a running container. By default it connects to the container's database.
|
926
|
def list_databases ( self ) : lines = output_lines ( self . exec_psql ( '\\list' ) ) return [ line . split ( '|' ) for line in lines ]
|
Runs the \\list command and returns a list of column values with information about all databases.
|
927
|
def list_tables ( self ) : lines = output_lines ( self . exec_psql ( '\\dt' ) ) return [ line . split ( '|' ) for line in lines ]
|
Runs the \\dt command and returns a list of column values with information about all tables in the database.
|
928
|
def list_users ( self ) : lines = output_lines ( self . exec_psql ( '\\du' ) ) return [ line . split ( '|' ) for line in lines ]
|
Runs the \\du command and returns a list of column values with information about all user roles.
|
929
|
def database_url ( self ) : return 'postgres://{}:{}@{}/{}' . format ( self . user , self . password , self . name , self . database )
|
Returns a database URL for use with DJ-Database-URL and similar libraries.
|
930
|
def from_config ( config ) : matrix = { } variables = config . keys ( ) for entries in product ( * config . values ( ) ) : combination = dict ( zip ( variables , entries ) ) include = True for value in combination . values ( ) : for reducer in value . reducers : if reducer . pattern == '-' : match = not combination [ reducer . variable ] . value else : match = fnmatch ( combination [ reducer . variable ] . value , reducer . pattern ) if match if reducer . is_exclude else not match : include = False if include : key = '-' . join ( entry . alias for entry in entries if entry . alias ) data = dict ( zip ( variables , ( entry . value for entry in entries ) ) ) if key in matrix and data != matrix [ key ] : raise DuplicateEnvironment ( key , data , matrix [ key ] ) matrix [ key ] = data return matrix
|
Generate a matrix from a configuration dictionary .
|
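The entry and reducer types consumed by row 930 are not shown; here is a minimal sketch with stand-in types whose shapes are inferred from the attribute accesses in the code (the Entry and Reducer definitions are assumptions):

```python
from collections import namedtuple

# Hypothetical stand-ins inferred from from_config's attribute accesses.
Entry = namedtuple('Entry', 'alias value reducers')
Reducer = namedtuple('Reducer', 'variable pattern is_exclude')

config = {
    'python': [Entry('py27', '2.7', ()), Entry('py36', '3.6', ())],
    'django': [
        Entry('dj111', '1.11', ()),
        # Exclude Django 2.0 from combinations where python matches '2.7'.
        Entry('dj20', '2.0', (Reducer('python', '2.7', True),)),
    ],
}

# from_config(config) would then produce keys such as
# 'py27-dj111', 'py36-dj111' and 'py36-dj20'.
```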
931
|
def flush ( self ) : self . logger . debug ( 'Flush joining' ) self . queue . join ( ) self . logger . debug ( 'Flush joining ready' )
|
This only needs to be called manually from unit tests.
|
932
|
def output_lines ( output , encoding = 'utf-8' , error_exc = None ) : if isinstance ( output , ExecResult ) : exit_code , output = output if exit_code != 0 and error_exc is not None : raise error_exc ( output . decode ( encoding ) ) return output . decode ( encoding ) . splitlines ( )
|
Convert bytestring container output or the result of a container exec command into a sequence of unicode lines .
|
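Typical usage of row 932, assuming a Docker `exec_run` result; the container and command are illustrative:

```python
# Raise RuntimeError (with the decoded output) on a non-zero exit code,
# otherwise return the output as a list of unicode lines.
result = container.exec_run(['ls', '/etc'])
lines = output_lines(result, error_exc=RuntimeError)
```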
933
|
def get_file ( self , fid ) : url = self . get_file_url ( fid ) return self . conn . get_raw_data ( url )
|
Get file from WeedFS .
|
934
|
def get_file_url ( self , fid , public = None ) : try : volume_id , rest = fid . strip ( ) . split ( "," ) except ValueError : raise BadFidFormat ( "fid must be in format: <volume_id>,<file_name_hash>" ) file_location = self . get_file_location ( volume_id ) if public is None : public = self . use_public_url volume_url = file_location . public_url if public else file_location . url url = "http://{volume_url}/{fid}" . format ( volume_url = volume_url , fid = fid ) return url
|
Get url for the file
|
935
|
def get_file_location ( self , volume_id ) : url = ( "http://{master_addr}:{master_port}/" "dir/lookup?volumeId={volume_id}" ) . format ( master_addr = self . master_addr , master_port = self . master_port , volume_id = volume_id ) data = json . loads ( self . conn . get_data ( url ) ) _file_location = random . choice ( data [ 'locations' ] ) FileLocation = namedtuple ( 'FileLocation' , "public_url url" ) return FileLocation ( _file_location [ 'publicUrl' ] , _file_location [ 'url' ] )
|
Get the location of the file. The WeedFS volume is chosen randomly.
|
936
|
def get_file_size ( self , fid ) : url = self . get_file_url ( fid ) res = self . conn . head ( url ) if res is not None : size = res . headers . get ( "content-length" , None ) if size is not None : return int ( size ) return None
|
Gets the size of an uploaded file, or None if the file doesn't exist.
|
937
|
def file_exists ( self , fid ) : res = self . get_file_size ( fid ) if res is not None : return True return False
|
Checks if a file with the provided fid exists.
|
938
|
def delete_file ( self , fid ) : url = self . get_file_url ( fid ) return self . conn . delete_data ( url )
|
Delete file from WeedFS
|
939
|
def upload_file ( self , path = None , stream = None , name = None , ** kwargs ) : params = "&" . join ( [ "%s=%s" % ( k , v ) for k , v in kwargs . items ( ) ] ) url = "http://{master_addr}:{master_port}/dir/assign{params}" . format ( master_addr = self . master_addr , master_port = self . master_port , params = "?" + params if params else '' ) data = json . loads ( self . conn . get_data ( url ) ) if data . get ( "error" ) is not None : return None post_url = "http://{url}/{fid}" . format ( url = data [ 'publicUrl' if self . use_public_url else 'url' ] , fid = data [ 'fid' ] ) if path is not None : filename = os . path . basename ( path ) with open ( path , "rb" ) as file_stream : res = self . conn . post_file ( post_url , filename , file_stream ) elif stream is not None and name is not None : res = self . conn . post_file ( post_url , name , stream ) else : raise ValueError ( "If `path` is None then *both* `stream` and `name` must not" " be None " ) response_data = json . loads ( res ) if "size" in response_data : return data . get ( 'fid' ) return None
|
Uploads file to WeedFS
|
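A plausible round trip for rows 933-939, assuming the client is constructed with the master host and port, as the attribute names suggest:

```python
weed = WeedFS('localhost', 9333)      # assumed constructor signature

fid = weed.upload_file(path='/tmp/example.jpg')
if fid is not None:
    print(weed.get_file_url(fid))     # e.g. http://<volume>/<fid>
    print(weed.get_file_size(fid))
    weed.delete_file(fid)
```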
940
|
def vacuum ( self , threshold = 0.3 ) : url = ( "http://{master_addr}:{master_port}/" "vol/vacuum?garbageThreshold={threshold}" ) . format ( master_addr = self . master_addr , master_port = self . master_port , threshold = threshold ) res = self . conn . get_data ( url ) if res is not None : return True return False
|
Force garbage collection
|
941
|
def version ( self ) : url = "http://{master_addr}:{master_port}/dir/status" . format ( master_addr = self . master_addr , master_port = self . master_port ) data = self . conn . get_data ( url ) response_data = json . loads ( data ) return response_data . get ( "Version" )
|
Returns the WeedFS master version.
|
942
|
def fft_convolve ( in1 , in2 , conv_device = "cpu" , conv_mode = "linear" , store_on_gpu = False ) : if conv_device == 'gpu' : if conv_mode == "linear" : fft_in1 = pad_array ( in1 ) fft_in1 = gpu_r2c_fft ( fft_in1 , store_on_gpu = True ) fft_in2 = in2 conv_in1_in2 = fft_in1 * fft_in2 conv_in1_in2 = contiguous_slice ( fft_shift ( gpu_c2r_ifft ( conv_in1_in2 , is_gpuarray = True , store_on_gpu = True ) ) ) if store_on_gpu : return conv_in1_in2 else : return conv_in1_in2 . get ( ) elif conv_mode == "circular" : fft_in1 = gpu_r2c_fft ( in1 , store_on_gpu = True ) fft_in2 = in2 conv_in1_in2 = fft_in1 * fft_in2 conv_in1_in2 = fft_shift ( gpu_c2r_ifft ( conv_in1_in2 , is_gpuarray = True , store_on_gpu = True ) ) if store_on_gpu : return conv_in1_in2 else : return conv_in1_in2 . get ( ) else : if conv_mode == "linear" : fft_in1 = pad_array ( in1 ) fft_in2 = in2 out1_slice = tuple ( slice ( sz // 2 , 3 * sz // 2 ) for sz in in1 . shape ) return np . require ( np . fft . fftshift ( np . fft . irfft2 ( fft_in2 * np . fft . rfft2 ( fft_in1 ) ) ) [ out1_slice ] , np . float32 , 'C' ) elif conv_mode == "circular" : return np . fft . fftshift ( np . fft . irfft2 ( in2 * np . fft . rfft2 ( in1 ) ) )
|
This function determines the convolution of two inputs using the FFT . Contains an implementation for both CPU and GPU .
|
943
|
def gpu_r2c_fft ( in1 , is_gpuarray = False , store_on_gpu = False ) : if is_gpuarray : gpu_in1 = in1 else : gpu_in1 = gpuarray . to_gpu_async ( in1 . astype ( np . float32 ) ) output_size = np . array ( in1 . shape ) output_size [ 1 ] = 0.5 * output_size [ 1 ] + 1 gpu_out1 = gpuarray . empty ( [ output_size [ 0 ] , output_size [ 1 ] ] , np . complex64 ) gpu_plan = Plan ( gpu_in1 . shape , np . float32 , np . complex64 ) fft ( gpu_in1 , gpu_out1 , gpu_plan ) if store_on_gpu : return gpu_out1 else : return gpu_out1 . get ( )
|
This function makes use of the scikits implementation of the FFT for GPUs to take the real to complex FFT .
|
944
|
def gpu_c2r_ifft ( in1 , is_gpuarray = False , store_on_gpu = False ) : if is_gpuarray : gpu_in1 = in1 else : gpu_in1 = gpuarray . to_gpu_async ( in1 . astype ( np . complex64 ) ) output_size = np . array ( in1 . shape ) output_size [ 1 ] = 2 * ( output_size [ 1 ] - 1 ) gpu_out1 = gpuarray . empty ( [ output_size [ 0 ] , output_size [ 1 ] ] , np . float32 ) gpu_plan = Plan ( output_size , np . complex64 , np . float32 ) ifft ( gpu_in1 , gpu_out1 , gpu_plan ) scale_fft ( gpu_out1 ) if store_on_gpu : return gpu_out1 else : return gpu_out1 . get ( )
|
This function makes use of the scikits implementation of the FFT for GPUs to take the complex to real IFFT .
|
945
|
def pad_array ( in1 ) : padded_size = 2 * np . array ( in1 . shape ) out1 = np . zeros ( [ padded_size [ 0 ] , padded_size [ 1 ] ] ) out1 [ padded_size [ 0 ] // 4 : 3 * padded_size [ 0 ] // 4 , padded_size [ 1 ] // 4 : 3 * padded_size [ 1 ] // 4 ] = in1 return out1
|
Simple convenience function to pad arrays for linear convolution .
|
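With the integer division fixed above, row 945 centres the input in an array of twice the size:

```python
import numpy as np

a = np.arange(4, dtype=np.float32).reshape(2, 2)
padded = pad_array(a)
print(padded.shape)      # (4, 4)
print(padded[1:3, 1:3])  # the original 2x2 block; zeros elsewhere
```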
946
|
def is_dragon ( host , timeout = 1 ) : try : r = requests . get ( 'http://{}/' . format ( host ) , timeout = timeout ) if r . status_code == 200 : if '<title>DragonMint</title>' in r . text or '<title>AsicMiner</title>' in r . text : return True except requests . exceptions . RequestException : pass return False
|
Check if host is a dragon .
|
947
|
def updatePools ( self , pool1 , username1 , password1 , pool2 = None , username2 = None , password2 = None , pool3 = None , username3 = None , password3 = None ) : return self . __post ( '/api/updatePools' , data = { 'Pool1' : pool1 , 'UserName1' : username1 , 'Password1' : password1 , 'Pool2' : pool2 , 'UserName2' : username2 , 'Password2' : password2 , 'Pool3' : pool3 , 'UserName3' : username3 , 'Password3' : password3 , } )
|
Change the pools of the miner . This call will restart cgminer .
|
948
|
def updatePassword ( self , user , currentPassword , newPassword ) : return self . __post ( '/api/updatePassword' , data = { 'user' : user , 'currentPassword' : currentPassword , 'newPassword' : newPassword } )
|
Change the password of a user .
|
949
|
def updateNetwork ( self , dhcp = 'dhcp' , ipaddress = None , netmask = None , gateway = None , dns = None ) : return self . __post ( '/api/updateNetwork' , data = { 'dhcp' : dhcp , 'ipaddress' : ipaddress , 'netmask' : netmask , 'gateway' : gateway , 'dns' : json . dumps ( dns ) } )
|
Change the current network settings .
|
950
|
def upgradeUpload ( self , file ) : files = { 'upfile' : open ( file , 'rb' ) } return self . __post_files ( '/upgrade/upload' , files = files )
|
Upgrade the firmware of the miner .
|
951
|
def is_program ( self ) : from automate . callables import Empty return not ( isinstance ( self . on_activate , Empty ) and isinstance ( self . on_deactivate , Empty ) and isinstance ( self . on_update , Empty ) )
|
A property which can be used to check if StatusObject uses program features or not .
|
952
|
def get_as_datadict ( self ) : d = super ( ) . get_as_datadict ( ) d . update ( dict ( status = self . status , data_type = self . data_type , editable = self . editable ) ) return d
|
Get data of this object as a data dictionary . Used by websocket service .
|
953
|
def _do_change_status ( self , status , force = False ) : self . system . worker_thread . put ( DummyStatusWorkerTask ( self . _request_status_change_in_queue , status , force = force ) )
|
This function is called by set_status, and by _update_program_stack if the active program is being changed (which may be triggered by a sensor status change). The status lock is necessary because these calls happen from different threads.
|
954
|
def activate_program ( self , program ) : self . logger . debug ( "activate_program %s" , program ) if program in self . program_stack : return with self . _program_lock : self . logger . debug ( "activate_program got through %s" , program ) self . program_stack . append ( program ) self . _update_program_stack ( )
|
Called by a program that desires to manipulate this actuator, when the program is activated.
|
955
|
def deactivate_program ( self , program ) : self . logger . debug ( "deactivate_program %s" , program ) with self . _program_lock : self . logger . debug ( "deactivate_program got through %s" , program ) if program not in self . program_stack : import ipdb ipdb . set_trace ( ) self . program_stack . remove ( program ) if program in self . program_status : del self . program_status [ program ] self . _update_program_stack ( )
|
Called by program when it is deactivated .
|
956
|
def stream_logs ( container , timeout = 10.0 , ** logs_kwargs ) : stream = container . logs ( stream = True , ** logs_kwargs ) return stream_timeout ( stream , timeout , 'Timeout waiting for container logs.' )
|
Stream logs from a Docker container within a timeout .
|
957
|
def fetch_image ( client , name ) : try : image = client . images . get ( name ) except docker . errors . ImageNotFound : name , tag = _parse_image_tag ( name ) tag = 'latest' if tag is None else tag log . info ( "Pulling tag '{}' for image '{}'..." . format ( tag , name ) ) image = client . images . pull ( name , tag = tag ) log . debug ( "Found image '{}' for tag '{}'" . format ( image . id , name ) ) return image
|
Fetch an image if it isn't already present.
|
958
|
def _get_id_and_model ( self , id_or_model ) : if isinstance ( id_or_model , self . collection . model ) : model = id_or_model elif isinstance ( id_or_model , str ) : model = self . collection . get ( id_or_model ) else : raise TypeError ( 'Unexpected type {}, expected {} or {}' . format ( type ( id_or_model ) , str , self . collection . model ) ) return model . id , model
|
Get both the model and ID of an object that could be an ID or a model .
|
959
|
def create ( self , name , * args , ** kwargs ) : resource_name = self . _resource_name ( name ) log . info ( "Creating {} '{}'..." . format ( self . _model_name , resource_name ) ) resource = self . collection . create ( * args , name = resource_name , ** kwargs ) self . _ids . add ( resource . id ) return resource
|
Create an instance of this resource type .
|
960
|
def remove ( self , resource , ** kwargs ) : log . info ( "Removing {} '{}'..." . format ( self . _model_name , resource . name ) ) resource . remove ( ** kwargs ) self . _ids . remove ( resource . id )
|
Remove an instance of this resource type .
|
961
|
def remove ( self , container , force = True , volumes = True ) : super ( ) . remove ( container , force = force , v = volumes )
|
Remove a container .
|
962
|
def get_default ( self , create = True ) : if self . _default_network is None and create : log . debug ( "Creating default network..." ) self . _default_network = self . create ( 'default' , driver = 'bridge' ) return self . _default_network
|
Get the default bridge network that containers are connected to if no other network options are specified .
|
963
|
def _helper_for_model ( self , model_type ) : if model_type is models . containers . Container : return self . containers if model_type is models . images . Image : return self . images if model_type is models . networks . Network : return self . networks if model_type is models . volumes . Volume : return self . volumes raise ValueError ( 'Unknown model type {}' . format ( model_type ) )
|
Get the helper for a given type of Docker model . For use by resource definitions .
|
964
|
def teardown ( self ) : self . containers . _teardown ( ) self . networks . _teardown ( ) self . volumes . _teardown ( ) self . _client . api . close ( )
|
Clean up all resources when we're done with them.
|
965
|
def exec_redis_cli ( self , command , args = [ ] , db = 0 , redis_cli_opts = [ ] ) : cli_opts = [ '-n' , str ( db ) ] + redis_cli_opts cmd = [ 'redis-cli' ] + cli_opts + [ command ] + [ str ( a ) for a in args ] return self . inner ( ) . exec_run ( cmd )
|
Execute a redis-cli command inside a running container.
|
966
|
def list_keys ( self , pattern = '*' , db = 0 ) : lines = output_lines ( self . exec_redis_cli ( 'KEYS' , [ pattern ] , db = db ) ) return [ ] if lines == [ '' ] else lines
|
Run the KEYS command and return the list of matching keys .
|
967
|
def threaded ( system , func , * args , ** kwargs ) : @ wraps ( func ) def wrapper ( * args , ** kwargs ) : try : return func ( * args , ** kwargs ) except Exception as e : if system . raven_client : system . raven_client . captureException ( ) logger . exception ( 'Exception occurred in thread: %s' , e ) return False return lambda : wrapper ( * args , ** kwargs )
|
Decorator-style wrapper for running func in a thread: exceptions are caught, logged, and reported to the Raven client if one is configured.
|
968
|
def match ( self , item ) : if self . _position == len ( self . _matchers ) : raise RuntimeError ( 'Matcher exhausted, no more matchers to use' ) matcher = self . _matchers [ self . _position ] if matcher ( item ) : self . _position += 1 if self . _position == len ( self . _matchers ) : return True return False
|
Return True if the expected matchers are matched in the expected order, otherwise False.
|
969
|
def match ( self , item ) : if not self . _unused_matchers : raise RuntimeError ( 'Matcher exhausted, no more matchers to use' ) for matcher in self . _unused_matchers : if matcher ( item ) : self . _used_matchers . append ( matcher ) break if not self . _unused_matchers : return True return False
|
Return True if the expected matchers are matched in any order, otherwise False.
|
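Rows 968 and 969 implement ordered and unordered log matchers. A sketch of the unordered behaviour, using plain callables as matchers (`UnorderedMatcher` is the class referenced by `wait_for_start` in row 993):

```python
matcher = UnorderedMatcher(lambda s: 'listening' in s,
                           lambda s: 'ready' in s)
for line in ['listening on :80', 'booting', 'ready']:
    if matcher.match(line):
        print('all matchers satisfied')  # order did not matter
        break
```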
970
|
def moresane_by_scale ( self , start_scale = 1 , stop_scale = 20 , subregion = None , sigma_level = 4 , loop_gain = 0.1 , tolerance = 0.75 , accuracy = 1e-6 , major_loop_miter = 100 , minor_loop_miter = 30 , all_on_gpu = False , decom_mode = "ser" , core_count = 1 , conv_device = 'cpu' , conv_mode = 'linear' , extraction_mode = 'cpu' , enforce_positivity = False , edge_suppression = False , edge_offset = 0 , flux_threshold = 0 , neg_comp = False , edge_excl = 0 , int_excl = 0 ) : dirty_data = self . dirty_data scale_count = start_scale while not ( self . complete ) : logger . info ( "MORESANE at scale {}" . format ( scale_count ) ) self . moresane ( subregion = subregion , scale_count = scale_count , sigma_level = sigma_level , loop_gain = loop_gain , tolerance = tolerance , accuracy = accuracy , major_loop_miter = major_loop_miter , minor_loop_miter = minor_loop_miter , all_on_gpu = all_on_gpu , decom_mode = decom_mode , core_count = core_count , conv_device = conv_device , conv_mode = conv_mode , extraction_mode = extraction_mode , enforce_positivity = enforce_positivity , edge_suppression = edge_suppression , edge_offset = edge_offset , flux_threshold = flux_threshold , neg_comp = neg_comp , edge_excl = edge_excl , int_excl = int_excl ) self . dirty_data = self . residual scale_count += 1 if ( scale_count > ( np . log2 ( self . dirty_data . shape [ 0 ] ) ) - 1 ) : logger . info ( "Maximum scale reached - finished." ) break if ( scale_count > stop_scale ) : logger . info ( "Maximum scale reached - finished." ) break self . dirty_data = dirty_data self . complete = False
|
Extension of the MORESANE algorithm. This takes a scale-by-scale approach, attempting to remove all sources at the lower scales before moving on to the higher ones. At each step the algorithm may return to previous scales to remove the sources uncovered by the deconvolution.
|
971
|
def restore ( self ) : clean_beam , beam_params = beam_fit ( self . psf_data , self . cdelt1 , self . cdelt2 ) if np . all ( np . array ( self . psf_data_shape ) == 2 * np . array ( self . dirty_data_shape ) ) : self . restored = np . fft . fftshift ( np . fft . irfft2 ( np . fft . rfft2 ( conv . pad_array ( self . model ) ) * np . fft . rfft2 ( clean_beam ) ) ) self . restored = self . restored [ self . dirty_data_shape [ 0 ] // 2 : - self . dirty_data_shape [ 0 ] // 2 , self . dirty_data_shape [ 1 ] // 2 : - self . dirty_data_shape [ 1 ] // 2 ] else : self . restored = np . fft . fftshift ( np . fft . irfft2 ( np . fft . rfft2 ( self . model ) * np . fft . rfft2 ( clean_beam ) ) ) self . restored += self . residual self . restored = self . restored . astype ( np . float32 ) return beam_params
|
This method constructs the restoring beam, convolves it with the model, and then adds the residual.
|
972
|
def handle_input ( self , input_hdr ) : input_slice = input_hdr [ 'NAXIS' ] * [ 0 ] for i in range ( input_hdr [ 'NAXIS' ] ) : if input_hdr [ 'CTYPE%d' % ( i + 1 ) ] . startswith ( "RA" ) : input_slice [ - 1 ] = slice ( None ) if input_hdr [ 'CTYPE%d' % ( i + 1 ) ] . startswith ( "DEC" ) : input_slice [ - 2 ] = slice ( None ) return input_slice
|
This method tries to ensure that the input data has the correct dimensions .
|
973
|
def save_fits ( self , data , name ) : data = data . reshape ( 1 , 1 , data . shape [ 0 ] , data . shape [ 1 ] ) new_file = pyfits . PrimaryHDU ( data , self . img_hdu_list [ 0 ] . header ) new_file . writeto ( "{}" . format ( name ) , overwrite = True )
|
This method simply saves the model components and the residual .
|
974
|
def make_logger ( self , level = "INFO" ) : level = getattr ( logging , level . upper ( ) ) logger = logging . getLogger ( __name__ ) logger . setLevel ( logging . DEBUG ) fh = logging . FileHandler ( 'PyMORESANE.log' , mode = 'w' ) fh . setLevel ( level ) ch = logging . StreamHandler ( ) ch . setLevel ( level ) formatter = logging . Formatter ( '%(asctime)s [%(levelname)s]: %(' 'message)s' , datefmt = '[%m/%d/%Y] [%I:%M:%S]' ) fh . setFormatter ( formatter ) ch . setFormatter ( formatter ) logger . addHandler ( fh ) logger . addHandler ( ch ) return logger
|
Convenience function which creates a logger for the module .
|
975
|
def text_ui ( self ) : self . logger . info ( "Starting command line interface" ) self . help ( ) try : self . ipython_ui ( ) except ImportError : self . fallback_ui ( ) self . system . cleanup ( )
|
Start the text UI main loop.
|
976
|
def _prepare_headers ( self , additional_headers = None , ** kwargs ) : user_agent = "pyseaweed/{version}" . format ( version = __version__ ) headers = { "User-Agent" : user_agent } if additional_headers is not None : headers . update ( additional_headers ) return headers
|
Prepare headers for http communication .
|
977
|
def head ( self , url , * args , ** kwargs ) : res = self . _conn . head ( url , headers = self . _prepare_headers ( ** kwargs ) ) if res . status_code == 200 : return res return None
|
Returns the response to an HTTP HEAD request on the provided URL.
|
978
|
def get_data ( self , url , * args , ** kwargs ) : res = self . _conn . get ( url , headers = self . _prepare_headers ( ** kwargs ) ) if res . status_code == 200 : return res . text else : return None
|
Gets data from url as text
|
979
|
def get_raw_data ( self , url , * args , ** kwargs ) : res = self . _conn . get ( url , headers = self . _prepare_headers ( ** kwargs ) ) if res . status_code == 200 : return res . content else : return None
|
Gets data from url as bytes
|
980
|
def post_file ( self , url , filename , file_stream , * args , ** kwargs ) : res = self . _conn . post ( url , files = { filename : file_stream } , headers = self . _prepare_headers ( ** kwargs ) ) if res . status_code == 200 or res . status_code == 201 : return res . text else : return None
|
Uploads file to provided url .
|
981
|
def delete_data ( self , url , * args , ** kwargs ) : res = self . _conn . delete ( url , headers = self . _prepare_headers ( ** kwargs ) ) if res . status_code == 200 or res . status_code == 202 : return True else : return False
|
Deletes data under provided url
|
982
|
def remove_diacritic ( * diacritics ) : def _ ( text ) : return unicodedata . normalize ( "NFC" , "" . join ( ch for ch in unicodedata . normalize ( "NFD" , text ) if ch not in diacritics ) ) return _
|
Given a collection of Unicode diacritics, return a function that takes a string and returns the string without those diacritics.
|
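Row 982 operates on combining characters in NFD form, so accented letters lose only the listed marks:

```python
# Build a stripper for the combining acute (U+0301) and grave (U+0300).
strip_accents = remove_diacritic('\u0301', '\u0300')
print(strip_accents('café règle'))  # -> 'cafe regle'
```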
983
|
def deep_merge ( * dicts ) : result = { } for d in dicts : if not isinstance ( d , dict ) : raise Exception ( 'Can only deep_merge dicts, got {}' . format ( d ) ) for k , v in d . items ( ) : if isinstance ( v , dict ) : v = deep_merge ( result . get ( k , { } ) , v ) result [ k ] = v return result
|
Recursively merge all input dicts into a single dict .
|
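Row 983 merges nested dicts recursively, with later arguments winning on conflicts:

```python
base = {'db': {'host': 'localhost', 'port': 5432}, 'debug': False}
override = {'db': {'host': 'db.example.com'}, 'debug': True}
print(deep_merge(base, override))
# {'db': {'host': 'db.example.com', 'port': 5432}, 'debug': True}
```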
984
|
def create ( self , ** kwargs ) : if self . created : raise RuntimeError ( '{} already created.' . format ( self . __model_type__ . __name__ ) ) kwargs = self . merge_kwargs ( self . _create_kwargs , kwargs ) self . _inner = self . helper . create ( self . name , * self . _create_args , ** kwargs )
|
Create an instance of this resource definition .
|
985
|
def remove ( self , ** kwargs ) : self . helper . remove ( self . inner ( ) , ** kwargs ) self . _inner = None
|
Remove an instance of this resource definition .
|
986
|
def setup ( self , helper = None , ** create_kwargs ) : if self . created : return self . set_helper ( helper ) self . create ( ** create_kwargs ) return self
|
Set up this resource so that it is ready to be used in a test. If the resource has already been created, this call does nothing.
|
987
|
def as_fixture ( self , name = None ) : if name is None : name = self . name def deco ( f ) : @ functools . wraps ( f ) def wrapper ( * args , ** kw ) : with self : kw [ name ] = self return f ( * args , ** kw ) return wrapper return deco
|
A decorator to inject this container into a function as a test fixture .
|
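Row 987 lets a container definition double as a test fixture. A hypothetical pytest-style use (the PostgreSQLContainer name and its constructor are assumptions):

```python
container = PostgreSQLContainer('db')  # hypothetical definition

@container.as_fixture(name='db')
def test_no_tables_initially(db):
    # The container is set up before the test and torn down after it.
    assert db.list_tables() == []
```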
988
|
def setup ( self , helper = None , ** run_kwargs ) : if self . created : return self . set_helper ( helper ) self . run ( ** run_kwargs ) self . wait_for_start ( ) return self
|
Creates the container, starts it, and waits for it to start completely.
|
989
|
def teardown ( self ) : while self . _http_clients : self . _http_clients . pop ( ) . close ( ) if self . created : self . halt ( )
|
Stop and remove the container if it exists .
|
990
|
def status ( self ) : if not self . created : return None self . inner ( ) . reload ( ) return self . inner ( ) . status
|
Get the container's current status from Docker.
|
991
|
def stop ( self , timeout = 5 ) : self . inner ( ) . stop ( timeout = timeout ) self . inner ( ) . reload ( )
|
Stop the container . The container must have been created .
|
992
|
def run ( self , fetch_image = True , ** kwargs ) : self . create ( fetch_image = fetch_image , ** kwargs ) self . start ( )
|
Create the container and start it . Similar to docker run .
|
993
|
def wait_for_start ( self ) : if self . wait_matchers : matcher = UnorderedMatcher ( * self . wait_matchers ) self . wait_for_logs_matching ( matcher , timeout = self . wait_timeout )
|
Wait for the container to start .
|
994
|
def get_logs ( self , stdout = True , stderr = True , timestamps = False , tail = 'all' , since = None ) : return self . inner ( ) . logs ( stdout = stdout , stderr = stderr , timestamps = timestamps , tail = tail , since = since )
|
Get container logs .
|
995
|
def stream_logs ( self , stdout = True , stderr = True , tail = 'all' , timeout = 10.0 ) : return stream_logs ( self . inner ( ) , stdout = stdout , stderr = stderr , tail = tail , timeout = timeout )
|
Stream container output .
|
996
|
def wait_for_logs_matching ( self , matcher , timeout = 10 , encoding = 'utf-8' , ** logs_kwargs ) : wait_for_logs_matching ( self . inner ( ) , matcher , timeout = timeout , encoding = encoding , ** logs_kwargs )
|
Wait for logs matching the given matcher .
|
997
|
def http_client ( self , port = None ) : from seaworthy . client import ContainerHttpClient client = ContainerHttpClient . for_container ( self , container_port = port ) self . _http_clients . append ( client ) return client
|
Construct an HTTP client for this container .
|
998
|
def _dispatch_change_event ( self , object , trait_name , old , new , handler ) : args = self . argument_transform ( object , trait_name , old , new ) if tnotifier . _pre_change_event_tracer is not None : tnotifier . _pre_change_event_tracer ( object , trait_name , old , new , handler ) from automate . common import SystemNotReady try : self . dispatch ( handler , * args ) except SystemNotReady : pass except Exception as e : if tnotifier . _post_change_event_tracer is not None : tnotifier . _post_change_event_tracer ( object , trait_name , old , new , handler , exception = e ) tnotifier . handle_exception ( object , trait_name , old , new ) else : if tnotifier . _post_change_event_tracer is not None : tnotifier . _post_change_event_tracer ( object , trait_name , old , new , handler , exception = None )
|
Prepare and dispatch a trait change event to a listener .
|
999
|
def split ( value , precision = 1 ) : negative = False digits = precision + 1 if value < 0. : value = - value negative = True elif value == 0. : return 0. , 0 expof10 = int ( math . log10 ( value ) ) if expof10 > 0 : expof10 = ( expof10 // 3 ) * 3 else : expof10 = ( - expof10 + 3 ) // 3 * ( - 3 ) value *= 10 ** ( - expof10 ) if value >= 1000. : value /= 1000.0 expof10 += 3 elif value >= 100.0 : digits -= 2 elif value >= 10.0 : digits -= 1 if negative : value *= - 1 return value , int ( expof10 )
|
Split value into a value and an exponent-of-10, where the exponent-of-10 is a multiple of 3. This corresponds to SI prefixes.
|
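Note that row 999 only normalises the mantissa: the digits variable is computed but the returned value is not rounded. Example outputs (approximate, engineering-notation exponents):

```python
print(split(47683.5))   # -> (~47.6835, 3)   i.e. about 47.6835e3
print(split(0.000123))  # -> (~123.0, -6)    i.e. about 123e-6
print(split(0.0))       # -> (0.0, 0)
```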