Dataset columns: idx — int64 (values 0 to 63k); question — string (length 61 to 4.03k); target — string (length 6 to 1.23k)
9,700
def get_queue_for_job(self, job):
    if self._queue is not None:
        return self._queue
    key = '{0}{1}'.format(self.queue_class.redis_queue_namespace_prefix, job.origin)
    return self.queue_class.from_queue_key(key, connection=self.connection, job_class=self.job_class)
Returns a queue to put the job into.
9,701
def enqueue_job ( self , job ) : self . log . debug ( 'Pushing {0} to {1}' . format ( job . id , job . origin ) ) interval = job . meta . get ( 'interval' , None ) repeat = job . meta . get ( 'repeat' , None ) cron_string = job . meta . get ( 'cron_string' , None ) if repeat : job . meta [ 'repeat' ] = int ( repeat ) - 1 queue = self . get_queue_for_job ( job ) queue . enqueue_job ( job ) self . connection . zrem ( self . scheduled_jobs_key , job . id ) if interval : if repeat is not None : if job . meta [ 'repeat' ] == 0 : return self . connection . zadd ( self . scheduled_jobs_key , { job . id : to_unix ( datetime . utcnow ( ) ) + int ( interval ) } ) elif cron_string : if repeat is not None : if job . meta [ 'repeat' ] == 0 : return self . connection . zadd ( self . scheduled_jobs_key , { job . id : to_unix ( get_next_scheduled_time ( cron_string ) ) } )
Move a scheduled job to a queue. It also puts the job back into the scheduler if needed.
9,702
def enqueue_jobs ( self ) : self . log . debug ( 'Checking for scheduled jobs' ) jobs = self . get_jobs_to_queue ( ) for job in jobs : self . enqueue_job ( job ) self . connection . expire ( self . scheduler_key , int ( self . _interval ) + 10 ) return jobs
Move scheduled jobs into queues.
9,703
def _checkDimensionsListLike(arrays):
    dim1 = len(arrays)
    dim2, dim3 = arrays[0].shape
    for aa in range(1, dim1):
        dim2_aa, dim3_aa = arrays[aa].shape
        if (dim2_aa != dim2) or (dim3_aa != dim3):
            raise _error.InvalidError(_MDPERR["obj_square"])
    return dim1, dim2, dim3
Check that each array in a list of arrays has the same shape.
9,704
def _checkRewardsListLike ( reward , n_actions , n_states ) : try : lenR = len ( reward ) if lenR == n_actions : dim1 , dim2 , dim3 = _checkDimensionsListLike ( reward ) elif lenR == n_states : dim1 = n_actions dim2 = dim3 = lenR else : raise _error . InvalidError ( _MDPERR [ "R_shape" ] ) except AttributeError : raise _error . InvalidError ( _MDPERR [ "R_shape" ] ) return dim1 , dim2 , dim3
Check that a list-like reward input is valid.
9,705
def isSquare(matrix):
    try:
        try:
            dim1, dim2 = matrix.shape
        except AttributeError:
            dim1, dim2 = _np.array(matrix).shape
    except ValueError:
        return False
    if dim1 == dim2:
        return True
    return False
Check that the matrix is square.
9,706
def isStochastic(matrix):
    try:
        absdiff = _np.abs(matrix.sum(axis=1) - _np.ones(matrix.shape[0]))
    except AttributeError:
        matrix = _np.array(matrix)
        absdiff = _np.abs(matrix.sum(axis=1) - _np.ones(matrix.shape[0]))
    return (absdiff.max() <= 10 * _np.spacing(_np.float64(1)))
Check that the matrix is row-stochastic.
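As an aside, the tolerance used by isStochastic above can be seen in a small self-contained check; this is only an illustration (the matrix values are made up, and numpy is imported as np rather than the _np alias used in the snippet):

import numpy as np

P = np.array([[0.1, 0.9],
              [0.5, 0.5]])
row_sums = P.sum(axis=1)                      # every row of a stochastic matrix sums to 1
absdiff = np.abs(row_sums - np.ones(P.shape[0]))
tol = 10 * np.spacing(np.float64(1))          # ~2.2e-15, the same tolerance as above
print(absdiff.max() <= tol)                   # True for this row-stochastic P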
9,707
def isNonNegative ( matrix ) : try : if ( matrix >= 0 ) . all ( ) : return True except ( NotImplementedError , AttributeError , TypeError ) : try : if ( matrix . data >= 0 ) . all ( ) : return True except AttributeError : matrix = _np . array ( matrix ) if ( matrix . data >= 0 ) . all ( ) : return True return False
Check that the matrix is non-negative.
9,708
def checkSquareStochastic(matrix):
    if not isSquare(matrix):
        raise _error.SquareError
    if not isStochastic(matrix):
        raise _error.StochasticError
    if not isNonNegative(matrix):
        raise _error.NonNegativeError
Check that the matrix is square and row-stochastic.
9,709
def isWon(state, who):
    for w in WINS:
        S = sum(1 if (w[k] == 1 and state[k] == who) else 0 for k in range(ACTIONS))
        if S == 3:
            return True
    return False
Test whether a tic-tac-toe game has been won by the given player. Assumes that the board is in a legal state. Checks every winning combination for three of the player's marks.
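WINS and ACTIONS are module globals that are not part of the snippet; the reconstruction below is purely an assumption for illustration (a 3x3 board flattened row-major, one 0/1 mask per winning line):

ACTIONS = 9  # cells of a 3x3 board, row-major
WINS = [     # assumed layout: one mask per winning line
    [1, 1, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 1, 1],  # rows
    [1, 0, 0, 1, 0, 0, 1, 0, 0], [0, 1, 0, 0, 1, 0, 0, 1, 0], [0, 0, 1, 0, 0, 1, 0, 0, 1],  # columns
    [1, 0, 0, 0, 1, 0, 0, 0, 1], [0, 0, 1, 0, 1, 0, 1, 0, 0],                               # diagonals
]

state = (1, 2, 1, 2, 1, 2, 0, 0, 1)  # player 1 holds the main diagonal
print(isWon(state, 1))               # True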
9,710
def getPolicyValue ( self ) : self . _cur . execute ( "SELECT action FROM policy" ) r = self . _cur . fetchall ( ) policy = [ x [ 0 ] for x in r ] self . _cur . execute ( "SELECT value FROM V" ) r = self . _cur . fetchall ( ) value = [ x [ 0 ] for x in r ] return policy , value
Get the policy and value vectors.
9,711
def forest ( S = 3 , r1 = 4 , r2 = 2 , p = 0.1 , is_sparse = False ) : assert S > 1 , "The number of states S must be greater than 1." assert ( r1 > 0 ) and ( r2 > 0 ) , "The rewards must be non-negative." assert 0 <= p <= 1 , "The probability p must be in [0; 1]." if is_sparse : P = [ ] rows = list ( range ( S ) ) * 2 cols = [ 0 ] * S + list ( range ( 1 , S ) ) + [ S - 1 ] vals = [ p ] * S + [ 1 - p ] * S P . append ( _sp . coo_matrix ( ( vals , ( rows , cols ) ) , shape = ( S , S ) ) . tocsr ( ) ) rows = list ( range ( S ) ) cols = [ 0 ] * S vals = [ 1 ] * S P . append ( _sp . coo_matrix ( ( vals , ( rows , cols ) ) , shape = ( S , S ) ) . tocsr ( ) ) else : P = _np . zeros ( ( 2 , S , S ) ) P [ 0 , : , : ] = ( 1 - p ) * _np . diag ( _np . ones ( S - 1 ) , 1 ) P [ 0 , : , 0 ] = p P [ 0 , S - 1 , S - 1 ] = ( 1 - p ) P [ 1 , : , : ] = _np . zeros ( ( S , S ) ) P [ 1 , : , 0 ] = 1 R = _np . zeros ( ( S , 2 ) ) R [ S - 1 , 0 ] = r1 R [ : , 1 ] = _np . ones ( S ) R [ 0 , 1 ] = 0 R [ S - 1 , 1 ] = r2 return ( P , R )
Generate an MDP example based on a simple forest management scenario.
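This appears to be the forest example generator from pymdptoolbox; assuming that is the case, a minimal usage sketch looks like the following (the reward/probability values are arbitrary):

import mdptoolbox.example

# P has shape (A, S, S) = (2, 3, 3); R has shape (S, A) = (3, 2).
P, R = mdptoolbox.example.forest(S=3, r1=4, r2=2, p=0.1)
print(P.shape, R.shape)  # (2, 3, 3) (3, 2)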
9,712
def _randDense ( states , actions , mask ) : P = _np . zeros ( ( actions , states , states ) ) R = _np . zeros ( ( actions , states , states ) ) for action in range ( actions ) : for state in range ( states ) : if mask is None : m = _np . random . random ( states ) r = _np . random . random ( ) m [ m <= r ] = 0 m [ m > r ] = 1 elif mask . shape == ( actions , states , states ) : m = mask [ action ] [ state ] else : m = mask [ state ] if m . sum ( ) == 0 : m [ _np . random . randint ( 0 , states ) ] = 1 P [ action ] [ state ] = m * _np . random . random ( states ) P [ action ] [ state ] = P [ action ] [ state ] / P [ action ] [ state ] . sum ( ) R [ action ] [ state ] = ( m * ( 2 * _np . random . random ( states ) - _np . ones ( states , dtype = int ) ) ) return ( P , R )
Generate random dense P and R. See rand for details.
9,713
def _randSparse ( states , actions , mask ) : P = [ None ] * actions R = [ None ] * actions for action in range ( actions ) : PP = _sp . dok_matrix ( ( states , states ) ) RR = _sp . dok_matrix ( ( states , states ) ) for state in range ( states ) : if mask is None : m = _np . random . random ( states ) m [ m <= 2 / 3.0 ] = 0 m [ m > 2 / 3.0 ] = 1 elif mask . shape == ( actions , states , states ) : m = mask [ action ] [ state ] else : m = mask [ state ] n = int ( m . sum ( ) ) if n == 0 : m [ _np . random . randint ( 0 , states ) ] = 1 n = 1 nz = m . nonzero ( ) if len ( nz ) == 1 : cols = nz [ 0 ] else : cols = nz [ 1 ] vals = _np . random . random ( n ) vals = vals / vals . sum ( ) reward = 2 * _np . random . random ( n ) - _np . ones ( n ) PP [ state , cols ] = vals RR [ state , cols ] = reward P [ action ] = PP . tocsr ( ) R [ action ] = RR . tocsr ( ) return ( P , R )
Generate random sparse P and R. See rand for details.
9,714
def rand(S, A, is_sparse=False, mask=None):
    assert S > 1, "The number of states S must be greater than 1."
    assert A > 1, "The number of actions A must be greater than 1."
    if mask is not None:
        try:
            assert mask.shape in ((S, S), (A, S, S)), ("'mask' must have dimensions S×S or A×S×S.")
        except AttributeError:
            raise TypeError("'mask' must be a numpy array or matrix.")
    if is_sparse:
        P, R = _randSparse(S, A, mask)
    else:
        P, R = _randDense(S, A, mask)
    return (P, R)
Generate a random Markov Decision Process.
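A minimal usage sketch for the dense case, again assuming this is mdptoolbox.example.rand from pymdptoolbox; the seed is only there to make the random output reproducible:

import numpy as np
import mdptoolbox.example

np.random.seed(0)
P, R = mdptoolbox.example.rand(S=5, A=3)
print(P.shape, R.shape)                 # (3, 5, 5) (3, 5, 5)
print(np.allclose(P.sum(axis=2), 1.0))  # each row of each P[a] is stochastic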
9,715
def get_base_url ( config , url ) : if not config : return None for item in config : if url . startswith ( item ) : return item for item in config : if without_http_prefix ( url ) . startswith ( without_http_prefix ( item ) ) : return item
Look through the config and try to find the best matching base for the url.
9,716
def get_config_entry ( config , url ) : if not config : return None if url in config : return config [ url ] for item in config : if without_http_prefix ( item ) == without_http_prefix ( url ) : return config [ item ]
Look through the config and try to find the best matching entry for the url.
9,717
def sha1sum(filename):
    sha1 = hashlib.sha1()
    with open(filename, 'rb') as f:
        for chunk in iter(lambda: f.read(128 * sha1.block_size), b''):
            sha1.update(chunk)
    return sha1.hexdigest()
Calculates the SHA-1 hash of a file.
9,718
def walk ( pathobj , topdown = True ) : dirs , nondirs = [ ] , [ ] for child in pathobj : relpath = str ( child . relative_to ( str ( pathobj ) ) ) if relpath . startswith ( '/' ) : relpath = relpath [ 1 : ] if relpath . endswith ( '/' ) : relpath = relpath [ : - 1 ] if child . is_dir ( ) : dirs . append ( relpath ) else : nondirs . append ( relpath ) if topdown : yield pathobj , dirs , nondirs for dir in dirs : for result in walk ( pathobj / dir ) : yield result if not topdown : yield pathobj , dirs , nondirs
An os.walk-like function to traverse the URI like a file system.
9,719
def rest_del ( self , url , params = None , session = None , verify = True , cert = None ) : res = session . delete ( url , params = params , verify = verify , cert = cert ) return res . text , res . status_code
Perform a DELETE request to url with requests.session.
9,720
def rest_put_stream ( self , url , stream , headers = None , session = None , verify = True , cert = None ) : res = session . put ( url , headers = headers , data = stream , verify = verify , cert = cert ) return res . text , res . status_code
Perform a chunked PUT request to url with requests.session. This is specifically for uploading files.
9,721
def rest_get_stream ( self , url , session = None , verify = True , cert = None ) : res = session . get ( url , stream = True , verify = verify , cert = cert ) return res . raw , res . status_code
Perform a chunked GET request to url with requests.session. This is specifically for downloading files.
9,722
def is_dir ( self , pathobj ) : try : stat = self . stat ( pathobj ) return stat . is_dir except OSError as exc : if exc . errno != errno . ENOENT : raise return False
Returns True if the given path is a directory.
9,723
def listdir ( self , pathobj ) : stat = self . stat ( pathobj ) if not stat . is_dir : raise OSError ( 20 , "Not a directory: %s" % str ( pathobj ) ) return stat . children
Returns a list of immediate sub-directories and files in the path.
9,724
def mkdir ( self , pathobj , _ ) : if not pathobj . drive or not pathobj . root : raise RuntimeError ( "Full path required: '%s'" % str ( pathobj ) ) if pathobj . exists ( ) : raise OSError ( 17 , "File exists: '%s'" % str ( pathobj ) ) url = str ( pathobj ) + '/' text , code = self . rest_put ( url , session = pathobj . session , verify = pathobj . verify , cert = pathobj . cert ) if not code == 201 : raise RuntimeError ( "%s %d" % ( text , code ) )
Creates a remote directory. Note that this operation is not recursive.
9,725
def rmdir ( self , pathobj ) : stat = self . stat ( pathobj ) if not stat . is_dir : raise OSError ( 20 , "Not a directory: '%s'" % str ( pathobj ) ) url = str ( pathobj ) + '/' text , code = self . rest_del ( url , session = pathobj . session , verify = pathobj . verify , cert = pathobj . cert ) if code not in [ 200 , 202 , 204 ] : raise RuntimeError ( "Failed to delete directory: '%s'" % text )
Removes a directory
9,726
def touch ( self , pathobj ) : if not pathobj . drive or not pathobj . root : raise RuntimeError ( 'Full path required' ) if pathobj . exists ( ) : return url = str ( pathobj ) text , code = self . rest_put ( url , session = pathobj . session , verify = pathobj . verify , cert = pathobj . cert ) if not code == 201 : raise RuntimeError ( "%s %d" % ( text , code ) )
Create an empty file
9,727
def owner ( self , pathobj ) : stat = self . stat ( pathobj ) if not stat . is_dir : return stat . modified_by else : return 'nobody'
Returns the file owner. This makes little sense for Artifactory, but to be consistent with pathlib we return modified_by instead, if available.
9,728
def creator ( self , pathobj ) : stat = self . stat ( pathobj ) if not stat . is_dir : return stat . created_by else : return 'nobody'
Returns the file creator. This makes little sense for Artifactory, but to be consistent with pathlib we return created_by instead, if available.
9,729
def copy ( self , src , dst , suppress_layouts = False ) : url = '/' . join ( [ src . drive , 'api/copy' , str ( src . relative_to ( src . drive ) ) . rstrip ( '/' ) ] ) params = { 'to' : str ( dst . relative_to ( dst . drive ) ) . rstrip ( '/' ) , 'suppressLayouts' : int ( suppress_layouts ) } text , code = self . rest_post ( url , params = params , session = src . session , verify = src . verify , cert = src . cert ) if code not in [ 200 , 201 ] : raise RuntimeError ( "%s" % text )
Copy an artifact from src to dst.
9,730
def touch ( self , mode = 0o666 , exist_ok = True ) : if self . exists ( ) and not exist_ok : raise OSError ( 17 , "File exists" , str ( self ) ) self . _accessor . touch ( self )
Create a file if it doesn't exist. Mode is ignored by Artifactory.
9,731
def deploy ( self , fobj , md5 = None , sha1 = None , parameters = { } ) : return self . _accessor . deploy ( self , fobj , md5 , sha1 , parameters )
Upload the given file object to this path
9,732
def deploy_file(self, file_name, calc_md5=True, calc_sha1=True, parameters={}):
    if calc_md5:
        md5 = md5sum(file_name)
    if calc_sha1:
        sha1 = sha1sum(file_name)
    target = self
    if self.is_dir():
        target = self / pathlib.Path(file_name).name
    with open(file_name, 'rb') as fobj:
        target.deploy(fobj, md5, sha1, parameters)
Upload the given file to this path
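These path methods look like the ArtifactoryPath API from the dohq-artifactory package; assuming so, a hedged usage sketch (URL, credentials, and file name are placeholders) would be:

from artifactory import ArtifactoryPath

path = ArtifactoryPath(
    "https://example.com/artifactory/my-repo/packages/",  # hypothetical repository path
    auth=("user", "password"),
)
# deploy_file() hashes the file locally and uploads it; since the target is a
# directory, the artifact keeps its original file name.
path.deploy_file("./dist/mylib-1.0.tar.gz")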
9,733
def deploy_deb ( self , file_name , distribution , component , architecture , parameters = { } ) : params = { 'deb.distribution' : distribution , 'deb.component' : component , 'deb.architecture' : architecture } params . update ( parameters ) self . deploy_file ( file_name , parameters = params )
Convenience method to deploy .deb packages.
9,734
def move ( self , dst ) : if self . drive != dst . drive : raise NotImplementedError ( "Moving between instances is not implemented yet" ) self . _accessor . move ( self , dst )
Move an artifact from this path to the destination.
9,735
def del_properties ( self , properties , recursive = None ) : return self . _accessor . del_properties ( self , properties , recursive )
Delete properties listed in properties
9,736
def create_aql_text(*args):
    aql_query_text = ""
    for arg in args:
        if isinstance(arg, dict):
            arg = "({})".format(json.dumps(arg))
        elif isinstance(arg, list):
            arg = "({})".format(json.dumps(arg)).replace("[", "").replace("]", "")
        aql_query_text += arg
    return aql_query_text
Create an AQL query from string, list, or dict arguments.
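To illustrate how create_aql_text stitches its arguments together (the query below is made up): dict arguments are JSON-encoded inside parentheses, list arguments lose their brackets, and strings are appended as-is.

query = create_aql_text(
    "items.find",
    {"repo": "my-repo", "name": {"$match": "*.jar"}},
    ".include",
    ["name", "path"],
)
print(query)
# items.find({"repo": "my-repo", "name": {"$match": "*.jar"}}).include("name", "path")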
9,737
def remove_watch ( self , path , superficial = False ) : wd = self . __watches . get ( path ) if wd is None : return _LOGGER . debug ( "Removing watch for watch-handle (%d): [%s]" , wd , path ) del self . __watches [ path ] self . remove_watch_with_id ( wd )
Remove our tracking information and call inotify to stop watching the given path. When a directory is removed, we'll just have to remove our tracking, since inotify already cleans up the watch.
9,738
def _handle_inotify_event ( self , wd ) : b = os . read ( wd , 1024 ) if not b : return self . __buffer += b while 1 : length = len ( self . __buffer ) if length < _STRUCT_HEADER_LENGTH : _LOGGER . debug ( "Not enough bytes for a header." ) return peek_slice = self . __buffer [ : _STRUCT_HEADER_LENGTH ] header_raw = struct . unpack ( _HEADER_STRUCT_FORMAT , peek_slice ) header = _INOTIFY_EVENT ( * header_raw ) type_names = self . _get_event_names ( header . mask ) _LOGGER . debug ( "Events received in stream: {}" . format ( type_names ) ) event_length = ( _STRUCT_HEADER_LENGTH + header . len ) if length < event_length : return filename = self . __buffer [ _STRUCT_HEADER_LENGTH : event_length ] filename_bytes = filename . rstrip ( b'\0' ) self . __buffer = self . __buffer [ event_length : ] path = self . __watches_r . get ( header . wd ) if path is not None : filename_unicode = filename_bytes . decode ( 'utf8' ) yield ( header , type_names , path , filename_unicode ) buffer_length = len ( self . __buffer ) if buffer_length < _STRUCT_HEADER_LENGTH : break
Handle a series of events coming in from inotify.
9,739
def event_gen ( self , timeout_s = None , yield_nones = True , filter_predicate = None , terminal_events = _DEFAULT_TERMINAL_EVENTS ) : self . __last_success_return = None last_hit_s = time . time ( ) while True : block_duration_s = self . __get_block_duration ( ) try : events = self . __epoll . poll ( block_duration_s ) except IOError as e : if e . errno != EINTR : raise if timeout_s is not None : time_since_event_s = time . time ( ) - last_hit_s if time_since_event_s > timeout_s : break continue for fd , event_type in events : names = self . _get_event_names ( event_type ) _LOGGER . debug ( "Events received from epoll: {}" . format ( names ) ) for ( header , type_names , path , filename ) in self . _handle_inotify_event ( fd ) : last_hit_s = time . time ( ) e = ( header , type_names , path , filename ) for type_name in type_names : if filter_predicate is not None and filter_predicate ( type_name , e ) is False : self . __last_success_return = ( type_name , e ) return elif type_name in terminal_events : raise TerminalEventException ( type_name , e ) yield e if timeout_s is not None : time_since_event_s = time . time ( ) - last_hit_s if time_since_event_s > timeout_s : break if yield_nones is True : yield None
Yield one event after another. If timeout_s is provided, we'll break when no event is received for that many seconds.
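This generator resembles the event_gen API of the PyInotify package (inotify.adapters); assuming that API, a typical consumer loop looks like this sketch:

import inotify.adapters

i = inotify.adapters.Inotify()
i.add_watch('/tmp')

# yield_nones=False suppresses the idle None yields; timeout_s bounds the wait.
for header, type_names, path, filename in i.event_gen(yield_nones=False, timeout_s=10):
    print(path, filename, type_names)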
9,740
def copy_project ( from_client , to_client , from_project , to_project , copy_machine_group = False ) : ret = from_client . get_project ( from_project ) try : ret = to_client . create_project ( to_project , ret . get_description ( ) ) except LogException as ex : if ex . get_error_code ( ) == 'ProjectAlreadyExist' : pass else : raise default_fetch_size = 100 offset , size = 0 , default_fetch_size while True : ret = from_client . list_logstore ( from_project , offset = offset , size = size ) count = ret . get_logstores_count ( ) total = ret . get_logstores_total ( ) for logstore_name in ret . get_logstores ( ) : ret = from_client . get_logstore ( from_project , logstore_name ) res_shard = from_client . list_shards ( from_project , logstore_name ) expected_rwshard_count = len ( [ shard for shard in res_shard . shards if shard [ 'status' ] . lower ( ) == 'readwrite' ] ) ret = to_client . create_logstore ( to_project , logstore_name , ret . get_ttl ( ) , min ( expected_rwshard_count , MAX_INIT_SHARD_COUNT ) , enable_tracking = ret . get_enable_tracking ( ) , append_meta = ret . append_meta , auto_split = ret . auto_split , max_split_shard = ret . max_split_shard , preserve_storage = ret . preserve_storage ) try : ret = from_client . get_index_config ( from_project , logstore_name ) ret = to_client . create_index ( to_project , logstore_name , ret . get_index_config ( ) ) except LogException as ex : if ex . get_error_code ( ) == 'IndexConfigNotExist' : pass else : raise offset += count if count < size or offset >= total : break offset , size = 0 , default_fetch_size while True : ret = from_client . list_logtail_config ( from_project , offset = offset , size = size ) count = ret . get_configs_count ( ) total = ret . get_configs_total ( ) for config_name in ret . get_configs ( ) : ret = from_client . get_logtail_config ( from_project , config_name ) ret = to_client . create_logtail_config ( to_project , ret . logtail_config ) offset += count if count < size or offset >= total : break offset , size = 0 , default_fetch_size while copy_machine_group : ret = from_client . list_machine_group ( from_project , offset = offset , size = size ) count = ret . get_machine_group_count ( ) total = ret . get_machine_group_total ( ) for group_name in ret . get_machine_group ( ) : ret = from_client . get_machine_group ( from_project , group_name ) ret = to_client . create_machine_group ( to_project , ret . get_machine_group ( ) ) ret = from_client . get_machine_group_applied_configs ( from_project , group_name ) for config_name in ret . get_configs ( ) : to_client . apply_config_to_machine_group ( to_project , config_name , group_name ) offset += count if count < size or offset >= total : break
Copy a project's logstores, machine groups, and logtail configs to the target project; the target project will be created if it doesn't exist.
9,741
def copy_logstore ( from_client , from_project , from_logstore , to_logstore , to_project = None , to_client = None ) : if to_project is not None : to_client = to_client or from_client ret = from_client . get_project ( from_project ) try : ret = to_client . create_project ( to_project , ret . get_description ( ) ) except LogException as ex : if ex . get_error_code ( ) == 'ProjectAlreadyExist' : pass else : raise to_project = to_project or from_project to_client = to_client or from_client if from_client is to_client and from_project == to_project and from_logstore == to_logstore : return ret = from_client . get_logstore ( from_project , from_logstore ) res_shard = from_client . list_shards ( from_project , from_logstore ) expected_rwshard_count = len ( [ shard for shard in res_shard . shards if shard [ 'status' ] . lower ( ) == 'readwrite' ] ) try : ret = to_client . create_logstore ( to_project , to_logstore , ttl = ret . get_ttl ( ) , shard_count = min ( expected_rwshard_count , MAX_INIT_SHARD_COUNT ) , enable_tracking = ret . get_enable_tracking ( ) , append_meta = ret . append_meta , auto_split = ret . auto_split , max_split_shard = ret . max_split_shard , preserve_storage = ret . preserve_storage ) except LogException as ex : if ex . get_error_code ( ) == 'LogStoreAlreadyExist' : ret = to_client . update_logstore ( to_project , to_logstore , ttl = ret . get_ttl ( ) , enable_tracking = ret . get_enable_tracking ( ) , append_meta = ret . append_meta , auto_split = ret . auto_split , max_split_shard = ret . max_split_shard , preserve_storage = ret . preserve_storage ) res = arrange_shard ( to_client , to_project , to_logstore , min ( expected_rwshard_count , MAX_INIT_SHARD_COUNT ) ) else : raise try : ret = from_client . get_index_config ( from_project , from_logstore ) ret = to_client . create_index ( to_project , to_logstore , ret . get_index_config ( ) ) except LogException as ex : if ex . get_error_code ( ) == 'IndexConfigNotExist' : pass elif ex . get_error_code ( ) == 'IndexAlreadyExist' : ret = to_client . update_index ( to_project , to_logstore , ret . get_index_config ( ) ) pass else : raise default_fetch_size = 100 offset , size = 0 , default_fetch_size while True : ret = from_client . list_logtail_config ( from_project , offset = offset , size = size ) count = ret . get_configs_count ( ) total = ret . get_configs_total ( ) for config_name in ret . get_configs ( ) : ret = from_client . get_logtail_config ( from_project , config_name ) config = ret . logtail_config if config . logstore_name != from_logstore : continue config . config_name = to_logstore + '_' + config_name config . logstore_name = to_logstore ret = to_client . create_logtail_config ( to_project , config ) offset += count if count < size or offset >= total : break
Copy a logstore's index and logtail configs to the target logstore; machine groups are not included yet. The target logstore will be created if it does not exist.
9,742
def _split_one_shard_to_multiple ( client , project , logstore , shard_info , count , current_shard_count ) : distance = shard_info [ 'length' ] // count if distance <= 0 or count <= 1 : return [ shard_info [ 'info' ] ] , 0 rw_shards , increased_shard_count = { shard_info [ 'id' ] : shard_info [ 'info' ] } , 0 for x in range ( 1 , count ) : new_hash = shard_info [ 'start' ] + distance * x new_hash = hex ( new_hash ) [ 2 : ] . strip ( 'lL' ) new_hash = '0' * ( TOTAL_HASH_LENGTH - len ( new_hash ) ) + new_hash try : if x == 1 : res = client . split_shard ( project , logstore , shard_info [ 'id' ] , new_hash ) else : res = client . split_shard ( project , logstore , current_shard_count - 1 , new_hash ) for shard in res . shards : if shard [ 'status' ] == 'readonly' : del rw_shards [ shard [ 'shardID' ] ] else : rw_shards [ shard [ 'shardID' ] ] = shard current_shard_count += res . count - 1 increased_shard_count += res . count - 1 logger . info ( "split shard: project={0}, logstore={1}, shard_info={2}, count={3}, current_shard_count={4}" . format ( project , logstore , shard_info , count , current_shard_count ) ) except Exception as ex : print ( ex ) print ( x , project , logstore , shard_info , count , current_shard_count ) raise return rw_shards . values ( ) , increased_shard_count
Return new_rw_shards_list and increased_shard_count.
9,743
def get_host_ip ( logHost ) : s = None try : s = socket . socket ( socket . AF_INET , socket . SOCK_DGRAM ) s . connect ( ( logHost , 80 ) ) ip = s . getsockname ( ) [ 0 ] return ip except Exception : return '127.0.0.1' finally : if s : s . close ( )
If it does not match your local IP, you should fill in the PutLogsRequest parameter source yourself.
9,744
def generate_common_reg_log_config ( json_value ) : input_detail = copy . deepcopy ( json_value [ 'inputDetail' ] ) output_detail = json_value [ 'outputDetail' ] logSample = json_value . get ( 'logSample' , '' ) config_name = json_value [ 'configName' ] logstore_name = output_detail [ 'logstoreName' ] endpoint = output_detail . get ( 'endpoint' , '' ) log_path = input_detail [ 'logPath' ] file_pattern = input_detail [ 'filePattern' ] time_format = input_detail [ 'timeFormat' ] log_begin_regex = input_detail . get ( 'logBeginRegex' , '' ) log_parse_regex = input_detail . get ( 'regex' , '' ) reg_keys = input_detail [ 'key' ] topic_format = input_detail [ 'topicFormat' ] filter_keys = input_detail [ 'filterKey' ] filter_keys_reg = input_detail [ 'filterRegex' ] log_type = input_detail . get ( 'logType' ) for item in ( 'logPath' , 'filePattern' , 'timeFormat' , 'logBeginRegex' , 'regex' , 'key' , 'topicFormat' , 'filterKey' , 'filterRegex' , 'logType' ) : if item in input_detail : del input_detail [ item ] config = CommonRegLogConfigDetail ( config_name , logstore_name , endpoint , log_path , file_pattern , time_format , log_begin_regex , log_parse_regex , reg_keys , topic_format , filter_keys , filter_keys_reg , logSample , log_type , ** input_detail ) return config
Generate a common logtail config from a loaded JSON value.
9,745
def generate_apsara_log_config ( json_value ) : input_detail = json_value [ 'inputDetail' ] output_detail = json_value [ 'outputDetail' ] config_name = json_value [ 'configName' ] logSample = json_value . get ( 'logSample' , '' ) logstore_name = output_detail [ 'logstoreName' ] endpoint = output_detail . get ( 'endpoint' , '' ) log_path = input_detail [ 'logPath' ] file_pattern = input_detail [ 'filePattern' ] log_begin_regex = input_detail . get ( 'logBeginRegex' , '' ) topic_format = input_detail [ 'topicFormat' ] filter_keys = input_detail [ 'filterKey' ] filter_keys_reg = input_detail [ 'filterRegex' ] config = ApsaraLogConfigDetail ( config_name , logstore_name , endpoint , log_path , file_pattern , log_begin_regex , topic_format , filter_keys , filter_keys_reg , logSample ) return config
Generate an Apsara logtail config from a loaded JSON value.
9,746
def generate_logtail_config ( json_value ) : logger . warning ( "aliyun.log.LogtailConfigHelper is deprecated and will be removed in future version." "Use LogtailConfigGenerator instead" ) if json_value [ 'inputDetail' ] [ 'logType' ] == 'apsara_log' : return LogtailConfigHelper . generate_apsara_log_config ( json_value ) return LogtailConfigHelper . generate_common_reg_log_config ( json_value )
Generate a logtail config from a loaded JSON value.
9,747
def _get_batch_requests ( self , timeout = None ) : reqs = [ ] s = time ( ) while len ( reqs ) < self . batch_size and ( time ( ) - s ) < timeout : try : req = self . queue . get ( block = False ) self . queue . task_done ( ) reqs . append ( req ) except Empty as ex : if self . stop_flag : break else : sleep ( 0.1 ) if not reqs : raise Empty elif len ( reqs ) <= 1 : return reqs [ 0 ] else : logitems = [ ] req = reqs [ 0 ] for req in reqs : logitems . extend ( req . get_log_items ( ) ) ret = PutLogsRequest ( self . project , self . log_store , req . topic , logitems = logitems ) ret . __record__ = req . __record__ return ret
Try to get requests as fast as possible; once empty and the stop flag is set, or on time-out, just return Empty.
9,748
def mmGetPlotUnionSDRActivity ( self , title = "Union SDR Activity Raster" , showReset = False , resetShading = 0.25 ) : unionSDRTrace = self . mmGetTraceUnionSDR ( ) . data columnCount = self . getNumColumns ( ) activityType = "Union SDR Activity" return self . mmGetCellTracePlot ( unionSDRTrace , columnCount , activityType , title = title , showReset = showReset , resetShading = resetShading )
Returns a plot of the activity of union SDR bits.
9,749
def mmGetMetricStabilityConfusion ( self ) : self . _mmComputeSequenceRepresentationData ( ) numbers = self . _mmData [ "stabilityConfusion" ] return Metric ( self , "stability confusion" , numbers )
For each iteration that doesn't follow a reset, looks at every other iteration for the same world that doesn't follow a reset, and computes the number of bits that show up in one or the other set of active cells for that iteration, but not both. This metric returns the distribution of those numbers.
9,750
def mmGetPlotStability ( self , title = "Stability" , showReset = False , resetShading = 0.25 ) : plot = Plot ( self , title ) self . _mmComputeSequenceRepresentationData ( ) data = self . _mmData [ "stabilityConfusion" ] plot . addGraph ( sorted ( data , reverse = True ) , position = 211 , xlabel = "Time steps" , ylabel = "Overlap" ) plot . addHistogram ( data , position = 212 , bins = 100 , xlabel = "Overlap" , ylabel = "# time steps" ) return plot
Returns a plot of the overlap metric between union SDRs within a sequence.
9,751
def mmGetMetricDistinctnessConfusion ( self ) : self . _mmComputeSequenceRepresentationData ( ) numbers = self . _mmData [ "distinctnessConfusion" ] return Metric ( self , "distinctness confusion" , numbers )
For each iteration that doesn't follow a reset, looks at every other iteration for every other world that doesn't follow a reset, and computes the number of bits that show up in both sets of active cells for those iterations. This metric returns the distribution of those numbers.
9,752
def _mmUpdateDutyCycles ( self ) : period = self . getDutyCyclePeriod ( ) unionSDRArray = numpy . zeros ( self . getNumColumns ( ) ) unionSDRArray [ list ( self . _mmTraces [ "unionSDR" ] . data [ - 1 ] ) ] = 1 self . _mmData [ "unionSDRDutyCycle" ] = UnionTemporalPoolerMonitorMixin . _mmUpdateDutyCyclesHelper ( self . _mmData [ "unionSDRDutyCycle" ] , unionSDRArray , period ) self . _mmData [ "persistenceDutyCycle" ] = UnionTemporalPoolerMonitorMixin . _mmUpdateDutyCyclesHelper ( self . _mmData [ "persistenceDutyCycle" ] , self . _poolingActivation , period )
Update the duty cycle variables internally tracked by the TM mixin.
9,753
def _mmComputeSequenceRepresentationData ( self ) : if not self . _sequenceRepresentationDataStale : return unionSDRTrace = self . mmGetTraceUnionSDR ( ) sequenceLabelsTrace = self . mmGetTraceSequenceLabels ( ) resetsTrace = self . mmGetTraceResets ( ) n = len ( unionSDRTrace . data ) overlapMatrix = numpy . empty ( ( n , n ) , dtype = uintType ) stabilityConfusionUnionSDR = [ ] distinctnessConfusionUnionSDR = [ ] for i in xrange ( n ) : for j in xrange ( i + 1 ) : overlapUnionSDR = len ( unionSDRTrace . data [ i ] & unionSDRTrace . data [ j ] ) overlapMatrix [ i ] [ j ] = overlapUnionSDR overlapMatrix [ j ] [ i ] = overlapUnionSDR if ( i != j and sequenceLabelsTrace . data [ i ] is not None and not resetsTrace . data [ i ] and sequenceLabelsTrace . data [ j ] is not None and not resetsTrace . data [ j ] ) : if sequenceLabelsTrace . data [ i ] == sequenceLabelsTrace . data [ j ] : stabilityConfusionUnionSDR . append ( overlapUnionSDR ) else : distinctnessConfusionUnionSDR . append ( overlapUnionSDR ) self . _mmData [ "overlap" ] = overlapMatrix self . _mmData [ "stabilityConfusion" ] = stabilityConfusionUnionSDR self . _mmData [ "distinctnessConfusion" ] = distinctnessConfusionUnionSDR self . _sequenceRepresentationDataStale = False
Calculates values for the overlap distance matrix, stability within a sequence, and distinctness between sequences. These values are cached so that they do not need to be recomputed for calls to each of several accessor methods that use these values.
9,754
def registerResearchRegion ( regionTypeName , moduleName = None ) : global _PY_REGIONS if moduleName is None : moduleName = "htmresearch.regions." + regionTypeName if regionTypeName not in _PY_REGIONS : module = __import__ ( moduleName , { } , { } , regionTypeName ) unregisteredClass = getattr ( module , regionTypeName ) Network . registerRegion ( unregisteredClass ) _PY_REGIONS . append ( regionTypeName )
Register this region so that NuPIC can later find it.
9,755
def loadNumpyImages ( self , path , key = None ) : data = np . load ( path ) if isinstance ( data , dict ) : if key is None : raise ValueError ( "Images are stored as a dict, a key must be provided!" ) try : data = data [ key ] except KeyError : raise KeyError ( "Wrong key for provided data." ) if not isinstance ( data , np . ndarray ) : raise TypeError ( "Data must be stored as a dict or numpy array." ) self . _initializeDimensions ( data ) return data
Loads images using numpy.
9,756
def _initializeDimensions ( self , inputData ) : if len ( inputData . shape ) == 2 : self . imageHeight , self . numImages = inputData . shape self . imageWidth , self . numChannels = None , None elif len ( inputData . shape ) == 3 : self . imageHeight , self . imageWidth , self . numImages = inputData . shape self . numChannels = None elif len ( inputData . shape ) == 4 : self . imageHeight , self . imageWidth , self . numChannels , self . numImages = inputData . shape else : raise ValueError ( "The provided image set has more than 4 dimensions." )
Stores the training images' dimensions for convenience.
9,757
def mmGetPermanencesPlot ( self , title = None ) : plot = Plot ( self , title ) data = numpy . zeros ( ( self . getNumColumns ( ) , self . getNumInputs ( ) ) ) for i in xrange ( self . getNumColumns ( ) ) : self . getPermanence ( i , data [ i ] ) plot . add2DArray ( data , xlabel = "Permanences" , ylabel = "Column" ) return plot
Returns a plot of column permanences.
9,758
def finalize ( self , params , rep ) : if params . get ( "saveNet" , True ) : saveDir = os . path . join ( params [ "path" ] , params [ "name" ] , "model_{}.pt" . format ( rep ) ) torch . save ( self . model , saveDir )
Save the full model once we are done.
9,759
def loadDatasets ( self , params ) : n_mels = 32 self . use_preprocessed_dataset = PreprocessedSpeechDataset . isValid ( self . dataDir ) if self . use_preprocessed_dataset : trainDataset = PreprocessedSpeechDataset ( self . dataDir , subset = "train" ) validationDataset = PreprocessedSpeechDataset ( self . dataDir , subset = "valid" , silence_percentage = 0 ) testDataset = PreprocessedSpeechDataset ( self . dataDir , subset = "test" , silence_percentage = 0 ) bgNoiseDataset = PreprocessedSpeechDataset ( self . dataDir , subset = "noise" , silence_percentage = 0 ) else : trainDataDir = os . path . join ( self . dataDir , "train" ) testDataDir = os . path . join ( self . dataDir , "test" ) validationDataDir = os . path . join ( self . dataDir , "valid" ) backgroundNoiseDir = os . path . join ( self . dataDir , params [ "background_noise_dir" ] ) dataAugmentationTransform = transforms . Compose ( [ ChangeAmplitude ( ) , ChangeSpeedAndPitchAudio ( ) , FixAudioLength ( ) , ToSTFT ( ) , StretchAudioOnSTFT ( ) , TimeshiftAudioOnSTFT ( ) , FixSTFTDimension ( ) , ] ) featureTransform = transforms . Compose ( [ ToMelSpectrogramFromSTFT ( n_mels = n_mels ) , DeleteSTFT ( ) , ToTensor ( 'mel_spectrogram' , 'input' ) ] ) trainDataset = SpeechCommandsDataset ( trainDataDir , transforms . Compose ( [ dataAugmentationTransform , featureTransform ] ) ) testFeatureTransform = transforms . Compose ( [ FixAudioLength ( ) , ToMelSpectrogram ( n_mels = n_mels ) , ToTensor ( 'mel_spectrogram' , 'input' ) ] ) validationDataset = SpeechCommandsDataset ( validationDataDir , testFeatureTransform , silence_percentage = 0 , ) testDataset = SpeechCommandsDataset ( testDataDir , testFeatureTransform , silence_percentage = 0 , ) bg_dataset = BackgroundNoiseDataset ( backgroundNoiseDir , transforms . Compose ( [ FixAudioLength ( ) , ToSTFT ( ) ] ) , ) bgNoiseTransform = transforms . Compose ( [ FixAudioLength ( ) , ToSTFT ( ) , AddBackgroundNoiseOnSTFT ( bg_dataset ) , ToMelSpectrogramFromSTFT ( n_mels = n_mels ) , DeleteSTFT ( ) , ToTensor ( 'mel_spectrogram' , 'input' ) ] ) bgNoiseDataset = SpeechCommandsDataset ( testDataDir , bgNoiseTransform , silence_percentage = 0 , ) weights = trainDataset . make_weights_for_balanced_classes ( ) sampler = WeightedRandomSampler ( weights , len ( weights ) ) self . train_loader = DataLoader ( trainDataset , batch_size = params [ "batch_size" ] , sampler = sampler ) self . validation_loader = DataLoader ( validationDataset , batch_size = params [ "batch_size" ] , shuffle = False ) self . test_loader = DataLoader ( testDataset , batch_size = params [ "batch_size" ] , sampler = None , shuffle = False ) self . bg_noise_loader = DataLoader ( bgNoiseDataset , batch_size = params [ "batch_size" ] , sampler = None , shuffle = False )
The GSC dataset specifies which files are to be used for training, test, and validation. We assume the data has already been processed according to those files into separate train, test, and valid directories.
9,760
def contains ( self , location ) : return self . almostEqual ( sum ( [ coord ** 2 for coord in location ] ) , self . radius ** 2 )
Checks that the provided point is on the sphere.
9,761
def contains ( self , location ) : if self . almostEqual ( location [ 0 ] ** 2 + location [ 1 ] ** 2 , self . radius ** 2 ) : return abs ( location [ 2 ] ) < self . height / 2. if self . almostEqual ( location [ 2 ] , self . height / 2. ) : return location [ 0 ] ** 2 + location [ 1 ] ** 2 < self . radius ** 2 return False
Checks that the provided point is on the cylinder.
9,762
def sampleLocation ( self ) : areaRatio = self . radius / ( self . radius + self . height ) if random . random ( ) < areaRatio : return self . _sampleLocationOnDisc ( ) else : return self . _sampleLocationOnSide ( )
Simple method to sample uniformly from a cylinder.
9,763
def sampleLocationFromFeature ( self , feature ) : if feature == "topDisc" : return self . _sampleLocationOnDisc ( top = True ) elif feature == "topEdge" : return self . _sampleLocationOnEdge ( top = True ) elif feature == "bottomDisc" : return self . _sampleLocationOnDisc ( top = False ) elif feature == "bottomEdge" : return self . _sampleLocationOnEdge ( top = False ) elif feature == "side" : return self . _sampleLocationOnSide ( ) elif feature == "random" : return self . sampleLocation ( ) else : raise NameError ( "No such feature in {}: {}" . format ( self , feature ) )
Samples a location from the specified feature.
9,764
def _sampleLocationOnDisc ( self , top = None ) : if top is None : z = random . choice ( [ - 1 , 1 ] ) * self . height / 2. else : z = self . height / 2. if top else - self . height / 2. sampledAngle = 2 * random . random ( ) * pi sampledRadius = self . radius * sqrt ( random . random ( ) ) x , y = sampledRadius * cos ( sampledAngle ) , sampledRadius * sin ( sampledAngle ) return [ x , y , z ]
Helper method to sample from the top and bottom discs of a cylinder.
9,765
def _sampleLocationOnEdge ( self , top = None ) : if top is None : z = random . choice ( [ - 1 , 1 ] ) * self . height / 2. else : z = self . height / 2. if top else - self . height / 2. sampledAngle = 2 * random . random ( ) * pi x , y = self . radius * cos ( sampledAngle ) , self . radius * sin ( sampledAngle ) return [ x , y , z ]
Helper method to sample from the top and bottom edges of a cylinder.
9,766
def _sampleLocationOnSide ( self ) : z = random . uniform ( - 1 , 1 ) * self . height / 2. sampledAngle = 2 * random . random ( ) * pi x , y = self . radius * cos ( sampledAngle ) , self . radius * sin ( sampledAngle ) return [ x , y , z ]
Helper method to sample from the lateral surface of a cylinder.
9,767
def contains ( self , location ) : for i , coord in enumerate ( location ) : if self . almostEqual ( abs ( coord ) , self . dimensions [ i ] / 2. ) : return True return False
A location is on the box if one of its coordinates is saturated (i.e. equal to half of the corresponding dimension).
9,768
def sampleLocationFromFeature ( self , feature ) : if feature == "face" : return self . _sampleFromFaces ( ) elif feature == "edge" : return self . _sampleFromEdges ( ) elif feature == "vertex" : return self . _sampleFromVertices ( ) elif feature == "random" : return self . sampleLocation ( ) else : raise NameError ( "No such feature in {}: {}" . format ( self , feature ) )
Samples a location from one specific feature.
9,769
def _sampleFromFaces ( self ) : coordinates = [ random . uniform ( - 1 , 1 ) * dim / 2. for dim in self . dimensions ] dim = random . choice ( range ( self . dimension ) ) coordinates [ dim ] = self . dimensions [ dim ] / 2. * random . choice ( [ - 1 , 1 ] ) return coordinates
We start by sampling a dimension to max out, then sample the sign and the other dimensions' values.
9,770
def plot ( self , numPoints = 100 ) : fig = plt . figure ( ) ax = fig . add_subplot ( 111 , projection = '3d' ) x = np . linspace ( - self . dimensions [ 0 ] / 2. , self . dimensions [ 0 ] / 2. , numPoints ) y = np . linspace ( - self . dimensions [ 1 ] / 2. , self . dimensions [ 1 ] / 2. , numPoints ) z = np . linspace ( - self . dimensions [ 2 ] / 2. , self . dimensions [ 2 ] / 2. , numPoints ) Xc , Yc = np . meshgrid ( x , y ) ax . plot_surface ( Xc , Yc , - self . dimensions [ 2 ] / 2 , alpha = 0.2 , rstride = 20 , cstride = 10 ) ax . plot_surface ( Xc , Yc , self . dimensions [ 2 ] / 2 , alpha = 0.2 , rstride = 20 , cstride = 10 ) Yc , Zc = np . meshgrid ( y , z ) ax . plot_surface ( - self . dimensions [ 0 ] / 2 , Yc , Zc , alpha = 0.2 , rstride = 20 , cstride = 10 ) ax . plot_surface ( self . dimensions [ 0 ] / 2 , Yc , Zc , alpha = 0.2 , rstride = 20 , cstride = 10 ) Xc , Zc = np . meshgrid ( x , z ) ax . plot_surface ( Xc , - self . dimensions [ 1 ] / 2 , Zc , alpha = 0.2 , rstride = 20 , cstride = 10 ) ax . plot_surface ( Xc , self . dimensions [ 1 ] / 2 , Zc , alpha = 0.2 , rstride = 20 , cstride = 10 ) ax . set_xlabel ( "X" ) ax . set_ylabel ( "Y" ) ax . set_zlabel ( "Z" ) plt . title ( "{}" . format ( self ) ) return fig , ax
Specific plotting method for boxes.
9,771
def visualize ( self , numPoints = 100 ) : try : import pyqtgraph as pg import pyqtgraph . multiprocess as mp import pyqtgraph . opengl as gl except ImportError as e : print ( "PyQtGraph needs to be installed." ) return ( None , None , None , None , None ) class PlyVisWindow : def __init__ ( self ) : self . proc = mp . QtProcess ( ) self . rpg = self . proc . _import ( 'pyqtgraph' ) self . rgl = self . proc . _import ( 'pyqtgraph.opengl' ) self . rview = self . rgl . GLViewWidget ( ) self . rview . setBackgroundColor ( 'k' ) self . rview . setCameraPosition ( distance = 10 ) self . grid = self . rgl . GLGridItem ( ) self . rview . addItem ( self . grid ) self . rpg . setConfigOption ( 'background' , 'w' ) self . rpg . setConfigOption ( 'foreground' , 'k' ) def snapshot ( self , name = "" ) : self . rview . grabFrameBuffer ( ) . save ( "{}.png" . format ( name ) ) pg . mkQApp ( ) self . graphicsWindow = PlyVisWindow ( ) self . graphicsWindow . rview . setWindowTitle ( self . file ) vertices = self . vertices . data vertices = np . array ( vertices . tolist ( ) ) faces = np . array ( [ self . faces [ i ] [ 'vertex_indices' ] for i in range ( self . faces . count ) ] ) self . mesh = self . graphicsWindow . rgl . GLMeshItem ( vertexes = vertices , faces = faces , shader = 'normalColor' , drawEdges = True , drawFaces = True , computeNormals = False , smooth = False ) self . graphicsWindow . rview . addItem ( self . mesh ) self . graphicsWindow . rview . show ( ) pos = np . empty ( ( numPoints , 3 ) ) size = np . ones ( ( numPoints , ) ) color = np . ones ( ( numPoints , 4 ) ) self . scatter = self . graphicsWindow . rgl . GLScatterPlotItem ( pos = pos , size = size , color = color , pxMode = True ) self . graphicsWindow . rview . addItem ( self . scatter ) return self . scatter , self . mesh , pos , size , color
Visualization utility for models. Helps to debug the math and logic, and to monitor complex objects with difficult-to-define boundaries.
9,772
def createRandomSequences ( self , numSequences , sequenceLength ) : for _ in xrange ( numSequences ) : self . addObject ( [ numpy . random . randint ( 0 , self . numFeatures ) for _ in xrange ( sequenceLength ) ] )
Creates a set of random sequences, each with sequenceLength elements, and adds them to the machine.
9,773
def _addNoise ( self , pattern , noiseLevel , inputSize ) : if pattern is None : return None candidateBits = list ( set ( range ( inputSize ) ) - set ( pattern ) ) random . shuffle ( candidateBits ) newBits = set ( ) for bit in pattern : if random . random ( ) < noiseLevel : newBits . add ( candidateBits . pop ( ) ) else : newBits . add ( bit ) return newBits
Adds noise to the given pattern and returns the new one.
9,774
def _generateFeatures ( self ) : size = self . sensorInputSize bits = self . numInputBits self . features = [ ] for _ in xrange ( self . numColumns ) : self . features . append ( [ self . _generatePattern ( bits , size ) for _ in xrange ( self . numFeatures ) ] )
Generates a pool of features to be used for the experiments.
9,775
def createThreeObjects ( ) : objectA = zip ( range ( 10 ) , range ( 10 ) ) objectB = [ ( 0 , 0 ) , ( 2 , 2 ) , ( 1 , 1 ) , ( 1 , 4 ) , ( 4 , 2 ) , ( 4 , 1 ) ] objectC = [ ( 0 , 0 ) , ( 1 , 1 ) , ( 3 , 1 ) , ( 0 , 1 ) ] return [ objectA , objectB , objectC ]
Helper function that creates a set of three objects used for basic experiments.
9,776
def runSharedFeatures ( noiseLevel = None , profile = False ) : exp = L4L2Experiment ( "shared_features" , enableLateralSP = True , enableFeedForwardSP = True ) pairs = createThreeObjects ( ) objects = createObjectMachine ( machineType = "simple" , numInputBits = 20 , sensorInputSize = 1024 , externalInputSize = 1024 ) for object in pairs : objects . addObject ( object ) exp . learnObjects ( objects . provideObjectsToLearn ( ) ) if profile : exp . printProfile ( ) inferConfig = { "numSteps" : 10 , "noiseLevel" : noiseLevel , "pairs" : { 0 : zip ( range ( 10 ) , range ( 10 ) ) } } exp . infer ( objects . provideObjectToInfer ( inferConfig ) , objectName = 0 ) if profile : exp . printProfile ( ) exp . plotInferenceStats ( fields = [ "L2 Representation" , "Overlap L2 with object" , "L4 Representation" ] , )
Runs a simple experiment where three objects share a number of location-feature pairs.
9,777
def runStretchExperiment ( numObjects = 25 ) : exp = L4L2Experiment ( "profiling_experiment" , enableLateralSP = True , enableFeedForwardSP = True ) objects = createObjectMachine ( machineType = "simple" , numInputBits = 20 , sensorInputSize = 1024 , externalInputSize = 1024 ) objects . createRandomObjects ( numObjects = numObjects , numPoints = 10 ) exp . learnObjects ( objects . provideObjectsToLearn ( ) ) exp . printProfile ( ) inferConfig = { "numSteps" : len ( objects [ 0 ] ) , "pairs" : { 0 : objects [ 0 ] } } exp . infer ( objects . provideObjectToInfer ( inferConfig ) , objectName = 0 ) exp . printProfile ( ) exp . plotInferenceStats ( fields = [ "L2 Representation" , "Overlap L2 with object" , "L4 Representation" ] )
Generates a lot of random objects to profile the network.
9,778
def plot ( self , numPoints = 100 ) : fig = plt . figure ( ) ax = fig . add_subplot ( 111 , projection = '3d' ) for feature in self . _FEATURES : for _ in xrange ( numPoints ) : x , y , z = tuple ( self . sampleLocationFromFeature ( feature ) ) ax . scatter ( x , y , z , marker = "." ) ax . set_xlabel ( 'X' ) ax . set_ylabel ( 'Y' ) ax . set_zlabel ( 'Z' ) plt . title ( "{}" . format ( self ) ) return fig , ax
Plots the object in a 3D scatter.
9,779
def printSequence ( x , formatString = "%d" ) : numElements = len ( x ) s = "" for j in range ( numElements ) : s += formatString % x [ j ] print s
Compactly print a list or numpy array.
9,780
def printSequences ( x , formatString = "%d" ) : [ seqLen , numElements ] = x . shape for i in range ( seqLen ) : s = "" for j in range ( numElements ) : s += formatString % x [ i ] [ j ] print s
Print a bunch of sequences stored in a 2D numpy array.
9,781
def initialize ( self , useRandomEncoder ) : self . setRandomSeed ( self . seed ) self . dim = numpy . shape ( self . spatialConfig ) [ - 1 ] self . spatialMap = dict ( zip ( map ( tuple , list ( self . spatialConfig ) ) , self . sensoryInputElements ) ) self . lengthMotorInput1D = ( 2 * self . maxDisplacement + 1 ) * self . numActiveBitsMotorInput uniqueSensoryElements = list ( set ( self . sensoryInputElementsPool ) ) if useRandomEncoder : self . sensoryEncoder = SDRCategoryEncoder ( n = 1024 , w = self . numActiveBitsSensoryInput , categoryList = uniqueSensoryElements , forced = True ) self . lengthSensoryInput = self . sensoryEncoder . getWidth ( ) else : self . lengthSensoryInput = ( len ( self . sensoryInputElementsPool ) + 1 ) * self . numActiveBitsSensoryInput self . sensoryEncoder = CategoryEncoder ( w = self . numActiveBitsSensoryInput , categoryList = uniqueSensoryElements , forced = True ) motorEncoder1D = ScalarEncoder ( n = self . lengthMotorInput1D , w = self . numActiveBitsMotorInput , minval = - self . maxDisplacement , maxval = self . maxDisplacement , clipInput = True , forced = True ) self . motorEncoder = VectorEncoder ( length = self . dim , encoder = motorEncoder1D )
Initialize the various data structures.
9,782
def generateSensorimotorSequence ( self , sequenceLength ) : motorSequence = [ ] sensorySequence = [ ] sensorimotorSequence = [ ] currentEyeLoc = self . nupicRandomChoice ( self . spatialConfig ) for i in xrange ( sequenceLength ) : currentSensoryInput = self . spatialMap [ tuple ( currentEyeLoc ) ] nextEyeLoc , currentEyeV = self . getNextEyeLocation ( currentEyeLoc ) if self . verbosity : print "sensory input = " , currentSensoryInput , "eye location = " , currentEyeLoc , " motor command = " , currentEyeV sensoryInput = self . encodeSensoryInput ( currentSensoryInput ) motorInput = self . encodeMotorInput ( list ( currentEyeV ) ) sensorimotorInput = numpy . concatenate ( ( sensoryInput , motorInput ) ) sensorySequence . append ( sensoryInput ) motorSequence . append ( motorInput ) sensorimotorSequence . append ( sensorimotorInput ) currentEyeLoc = nextEyeLoc return ( sensorySequence , motorSequence , sensorimotorSequence )
Generate sensorimotor sequences of length sequenceLength.
9,783
def getNextEyeLocation ( self , currentEyeLoc ) : possibleEyeLocs = [ ] for loc in self . spatialConfig : shift = abs ( max ( loc - currentEyeLoc ) ) if self . minDisplacement <= shift <= self . maxDisplacement : possibleEyeLocs . append ( loc ) nextEyeLoc = self . nupicRandomChoice ( possibleEyeLocs ) eyeDiff = nextEyeLoc - currentEyeLoc return nextEyeLoc , eyeDiff
Generate the next eye location based on the current eye location.
9,784
def setRandomSeed ( self , seed ) : self . seed = seed self . _random = Random ( ) self . _random . setSeed ( seed )
Reset the NuPIC random generator. This is necessary to reset the random seed in order to generate new sequences.
9,785
def encodeMotorInput ( self , motorInput ) : if not hasattr ( motorInput , "__iter__" ) : motorInput = list ( [ motorInput ] ) return self . motorEncoder . encode ( motorInput )
Encode a motor command to a bit vector.
9,786
def decodeMotorInput ( self , motorInputPattern ) : key = self . motorEncoder . decode ( motorInputPattern ) [ 0 ] . keys ( ) [ 0 ] motorCommand = self . motorEncoder . decode ( motorInputPattern ) [ 0 ] [ key ] [ 1 ] [ 0 ] return motorCommand
Decode a motor command from a bit vector.
9,787
def printSensoryCodingScheme ( self ) : print "\nsensory coding scheme: " for loc in self . spatialConfig : sensoryElement = self . spatialMap [ tuple ( loc ) ] print sensoryElement , "%s : " % loc , printSequence ( self . encodeSensoryInput ( sensoryElement ) )
Print sensory inputs along with their encoded versions.
9,788
def build ( self , n , vec ) : for i in range ( - self . maxDisplacement , self . maxDisplacement + 1 ) : next = vec + [ i ] if n == 1 : print '{:>5}\t' . format ( next ) , " = " , printSequence ( self . encodeMotorInput ( next ) ) else : self . build ( n - 1 , next )
Recursive helper function to print the motor coding scheme.
9,789
def getDefaultTMParams ( self , inputSize , numInputBits ) : sampleSize = int ( 1.5 * numInputBits ) if numInputBits == 20 : activationThreshold = 18 minThreshold = 18 elif numInputBits == 10 : activationThreshold = 8 minThreshold = 8 else : activationThreshold = int ( numInputBits * .6 ) minThreshold = activationThreshold return { "columnCount" : inputSize , "cellsPerColumn" : 16 , "learn" : True , "learnOnOneCell" : False , "initialPermanence" : 0.41 , "connectedPermanence" : 0.6 , "permanenceIncrement" : 0.1 , "permanenceDecrement" : 0.03 , "minThreshold" : minThreshold , "basalPredictedSegmentDecrement" : 0.003 , "apicalPredictedSegmentDecrement" : 0.0 , "reducedBasalThreshold" : int ( activationThreshold * 0.6 ) , "activationThreshold" : activationThreshold , "sampleSize" : sampleSize , "implementation" : "ApicalTiebreak" , "seed" : self . seed }
Returns a good default set of parameters to use in the TM region.
9,790
def create_movie(fig, update_figure, filename, title, fps=15, dpi=100):
    FFMpegWriter = manimation.writers['ffmpeg']
    metadata = dict(title=title)
    writer = FFMpegWriter(fps=fps, metadata=metadata)
    with writer.saving(fig, filename, dpi):
        t = 0
        while True:
            if update_figure(t):
                writer.grab_frame()
                t += 1
            else:
                break
Helps us to create a movie.
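A minimal usage sketch for create_movie, assuming matplotlib's animation writers and an ffmpeg binary are available; the figure contents are arbitrary:

import numpy as np
import matplotlib
matplotlib.use("Agg")  # render off-screen
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
line, = ax.plot([], [])
ax.set_xlim(0, 2 * np.pi)
ax.set_ylim(-1, 1)

def update_figure(t):
    # Return True to grab another frame, False to stop the writer loop.
    if t >= 60:
        return False
    x = np.linspace(0, 2 * np.pi, 200)
    line.set_data(x, np.sin(x + 0.1 * t))
    return True

create_movie(fig, update_figure, "sine.mp4", title="demo", fps=15)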
9,791
def createExperimentArgs ( ) : experimentArguments = [ ] for n in [ 1500 , 1700 , 1900 , 2100 ] : for a in [ 128 ] : if ( a == 64 and n <= 1500 ) or ( a == 128 and n <= 1900 ) or ( a == 256 ) : experimentArguments . append ( ( "./sdr_calculations2" , "results_errorbars/temp_" + str ( n ) + "_" + str ( a ) + ".csv" , "200000" , str ( n ) , str ( a ) , "0" ) , ) return experimentArguments
Run the basic probability of false positives experiment.
9,792
def createNoiseExperimentArgs ( ) : experimentArguments = [ ] n = 6000 for a in [ 128 ] : noisePct = 0.75 while noisePct <= 0.85 : noise = int ( round ( noisePct * a , 0 ) ) experimentArguments . append ( ( "./sdr_calculations2" , "results_noise_10m/temp_" + str ( n ) + "_" + str ( a ) + "_" + str ( noise ) + "_30.csv" , "200000" , str ( n ) , str ( a ) , str ( noise ) ) ) noisePct += 0.05 return experimentArguments
Run the probability of false negatives with noise experiment.
9,793
def generateMinicolumnSDRs ( n , w , threshold ) : if not os . path . exists ( "sdrs" ) : os . makedirs ( "sdrs" ) filename = "sdrs/{}_{}_{}.json" . format ( n , w , threshold ) if len ( glob . glob ( filename ) ) > 0 : with open ( filename , "r" ) as fIn : sdrs = json . load ( fIn ) else : begin = time . time ( ) sdrs = enumerateDistantSDRsBruteForce ( n , w , threshold ) end = time . time ( ) with open ( filename , "w" ) as fOut : json . dump ( [ sdr . tolist ( ) for sdr in sdrs ] , fOut ) print ( "Saved" , filename ) print ( "Elapsed time: {:.2f} seconds" . format ( end - begin ) ) return sdrs
Wraps enumerateDistantSDRsBruteForce, caching its result on the filesystem.
9,794
def createEvenlySpreadSDRs ( numSDRs , n , w ) : assert w <= n available = np . arange ( n ) np . random . shuffle ( available ) SDRs = [ ] for _ in xrange ( numSDRs ) : selected = available [ : w ] available = available [ w : ] if available . size == 0 : remainderSelected = np . random . choice ( np . setdiff1d ( np . arange ( n ) , selected ) , size = ( w - selected . size ) , replace = False ) selected = np . append ( selected , remainderSelected ) available = np . setdiff1d ( np . arange ( n ) , remainderSelected ) np . random . shuffle ( available ) selected . sort ( ) SDRs . append ( selected ) return SDRs
Return a set of approximately random SDRs that use every available bit an equal number of times, plus or minus 1.
9,795
def carefullyCollideContexts ( numContexts , numCells , numMinicolumns ) : minicolumns = [ ] for _ in xrange ( numMinicolumns ) : contextsForCell = [ set ( ) for _ in xrange ( numCells ) ] contexts = range ( numContexts ) random . shuffle ( contexts ) while len ( contexts ) > 0 : eligibleCells = range ( len ( contextsForCell ) ) while len ( contexts ) > 0 and len ( eligibleCells ) > 0 : candidateAdditions = [ ( context , cell ) for context in contexts for cell in eligibleCells ] badness = [ sum ( sum ( 1 if ( context in otherCellContexts and otherContext in otherCellContexts ) else 0 for minicolumn in minicolumns for otherCellContexts in minicolumn ) for otherContext in contextsForCell [ cell ] ) for context , cell in candidateAdditions ] selectedContext , selectedCell = candidateAdditions [ badness . index ( min ( badness ) ) ] contextsForCell [ selectedCell ] . add ( selectedContext ) eligibleCells . remove ( selectedCell ) contexts . remove ( selectedContext ) minicolumns . append ( contextsForCell ) return minicolumns
Use a greedy algorithm to choose how each minicolumn should distribute contexts between its cells.
9,796
def printSegmentForCell ( tm , cell ) : print "Segments for cell" , cell , ":" for seg in tm . basalConnections . _cells [ cell ] . _segments : print " " , synapses = seg . _synapses for s in synapses : print "%d:%g" % ( s . presynapticCell , s . permanence ) , print
Print segment information for this cell
9,797
def compute ( self , inputs , outputs ) : if len ( self . queue ) > 0 : data = self . queue . pop ( ) else : raise Exception ( "RawValues: No data: queue is empty " ) outputs [ "resetOut" ] [ 0 ] = data [ "reset" ] outputs [ "dataOut" ] [ : ] = data [ "dataOut" ]
Get the next record from the queue and output it.
9,798
def addDataToQueue ( self , displacement , reset = False ) : self . queue . appendleft ( { "dataOut" : list ( displacement ) , "reset" : bool ( reset ) } )
Add the given displacement to the region's internal queue. Calls to compute will cause items in the queue to be dequeued in FIFO order.
9,799
def runNetworkOnSequences ( self , inputSequences , inputCategories , tmLearn = True , upLearn = None , classifierLearn = False , verbosity = 0 , progressInterval = None ) : currentTime = time . time ( ) for i in xrange ( len ( inputSequences ) ) : sensorPattern = inputSequences [ i ] inputCategory = inputCategories [ i ] self . runNetworkOnPattern ( sensorPattern , tmLearn = tmLearn , upLearn = upLearn , sequenceLabel = inputCategory ) if classifierLearn and sensorPattern is not None : unionSDR = self . up . getUnionSDR ( ) upCellCount = self . up . getColumnDimensions ( ) self . classifier . learn ( unionSDR , inputCategory , isSparse = upCellCount ) if verbosity > 1 : pprint . pprint ( "{0} is category {1}" . format ( unionSDR , inputCategory ) ) if progressInterval is not None and i > 0 and i % progressInterval == 0 : elapsed = ( time . time ( ) - currentTime ) / 60.0 print ( "Ran {0} / {1} elements of sequence in " "{2:0.2f} minutes." . format ( i , len ( inputSequences ) , elapsed ) ) currentTime = time . time ( ) print MonitorMixinBase . mmPrettyPrintMetrics ( self . tm . mmGetDefaultMetrics ( ) ) if verbosity >= 2 : traces = self . tm . mmGetDefaultTraces ( verbosity = verbosity ) print MonitorMixinBase . mmPrettyPrintTraces ( traces , breakOnResets = self . tm . mmGetTraceResets ( ) ) if upLearn is not None : traces = self . up . mmGetDefaultTraces ( verbosity = verbosity ) print MonitorMixinBase . mmPrettyPrintTraces ( traces , breakOnResets = self . up . mmGetTraceResets ( ) ) print
Runs the Union Temporal Pooler network on the specified sequences.