idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
50,800
def main(_):
    """Load a trained algorithm and render videos.

    Raises:
      KeyError: If the logging or output directory flags are not set.
    """
    utility.set_up_logging()
    if not FLAGS.logdir or not FLAGS.outdir:
        raise KeyError('You must specify logging and outdirs directories.')
    FLAGS.logdir = os.path.expanduser(FLAGS.logdir)
    FLAGS.outdir = os.path.expanduser(FLAGS.outdir)
    visualize(
        FLAGS.logdir, FLAGS.outdir, FLAGS.num_agents, FLAGS.num_episodes,
        FLAGS.checkpoint, FLAGS.env_processes)
Load a trained algorithm and render videos .
50,801
def reinit_nested_vars(variables, indices=None):
    """Reset all variables in a nested tuple to zeros.

    Args:
      variables: Variable or nested tuple/list of variables.
      indices: Optional batch indices to reset; resets everything when None.

    Returns:
      Operation performing the reset.
    """
    if isinstance(variables, (tuple, list)):
        ops = [reinit_nested_vars(part, indices) for part in variables]
        return tf.group(*ops)
    if indices is None:
        return variables.assign(tf.zeros_like(variables))
    # Only zero out the selected rows of the first dimension.
    zeros = tf.zeros([tf.shape(indices)[0]] + variables.shape[1:].as_list())
    return tf.scatter_update(variables, indices, zeros)
Reset all variables in a nested tuple to zeros .
50,802
def assign_nested_vars(variables, tensors, indices=None):
    """Assign tensors to a matching nested tuple of variables.

    Args:
      variables: Variable or nested tuple/list of variables.
      tensors: Tensor or nested tuple/list of tensors, matching `variables`.
      indices: Optional batch indices to assign to; assigns the full
        variables when None.

    Returns:
      Operation performing all assignments.
    """
    if isinstance(variables, (tuple, list)):
        return tf.group(*[
            # Bug fix: forward `indices` into the recursion, so that partial
            # (scatter) updates also apply inside nested state tuples instead
            # of silently falling back to full assignments.
            assign_nested_vars(variable, tensor, indices)
            for variable, tensor in zip(variables, tensors)])
    if indices is None:
        return variables.assign(tensors)
    return tf.scatter_update(variables, indices, tensors)
Assign tensors to matching nested tuple of variables .
50,803
def discounted_return(reward, length, discount):
    """Discounted Monte-Carlo returns.

    Args:
      reward: Reward tensor of shape (batch, time).
      length: Valid episode lengths, shape (batch,).
      discount: Scalar discount factor.

    Returns:
      Tensor of per-timestep returns with gradients stopped.
    """
    # Mask out padded timesteps beyond each episode's length.
    timestep = tf.range(reward.shape[1].value)
    mask = tf.cast(timestep[None, :] < length[:, None], tf.float32)
    # Scan over the reversed time axis to accumulate discounted rewards,
    # then reverse back so returns align with the original time order.
    return_ = tf.reverse(tf.transpose(tf.scan(
        lambda agg, cur: cur + discount * agg,
        tf.transpose(tf.reverse(mask * reward, [1]), [1, 0]),
        tf.zeros_like(reward[:, -1]), 1, False), [1, 0]), [1])
    return tf.check_numerics(tf.stop_gradient(return_), 'return')
Discounted Monte - Carlo returns .
50,804
def fixed_step_return(reward, value, length, discount, window):
    """N-step discounted return.

    Args:
      reward: Reward tensor of shape (batch, time).
      value: Value estimate tensor of shape (batch, time).
      length: Valid episode lengths, shape (batch,).
      discount: Scalar discount factor.
      window: Number of reward steps before bootstrapping from `value`.

    Returns:
      Tensor of per-timestep returns with gradients stopped.
    """
    timestep = tf.range(reward.shape[1].value)
    mask = tf.cast(timestep[None, :] < length[:, None], tf.float32)
    return_ = tf.zeros_like(reward)
    # Accumulate the next `window` rewards, shifting and discounting the
    # reward sequence one step each iteration.
    for _ in range(window):
        return_ += reward
        reward = discount * tf.concat(
            [reward[:, 1:], tf.zeros_like(reward[:, -1:])], 1)
    # Bootstrap with the value estimate `window` steps into the future.
    return_ += discount ** window * tf.concat(
        [value[:, window:], tf.zeros_like(value[:, -window:])], 1)
    return tf.check_numerics(tf.stop_gradient(mask * return_), 'return')
N - step discounted return .
50,805
def lambda_return(reward, value, length, discount, lambda_):
    """TD-lambda returns.

    Args:
      reward: Reward tensor of shape (batch, time).
      value: Value estimate tensor of shape (batch, time).
      length: Valid episode lengths, shape (batch,).
      discount: Scalar discount factor.
      lambda_: Mixing factor between one-step targets and Monte-Carlo returns.

    Returns:
      Tensor of per-timestep returns with gradients stopped.
    """
    timestep = tf.range(reward.shape[1].value)
    mask = tf.cast(timestep[None, :] < length[:, None], tf.float32)
    # Mix one-step bootstrapped targets into the reward sequence.
    sequence = mask * reward + discount * value * (1 - lambda_)
    discount = mask * discount * lambda_
    sequence = tf.stack([sequence, discount], 2)
    # Backward scan computing agg' = sequence + discount * agg over
    # reversed time, then restore the original order.
    return_ = tf.reverse(tf.transpose(tf.scan(
        lambda agg, cur: cur[0] + cur[1] * agg,
        tf.transpose(tf.reverse(sequence, [1]), [1, 2, 0]),
        tf.zeros_like(value[:, -1]), 1, False), [1, 0]), [1])
    return tf.check_numerics(tf.stop_gradient(return_), 'return')
TD - lambda returns .
50,806
def lambda_advantage(reward, value, length, discount, gae_lambda):
    """Generalized Advantage Estimation.

    Args:
      reward: Reward tensor of shape (batch, time).
      value: Value estimate tensor of shape (batch, time).
      length: Valid episode lengths, shape (batch,).
      discount: Scalar discount factor.
      gae_lambda: GAE mixing factor.

    Returns:
      Tensor of per-timestep advantages with gradients stopped.
    """
    timestep = tf.range(reward.shape[1].value)
    mask = tf.cast(timestep[None, :] < length[:, None], tf.float32)
    # One-step TD error; the final step bootstraps with a zero next value.
    next_value = tf.concat([value[:, 1:], tf.zeros_like(value[:, -1:])], 1)
    delta = reward + discount * next_value - value
    # Discounted backward accumulation of TD errors (the GAE recursion).
    advantage = tf.reverse(tf.transpose(tf.scan(
        lambda agg, cur: cur + gae_lambda * discount * agg,
        tf.transpose(tf.reverse(mask * delta, [1]), [1, 0]),
        tf.zeros_like(delta[:, -1]), 1, False), [1, 0]), [1])
    return tf.check_numerics(tf.stop_gradient(advantage), 'advantage')
Generalized Advantage Estimation .
50,807
def available_gpus():
    """List of GPU device names detected by TensorFlow."""
    devices = device_lib.list_local_devices()
    return [device.name for device in devices if device.device_type == 'GPU']
List of GPU device names detected by TensorFlow .
50,808
def gradient_summaries(grad_vars, groups=None, scope='gradients'):
    """Create histogram summaries of the gradients.

    Args:
      grad_vars: List of (gradient, variable) tuples as returned by an
        optimizer's compute_gradients().
      groups: Mapping of summary names to regexes matched against variable
        names; defaults to a single group containing everything.
      scope: Name scope prefix for the summaries.

    Returns:
      Merged summary tensor.
    """
    groups = groups or {r'all': r'.*'}
    grouped = collections.defaultdict(list)
    for grad, var in grad_vars:
        if grad is None:
            continue
        for name, pattern in groups.items():
            if re.match(pattern, var.name):
                # Substitute capture groups of the pattern into the name.
                name = re.sub(pattern, name, var.name)
                grouped[name].append(grad)
    for name in groups:
        if name not in grouped:
            tf.logging.warn("No variables matching '{}' group.".format(name))
    summaries = []
    for name, grads in grouped.items():
        flat = tf.concat([tf.reshape(grad, [-1]) for grad in grads], 0)
        summaries.append(tf.summary.histogram(scope + '/' + name, flat))
    return tf.summary.merge(summaries)
Create histogram summaries of the gradient .
50,809
def variable_summaries(vars_, groups=None, scope='weights'):
    """Create histogram summaries for the provided variables.

    Args:
      vars_: Iterable of variables to summarize.
      groups: Mapping of summary names to regexes matched against variable
        names; defaults to a single group containing everything.
      scope: Name scope prefix for the summaries.

    Returns:
      Merged summary tensor.
    """
    groups = groups or {r'all': r'.*'}
    grouped = collections.defaultdict(list)
    for var in vars_:
        for name, pattern in groups.items():
            if re.match(pattern, var.name):
                # Substitute capture groups of the pattern into the name.
                name = re.sub(pattern, name, var.name)
                grouped[name].append(var)
    for name in groups:
        if name not in grouped:
            tf.logging.warn("No variables matching '{}' group.".format(name))
    summaries = []
    # Use a distinct name for the per-group list so the parameter `vars_`
    # is not shadowed inside the loop.
    for name, group_vars in grouped.items():
        flat = tf.concat([tf.reshape(var, [-1]) for var in group_vars], 0)
        summaries.append(tf.summary.histogram(scope + '/' + name, flat))
    return tf.summary.merge(summaries)
Create histogram summaries for the provided variables .
50,810
def set_dimension(tensor, axis, value):
    """Set the length of a tensor along the specified dimension.

    Args:
      tensor: Tensor whose static shape should be refined.
      axis: Index of the dimension to set.
      value: New static size for that dimension.

    Raises:
      ValueError: If the dimension is already set to a different value.
    """
    shape = tensor.shape.as_list()
    current = shape[axis]
    if current is not None and current != value:
        raise ValueError(
            'Cannot set dimension {} of tensor {} to {}; is already {}.'.format(
                axis, tensor.name, value, current))
    shape[axis] = value
    tensor.set_shape(shape)
Set the length of a tensor along the specified dimension .
50,811
def default():
    """Default configuration for PPO.

    NOTE(review): returning locals() turns every local below into a config
    entry; keep assignments simple so this stays predictable.
    """
    # General.
    algorithm = algorithms.PPO
    num_agents = 30
    eval_episodes = 30
    use_gpu = False
    normalize_ranges = True
    # Network.
    network = networks.feed_forward_gaussian
    weight_summaries = dict(
        all=r'.*', policy=r'.*/policy/.*', value=r'.*/value/.*')
    policy_layers = 200, 100
    value_layers = 200, 100
    init_output_factor = 0.1
    init_std = 0.35
    # Optimization.
    update_every = 30
    update_epochs = 25
    optimizer = tf.train.AdamOptimizer
    learning_rate = 1e-4
    # Losses.
    discount = 0.995
    kl_target = 1e-2
    kl_cutoff_factor = 2
    kl_cutoff_coef = 1000
    kl_init_penalty = 1
    return locals()
Default configuration for PPO .
50,812
def pendulum():
    """Configuration for the pendulum classic control task."""
    # NOTE(review): relies on the CPython detail that locals() reuses the
    # frame's dict, so the injected defaults survive until the final locals().
    locals().update(default())
    env = 'Pendulum-v0'
    max_length = 200
    steps = 1e6
    batch_size = 20
    chunk_length = 50
    return locals()
Configuration for the pendulum classic control task .
50,813
def cartpole():
    """Configuration for the cart pole classic control task."""
    # Start from the default PPO configuration (CPython locals() hack).
    locals().update(default())
    env = 'CartPole-v1'
    max_length = 500
    steps = 2e5
    # Discrete action space, so use a categorical policy without
    # range normalization.
    normalize_ranges = False
    network = networks.feed_forward_categorical
    return locals()
Configuration for the cart pole classic control task .
50,814
def reacher():
    """Configuration for MuJoCo's reacher task."""
    # Start from the default PPO configuration (CPython locals() hack).
    locals().update(default())
    env = 'Reacher-v2'
    max_length = 1000
    steps = 5e6
    discount = 0.985
    update_every = 60
    return locals()
Configuration for MuJoCo's reacher task.
50,815
def bullet_ant():
    """Configuration for PyBullet's ant task."""
    # Start from the default PPO configuration (CPython locals() hack).
    locals().update(default())
    # Importing pybullet_envs registers its environments with Gym.
    import pybullet_envs
    env = 'AntBulletEnv-v0'
    max_length = 1000
    steps = 3e7
    update_every = 60
    return locals()
Configuration for PyBullet's ant task.
50,816
def step(self, actions):
    """Forward a batch of actions to the wrapped environments.

    Args:
      actions: One action per environment.

    Raises:
      ValueError: If an action does not match its action space.

    Returns:
      Batch of observations, rewards, done flags, and info tuples.
    """
    for index, (env, action) in enumerate(zip(self._envs, actions)):
        if not env.action_space.contains(action):
            message = 'Invalid action at index {}: {}'
            raise ValueError(message.format(index, action))
    if self._blocking:
        transitions = [
            env.step(action)
            for env, action in zip(self._envs, actions)]
    else:
        # Dispatch all steps first, then resolve the returned promises so
        # the environments run in parallel.
        promises = [
            env.step(action, blocking=False)
            for env, action in zip(self._envs, actions)]
        transitions = [promise() for promise in promises]
    observs, rewards, dones, infos = zip(*transitions)
    observ = np.stack(observs)
    reward = np.stack(rewards)
    done = np.stack(dones)
    return observ, reward, done, tuple(infos)
Forward a batch of actions to the wrapped environments .
50,817
def call(self, name, *args, **kwargs):
    """Asynchronously call a method of the external environment.

    Args:
      name: Name of the method to call.
      *args: Positional arguments to forward.
      **kwargs: Keyword arguments to forward.

    Returns:
      The receive callable; invoke it to block on and obtain the result.
    """
    payload = (name, args, kwargs)
    self._conn.send((self._CALL, payload))
    return self._receive
Asynchronously call a method of the external environment .
50,818
def close(self):
    """Send a close message to the external process and join it."""
    try:
        self._conn.send((self._CLOSE, None))
        self._conn.close()
    except IOError:
        # The connection was already closed by the other side.
        pass
    self._process.join()
Send a close message to the external process and join it .
50,819
def step(self, action, blocking=True):
    """Step the environment.

    Args:
      action: The action to apply to the environment.
      blocking: Whether to wait for the result.

    Returns:
      The transition tuple when blocking, otherwise a promise callable.
    """
    promise = self.call('step', action)
    return promise() if blocking else promise
Step the environment .
50,820
def _receive(self):
    """Wait for a message from the worker process and return its payload.

    Raises:
      Exception: An exception was raised inside the worker process; the
        remote stacktrace is re-raised here.
      KeyError: The message type was unexpected.

    Returns:
      Payload of the received result message.
    """
    message, payload = self._conn.recv()
    if message == self._EXCEPTION:
        raise Exception(payload)
    if message == self._RESULT:
        return payload
    raise KeyError('Received message of unexpected type {}'.format(message))
Wait for a message from the worker process and return its payload .
50,821
def _worker(self, constructor, conn):
    """The process waits for actions and sends back environment results.

    Args:
      constructor: Callable that creates the environment inside this process.
      conn: Connection for receiving requests and sending back results.
    """
    try:
        env = constructor()
        while True:
            try:
                # Only block for short times so KeyboardInterrupt is handled.
                if not conn.poll(0.1):
                    continue
                message, payload = conn.recv()
            except (EOFError, KeyboardInterrupt):
                break
            if message == self._ACCESS:
                name = payload
                result = getattr(env, name)
                conn.send((self._RESULT, result))
                continue
            if message == self._CALL:
                name, args, kwargs = payload
                result = getattr(env, name)(*args, **kwargs)
                conn.send((self._RESULT, result))
                continue
            if message == self._CLOSE:
                assert payload is None
                break
            raise KeyError(
                'Received message of unknown type {}'.format(message))
    except Exception:
        # Forward the stacktrace to the parent process for re-raising.
        stacktrace = ''.join(traceback.format_exception(*sys.exc_info()))
        tf.logging.error('Error in environment process: {}'.format(stacktrace))
        conn.send((self._EXCEPTION, stacktrace))
    conn.close()
The process waits for actions and sends back environment results .
50,822
def step(self, action):
    """Forward the action to the wrapped environment.

    Converts the observation and reward to 32-bit precision on the way out.
    """
    observ, reward, done, info = self._env.step(action)
    observ = self._convert_observ(observ)
    reward = self._convert_reward(reward)
    return observ, reward, done, info
Forward action to the wrapped environment .
50,823
def _convert_observ(self, observ):
    """Convert the observation to 32-bit precision.

    Args:
      observ: Numpy observation array.

    Raises:
      ValueError: If the observation contains non-finite values.

    Returns:
      Observation narrowed to 32 bits where it was 64 bits.
    """
    if not np.isfinite(observ).all():
        raise ValueError('Infinite observation encountered.')
    if observ.dtype == np.float64:
        return observ.astype(np.float32)
    if observ.dtype == np.int64:
        return observ.astype(np.int32)
    # Already 32 bits or narrower; keep unchanged.
    return observ
Convert the observation to 32 bits .
50,824
def _convert_reward(self, reward):
    """Convert the reward to a 32-bit float array.

    Raises:
      ValueError: If the reward contains non-finite values.
    """
    if not np.isfinite(reward).all():
        raise ValueError('Infinite reward encountered.')
    return np.array(reward, dtype=np.float32)
Convert the reward to 32 bits .
50,825
def value(self):
    """The current value of the streaming mean."""
    return self._sum / tf.cast(self._count, self._dtype)
The current value of the mean .
50,826
def submit(self, value):
    """Submit a single or batch tensor to refine the streaming mean."""
    # Single values get a batch dimension so the reductions below work.
    if value.shape.ndims == self._sum.shape.ndims:
        value = value[None, ...]
    add_sum = self._sum.assign_add(tf.reduce_sum(value, 0))
    add_count = self._count.assign_add(tf.shape(value)[0])
    return tf.group(add_sum, add_count)
Submit a single or batch tensor to refine the streaming mean .
50,827
def clear(self):
    """Return the mean estimate and reset the streaming statistics."""
    value = self._sum / tf.cast(self._count, self._dtype)
    # Force the mean to be read before the accumulators are zeroed.
    with tf.control_dependencies([value]):
        reset_value = self._sum.assign(tf.zeros_like(self._sum))
        reset_count = self._count.assign(0)
    with tf.control_dependencies([reset_value, reset_count]):
        return tf.identity(value)
Return the mean estimate and reset the streaming statistics .
50,828
def zip_(*structures, **kwargs):
    """Combine corresponding elements in multiple nested structures to tuples.

    Args:
      *structures: Nested structures with matching layout.
      **kwargs: Only 'flatten' is supported.

    Returns:
      Structure with tuples of corresponding leaves.
    """
    flatten = kwargs.pop('flatten', False)
    assert not kwargs, 'zip() got unexpected keyword arguments.'

    def combine(*leaves):
        # With a single input structure the leaf is returned unwrapped.
        return leaves if len(leaves) > 1 else leaves[0]

    return map(combine, *structures, flatten=flatten)
Combine corresponding elements in multiple nested structures into tuples.
50,829
def map_(function, *structures, **kwargs):
    """Apply a function to every element in a nested structure.

    Args:
      function: Called with one leaf from each of the structures.
      *structures: Nested tuples, lists, namedtuples, or dicts of leaves.
      **kwargs: Only 'flatten' is supported; flattens the result if True.

    Raises:
      ValueError: If the structures do not share a common layout.

    Returns:
      Structure of the same layout with leaves replaced by results.
    """
    flatten = kwargs.pop('flatten', False)
    assert not kwargs, 'map() got unexpected keyword arguments.'

    def impl(function, *structures):
        if len(structures) == 0:
            return structures
        if all(isinstance(s, (tuple, list)) for s in structures):
            if len(set(len(x) for x in structures)) > 1:
                raise ValueError(
                    'Cannot merge tuples or lists of different length.')
            args = tuple((impl(function, *x) for x in _builtin_zip(*structures)))
            if hasattr(structures[0], '_fields'):
                # Namedtuples are constructed from positional field values.
                return type(structures[0])(*args)
            else:
                return type(structures[0])(args)
        if all(isinstance(s, dict) for s in structures):
            if len(set(frozenset(x.keys()) for x in structures)) > 1:
                raise ValueError('Cannot merge dicts with different keys.')
            merged = {
                k: impl(function, *(s[k] for s in structures))
                for k in structures[0]}
            return type(structures[0])(merged)
        # Leaf case: apply the function across all structures.
        return function(*structures)

    result = impl(function, *structures)
    if flatten:
        result = flatten_(result)
    return result
Apply a function to every element in a nested structure .
50,830
def flatten_(structure):
    """Combine all leaves of a nested structure into a tuple.

    Dict items are visited in sorted key order so the output ordering is
    deterministic.

    Args:
      structure: Nested tuples, lists, or dicts of leaves.

    Returns:
      Flat tuple of all leaves.
    """
    if isinstance(structure, dict):
        # Bug fix: zip() returns an iterator in Python 3, so the original
        # `zip(*sorted(...))[1]` raised TypeError; collect the sorted values
        # explicitly instead.
        if structure:
            structure = tuple(
                value for _, value in
                sorted(structure.items(), key=lambda x: x[0]))
        else:
            structure = ()
    if isinstance(structure, (tuple, list)):
        result = []
        for element in structure:
            result += flatten_(element)
        return tuple(result)
    return (structure,)
Combine all leaves of a nested structure into a tuple .
50,831
def filter_(predicate, *structures, **kwargs):
    """Select elements of a nested structure based on a predicate function.

    Sub-structures left empty after filtering are removed, except inside
    namedtuples where removed fields become None.

    Args:
      predicate: Called with one leaf from each of the structures.
      *structures: Nested tuples, lists, namedtuples, or dicts of leaves.
      **kwargs: Only 'flatten' is supported; flattens the result if True.

    Raises:
      ValueError: If the structures do not share a common layout.

    Returns:
      Structure containing only the leaves that passed the predicate.
    """
    flatten = kwargs.pop('flatten', False)
    assert not kwargs, 'filter() got unexpected keyword arguments.'

    def impl(predicate, *structures):
        if len(structures) == 0:
            return structures
        if all(isinstance(s, (tuple, list)) for s in structures):
            if len(set(len(x) for x in structures)) > 1:
                raise ValueError(
                    'Cannot merge tuples or lists of different length.')
            if len(structures) > 1:
                filtered = (impl(predicate, *x)
                            for x in _builtin_zip(*structures))
            else:
                filtered = (impl(predicate, x) for x in structures[0])
            if hasattr(structures[0], '_fields'):
                # Namedtuples cannot drop fields; use None placeholders.
                filtered = (x if x != () else None for x in filtered)
                return type(structures[0])(*filtered)
            else:
                # Drop sub-structures that were filtered down to empty.
                filtered = (
                    x for x in filtered
                    if not isinstance(x, (tuple, list, dict)) or x)
                return type(structures[0])(filtered)
        if all(isinstance(s, dict) for s in structures):
            if len(set(frozenset(x.keys()) for x in structures)) > 1:
                raise ValueError('Cannot merge dicts with different keys.')
            if len(structures) > 1:
                filtered = {
                    k: impl(predicate, *(s[k] for s in structures))
                    for k in structures[0]}
            else:
                filtered = {
                    k: impl(predicate, v) for k, v in structures[0].items()}
            # Drop values that were filtered down to empty containers.
            filtered = {
                k: v for k, v in filtered.items()
                if not isinstance(v, (tuple, list, dict)) or v}
            return type(structures[0])(filtered)
        # Leaf case: keep the leaf only if the predicate accepts it.
        if len(structures) > 1:
            return structures if predicate(*structures) else ()
        else:
            return structures[0] if predicate(structures[0]) else ()

    result = impl(predicate, *structures)
    if flatten:
        result = flatten_(result)
    return result
Select elements of a nested structure based on a predicate function .
50,832
def add_phase(
        self, name, done, score, summary, steps,
        report_every=None, log_every=None, checkpoint_every=None, feed=None):
    """Add a phase to the loop protocol.

    Args:
      name: Phase name, also used as the summary writer sub-directory.
      done: Tensor indicating whether current episodes have ended.
      score: Tensor holding the current, possibly intermediate, score.
      summary: String summary tensor for the phase.
      steps: Number of steps this phase lasts within one epoch.
      report_every: Yield the mean score every this number of steps.
      log_every: Request summaries every this number of steps.
      checkpoint_every: Write a checkpoint every this number of steps.
      feed: Additional feed dictionary for the session run call.

    Raises:
      ValueError: If the ranks of the done or score tensors are unknown.
    """
    done = tf.convert_to_tensor(done, tf.bool)
    score = tf.convert_to_tensor(score, tf.float32)
    summary = tf.convert_to_tensor(summary, tf.string)
    feed = feed or {}
    if done.shape.ndims is None or score.shape.ndims is None:
        raise ValueError("Rank of 'done' and 'score' tensors must be known.")
    # Only create a writer when a log directory was provided.
    writer = self._logdir and tf.summary.FileWriter(
        os.path.join(self._logdir, name),
        tf.get_default_graph(), flush_secs=60)
    op = self._define_step(done, score, summary)
    # A scalar score means a single environment; otherwise one per row.
    batch = 1 if score.shape.ndims == 0 else score.shape[0].value
    self._phases.append(_Phase(
        name, writer, op, batch, int(steps), feed,
        report_every, log_every, checkpoint_every))
Add a phase to the loop protocol .
50,833
def run(self, sess, saver, max_step=None):
    """Run the loop schedule for a specified number of steps.

    Args:
      sess: Session to run the phase operations in.
      saver: Saver used to store checkpoints.
      max_step: Stop once the global step reaches this limit.

    Yields:
      Reported mean scores.
    """
    global_step = sess.run(self._step)
    steps_made = 1
    while True:
        if max_step and global_step >= max_step:
            break
        phase, epoch, steps_in = self._find_current_phase(global_step)
        phase_step = epoch * phase.steps + steps_in
        # Log a banner when transitioning into a new phase.
        if steps_in % phase.steps < steps_made:
            message = '\n' + ('-' * 50) + '\n'
            message += 'Phase {} (phase step {}, global step {}).'
            tf.logging.info(message.format(
                phase.name, phase_step, global_step))
        # Populate the book-keeping placeholders for this run call.
        phase.feed[self._reset] = (steps_in < steps_made)
        phase.feed[self._log] = (
            phase.writer and
            self._is_every_steps(phase_step, phase.batch, phase.log_every))
        phase.feed[self._report] = (
            self._is_every_steps(phase_step, phase.batch, phase.report_every))
        summary, mean_score, global_step, steps_made = sess.run(
            phase.op, phase.feed)
        if self._is_every_steps(
                phase_step, phase.batch, phase.checkpoint_every):
            self._store_checkpoint(sess, saver, global_step)
        if self._is_every_steps(phase_step, phase.batch, phase.report_every):
            yield mean_score
        if summary and phase.writer:
            # Align the summaries of all phases on a shared step axis.
            longest_phase = max(phase.steps for phase in self._phases)
            summary_step = epoch * longest_phase + steps_in
            phase.writer.add_summary(summary, summary_step)
Run the loop schedule for a specified number of steps .
50,834
def _is_every_steps(self, phase_step, batch, every):
    """Determine whether a periodic event should happen at this step.

    Checks every step covered by the current batch, since the step counter
    can jump over the exact multiple.
    """
    if not every:
        return False
    covered = range(phase_step, phase_step + batch)
    return any((step + 1) % every == 0 for step in covered)
Determine whether a periodic event should happen at this step .
50,835
def _find_current_phase(self, global_step):
    """Determine the current phase based on the global step.

    Returns:
      Tuple of the phase object, the epoch number, and the step offset
      within the phase.
    """
    epoch_size = sum(phase.steps for phase in self._phases)
    epoch = int(global_step // epoch_size)
    remaining = global_step % epoch_size
    for phase in self._phases:
        if remaining < phase.steps:
            return phase, epoch, remaining
        remaining -= phase.steps
Determine the current phase based on the global step .
50,836
def _define_step(self, done, score, summary):
    """Combine operations of a phase.

    Tracks a streaming mean of the scores of finished episodes and reports
    it when requested.

    Returns:
      Tuple of summary tensor, mean score, new global step, and steps made.
    """
    # Give scalar inputs a batch dimension.
    if done.shape.ndims == 0:
        done = done[None]
    if score.shape.ndims == 0:
        score = score[None]
    score_mean = streaming_mean.StreamingMean((), tf.float32)
    with tf.control_dependencies([done, score, summary]):
        # Only submit the scores of episodes that finished this step.
        done_score = tf.gather(score, tf.where(done)[:, 0])
        submit_score = tf.cond(
            tf.reduce_any(done),
            lambda: score_mean.submit(done_score), tf.no_op)
    with tf.control_dependencies([submit_score]):
        # clear() also resets the streaming statistics; float() yields 0.0
        # when no report was requested.
        mean_score = tf.cond(self._report, score_mean.clear, float)
        steps_made = tf.shape(score)[0]
        next_step = self._step.assign_add(steps_made)
    with tf.control_dependencies([mean_score, next_step]):
        return tf.identity(summary), mean_score, next_step, steps_made
Combine operations of a phase .
50,837
def _store_checkpoint(self, sess, saver, global_step):
    """Store a checkpoint if a log directory was provided to the constructor.

    The log directory is created if it does not exist yet.
    """
    if not self._logdir or not saver:
        return
    tf.gfile.MakeDirs(self._logdir)
    saver.save(sess, os.path.join(self._logdir, 'model.ckpt'), global_step)
Store a checkpoint if a log directory was provided to the constructor .
50,838
def _define_loop(graph, logdir, train_steps, eval_steps):
    """Create and configure a training loop with training and evaluation phases.

    Args:
      graph: Object providing graph elements via attributes.
      logdir: Log directory for summaries and checkpoints.
      train_steps: Number of training steps per epoch.
      eval_steps: Number of evaluation steps per epoch.

    Returns:
      Loop object.
    """
    loop = tools.Loop(
        logdir, graph.step, graph.should_log, graph.do_report,
        graph.force_reset)
    loop.add_phase(
        'train', graph.done, graph.score, graph.summary, train_steps,
        report_every=train_steps,
        log_every=train_steps // 2,
        checkpoint_every=None,
        feed={graph.is_training: True})
    loop.add_phase(
        'eval', graph.done, graph.score, graph.summary, eval_steps,
        report_every=eval_steps,
        log_every=eval_steps // 2,
        checkpoint_every=10 * eval_steps,
        feed={graph.is_training: False})
    return loop
Create and configure a training loop with training and evaluation phases .
50,839
def train(config, env_processes):
    """Training and evaluation entry point yielding scores.

    Args:
      config: Object providing configurations via attributes.
      env_processes: Whether to step environments in separate processes.

    Yields:
      Evaluation scores.
    """
    tf.reset_default_graph()
    if config.update_every % config.num_agents:
        tf.logging.warn('Number of agents should divide episodes per update.')
    with tf.device('/cpu:0'):
        batch_env = utility.define_batch_env(
            lambda: _create_environment(config),
            config.num_agents, env_processes)
        graph = utility.define_simulation_graph(
            batch_env, config.algorithm, config)
        loop = _define_loop(
            graph, config.logdir,
            config.update_every * config.max_length,
            config.eval_episodes * config.max_length)
        total_steps = int(
            config.steps / config.update_every *
            (config.update_every + config.eval_episodes))
    # Variables whose names match '_temporary' are excluded from checkpoints.
    saver = utility.define_saver(exclude=(r'.*_temporary.*',))
    sess_config = tf.ConfigProto(allow_soft_placement=True)
    sess_config.gpu_options.allow_growth = True
    with tf.Session(config=sess_config) as sess:
        utility.initialize_variables(sess, saver, config.logdir)
        for score in loop.run(sess, saver, total_steps):
            yield score
    batch_env.close()
Training and evaluation entry point yielding scores .
50,840
def main(_):
    """Create or load configuration and launch the trainer.

    Raises:
      KeyError: If no configuration name was specified.
    """
    utility.set_up_logging()
    if not FLAGS.config:
        raise KeyError('You must specify a configuration.')
    logdir = FLAGS.logdir and os.path.expanduser(os.path.join(
        FLAGS.logdir, '{}-{}'.format(FLAGS.timestamp, FLAGS.config)))
    try:
        config = utility.load_config(logdir)
    except IOError:
        # Fresh run: build the configuration from its name and persist it.
        config = tools.AttrDict(getattr(configs, FLAGS.config)())
        config = utility.save_config(config, logdir)
    for score in train(config, FLAGS.env_processes):
        tf.logging.info('Score {}.'.format(score))
Create or load configuration and launch the trainer .
50,841
def iterate_sequences(
        consumer_fn, output_template, sequences, length,
        chunk_length=None, batch_size=None, num_epochs=1, padding_value=0):
    """Iterate over batches of chunks of sequences for multiple epochs.

    Args:
      consumer_fn: Function called with each batch from the iterator.
      output_template: Template for the scan output structure.
      sequences: Nested collection of sequence tensors.
      length: Valid lengths of the sequences; its batch size must be static.
      chunk_length: Split sequences into chunks of this size, if set.
      batch_size: Batch size of the returned batches, if set.
      num_epochs: Number of passes over the data.
      padding_value: Value used to pad the final chunk of a sequence.

    Raises:
      ValueError: If the static batch size of `length` is unknown.

    Returns:
      Scan result of running `consumer_fn` over all batches.
    """
    if not length.shape[0].value:
        raise ValueError('Batch size of length tensor must be set.')
    num_sequences = length.shape[0].value
    sequences = dict(sequence=sequences, length=length)
    dataset = tf.data.Dataset.from_tensor_slices(sequences)
    dataset = dataset.repeat(num_epochs)
    if chunk_length:
        # Strip per-sequence padding, then split into fixed-size chunks.
        dataset = dataset.map(remove_padding).flat_map(
            lambda x: tf.data.Dataset.from_tensor_slices(
                chunk_sequence(x, chunk_length, padding_value)))
        num_chunks = tf.reduce_sum((length - 1) // chunk_length + 1)
    else:
        num_chunks = num_sequences
    if batch_size:
        dataset = dataset.shuffle(num_sequences // 2)
    dataset = dataset.batch(batch_size or num_sequences)
    dataset = dataset.prefetch(num_epochs)
    iterator = dataset.make_initializable_iterator()
    with tf.control_dependencies([iterator.initializer]):
        num_batches = num_epochs * num_chunks // (batch_size or num_sequences)
        # Sequential scan so the iterator is consumed in order.
        return tf.scan(
            lambda _1, index: consumer_fn(iterator.get_next()),
            tf.range(num_batches), output_template, parallel_iterations=1)
Iterate over batches of chunks of sequences for multiple epochs .
50,842
def chunk_sequence(sequence, chunk_length=200, padding_value=0):
    """Split a nested dict of sequence tensors into a batch of chunks.

    Args:
      sequence: Nested dict of time-major tensors; may contain a 'length'
        key specifying the valid prefix length.
      chunk_length: Size of each chunk.
      padding_value: Value used to pad the final, partial chunk.

    Returns:
      Nested dict of chunked tensors, including a per-chunk 'length' key.
    """
    if 'length' in sequence:
        length = sequence.pop('length')
    else:
        length = tf.shape(tools.nested.flatten(sequence)[0])[0]
    num_chunks = (length - 1) // chunk_length + 1
    padding_length = chunk_length * num_chunks - length
    # Pad every tensor so the time axis divides evenly into chunks; the
    # `0 * tensor[...] + padding_value` trick preserves shape and dtype.
    padded = tools.nested.map(
        lambda tensor: tf.concat(
            [tensor, 0 * tensor[:padding_length] + padding_value], 0),
        sequence)
    chunks = tools.nested.map(
        lambda tensor: tf.reshape(
            tensor, [num_chunks, chunk_length] + tensor.shape[1:].as_list()),
        padded)
    # All chunks are full except the last, which excludes the padding.
    chunks['length'] = tf.concat(
        [chunk_length * tf.ones((num_chunks - 1,), dtype=tf.int32),
         [chunk_length - padding_length]], 0)
    return chunks
Split a nested dict of sequence tensors into a batch of chunks .
50,843
def remove_padding(sequence):
    """Select the used frames of a sequence, up to its length.

    Note: pops the 'length' key from the input dict.
    """
    length = sequence.pop('length')
    return tools.nested.map(lambda tensor: tensor[:length], sequence)
Selects the used frames of a sequence up to its length .
50,844
def transform(self, value):
    """Normalize a single or batch tensor.

    Applies the transformations enabled on the instance (centering,
    scaling, clipping) using the current streaming estimates.
    """
    with tf.name_scope(self._name + '/transform'):
        no_batch_dim = value.shape.ndims == self._mean.shape.ndims
        if no_batch_dim:
            # Add a batch dimension for uniform handling below.
            value = value[None, ...]
        if self._center:
            value -= self._mean[None, ...]
        if self._scale:
            # Epsilon guards against division by a tiny std; before two
            # samples were seen, the std is undefined, so scale by one.
            value /= tf.cond(
                self._count > 1, lambda: self._std() + 1e-8,
                lambda: tf.ones_like(self._var_sum))[None]
        if self._clip:
            value = tf.clip_by_value(value, -self._clip, self._clip)
        if no_batch_dim:
            # Remove the batch dimension added above.
            value = value[0]
        return tf.check_numerics(value, 'value')
Normalize a single or batch tensor .
50,845
def update(self, value):
    """Update the mean and variance estimates.

    Uses a streaming (Welford-style) update of the mean and the sum of
    squared deviations.

    Args:
      value: Batch or single value tensor.

    Returns:
      Summary tensor of the submitted value.
    """
    with tf.name_scope(self._name + '/update'):
        if value.shape.ndims == self._mean.shape.ndims:
            # Add a batch dimension for uniform handling below.
            value = value[None, ...]
        count = tf.shape(value)[0]
        with tf.control_dependencies([self._count.assign_add(count)]):
            step = tf.cast(self._count, tf.float32)
            mean_delta = tf.reduce_sum(value - self._mean[None, ...], 0)
            new_mean = self._mean + mean_delta / step
            # The very first sample initializes the mean directly.
            new_mean = tf.cond(
                self._count > 1, lambda: new_mean, lambda: value[0])
            var_delta = (
                value - self._mean[None, ...]) * (value - new_mean[None, ...])
            new_var_sum = self._var_sum + tf.reduce_sum(var_delta, 0)
        with tf.control_dependencies([new_mean, new_var_sum]):
            update = self._mean.assign(new_mean), self._var_sum.assign(
                new_var_sum)
        with tf.control_dependencies(update):
            if value.shape.ndims == 1:
                value = tf.reduce_mean(value)
            return self._summary('value', tf.reduce_mean(value))
Update the mean and variance estimates .
50,846
def reset(self):
    """Reset the estimates of mean and variance."""
    with tf.name_scope(self._name + '/reset'):
        zero_count = self._count.assign(0)
        zero_mean = self._mean.assign(tf.zeros_like(self._mean))
        zero_var = self._var_sum.assign(tf.zeros_like(self._var_sum))
        return tf.group(zero_count, zero_mean, zero_var)
Reset the estimates of mean and variance .
50,847
def summary(self):
    """Summary string of mean and standard deviation.

    Before any (respectively two) samples were seen, str yields an empty
    summary for the mean (respectively stddev).
    """
    with tf.name_scope(self._name + '/summary'):
        mean_summary = tf.cond(
            self._count > 0, lambda: self._summary('mean', self._mean), str)
        std_summary = tf.cond(
            self._count > 1, lambda: self._summary('stddev', self._std()), str)
        return tf.summary.merge([mean_summary, std_summary])
Summary string of mean and standard deviation .
50,848
def _std(self):
    """Compute the current estimate of the standard deviation.

    The standard deviation is only defined after at least two samples were
    seen; yields NaN before that.
    """
    variance = tf.cond(
        self._count > 1,
        lambda: self._var_sum / tf.cast(self._count - 1, tf.float32),
        lambda: tf.ones_like(self._var_sum) * float('nan'))
    # Epsilon guards against a zero variance.
    return tf.sqrt(variance + 1e-4)
Computes the current estimate of the standard deviation .
50,849
def _summary(self, name, tensor):
    """Create a scalar or histogram summary matching the rank of the tensor."""
    if tensor.shape.ndims == 0:
        return tf.summary.scalar(name, tensor)
    return tf.summary.histogram(name, tensor)
Create a scalar or histogram summary matching the rank of the tensor .
50,850
def length(self, rows=None):
    """Tensor holding the current length of episodes.

    Args:
      rows: Episodes to select, defaults to all.
    """
    if rows is None:
        rows = tf.range(self._capacity)
    return tf.gather(self._length, rows)
Tensor holding the current length of episodes .
50,851
def append(self, transitions, rows=None):
    """Append a batch of transitions to rows of the memory.

    Args:
      transitions: Tuple of transition quantities with batch dimension.
      rows: Episodes to append to, defaults to all.

    Returns:
      Operation that updates the episode lengths.
    """
    rows = tf.range(self._capacity) if rows is None else rows
    assert rows.shape.ndims == 1
    assert_capacity = tf.assert_less(
        rows, self._capacity, message='capacity exceeded')
    with tf.control_dependencies([assert_capacity]):
        assert_max_length = tf.assert_less(
            tf.gather(self._length, rows), self._max_length,
            message='max length exceeded')
    with tf.control_dependencies([assert_max_length]):
        # Write each transition at the current end of its episode row.
        timestep = tf.gather(self._length, rows)
        indices = tf.stack([rows, timestep], 1)
        append_ops = tools.nested.map(
            lambda var, val: tf.scatter_nd_update(var, indices, val),
            self._buffers, transitions, flatten=True)
    with tf.control_dependencies(append_ops):
        # Increment the length of exactly the written rows.
        episode_mask = tf.reduce_sum(tf.one_hot(
            rows, self._capacity, dtype=tf.int32), 0)
        return self._length.assign_add(episode_mask)
Append a batch of transitions to rows of the memory .
50,852
def replace(self, episodes, length, rows=None):
    """Replace full episodes.

    Args:
      episodes: Tuple of transition quantities with batch and time
        dimensions.
      length: Batch of sequence lengths.
      rows: Episodes to replace, defaults to all.

    Returns:
      Operation that updates the episode lengths.
    """
    rows = tf.range(self._capacity) if rows is None else rows
    assert rows.shape.ndims == 1
    assert_capacity = tf.assert_less(
        rows, self._capacity, message='capacity exceeded')
    with tf.control_dependencies([assert_capacity]):
        assert_max_length = tf.assert_less_equal(
            length, self._max_length, message='max length exceeded')
    with tf.control_dependencies([assert_max_length]):
        replace_ops = tools.nested.map(
            lambda var, val: tf.scatter_update(var, rows, val),
            self._buffers, episodes, flatten=True)
    with tf.control_dependencies(replace_ops):
        return tf.scatter_update(self._length, rows, length)
Replace full episodes .
50,853
def data(self, rows=None):
    """Access a batch of episodes from the memory.

    Args:
      rows: Episodes to select, defaults to all.

    Returns:
      Tuple of the selected episode buffers and their lengths.
    """
    if rows is None:
        rows = tf.range(self._capacity)
    assert rows.shape.ndims == 1
    episode = tools.nested.map(
        lambda var: tf.gather(var, rows), self._buffers)
    length = tf.gather(self._length, rows)
    return episode, length
Access a batch of episodes from the memory .
50,854
def clear(self, rows=None):
    """Reset episodes in the memory.

    Internally, only the episode lengths are set to zero; the buffer
    contents are reused.
    """
    if rows is None:
        rows = tf.range(self._capacity)
    assert rows.shape.ndims == 1
    return tf.scatter_update(self._length, rows, tf.zeros_like(rows))
Reset episodes in the memory .
50,855
def _parse_shape(self, space):
    """Get a tensor shape from an OpenAI Gym space.

    Raises:
      NotImplementedError: For unsupported space types.
    """
    if isinstance(space, gym.spaces.Discrete):
        return ()
    if isinstance(space, gym.spaces.Box):
        return space.shape
    raise NotImplementedError()
Get a tensor shape from an OpenAI Gym space.
50,856
def _parse_dtype(self, space):
    """Get a tensor dtype from an OpenAI Gym space.

    Raises:
      NotImplementedError: For unsupported space types.
    """
    if isinstance(space, gym.spaces.Discrete):
        return tf.int32
    if isinstance(space, gym.spaces.Box):
        return tf.float32
    raise NotImplementedError()
Get a tensor dtype from an OpenAI Gym space.
50,857
def begin_episode(self, agent_indices):
    """Reset the recurrent states and stored episode.

    Args:
      agent_indices: Agents whose episodes begin.

    Returns:
      Summary tensor (empty string).
    """
    with tf.name_scope('begin_episode/'):
        if self._last_state is None:
            reset_state = tf.no_op()
        else:
            # Only reset the states of the agents that start a new episode.
            reset_state = utility.reinit_nested_vars(
                self._last_state, agent_indices)
        reset_buffer = self._current_episodes.clear(agent_indices)
        with tf.control_dependencies([reset_state, reset_buffer]):
            return tf.constant('')
Reset the recurrent states and stored episode .
50,858
# Compute a batch of actions (and an optional summary) for a batch of
# observations. Normalizes observations through the observation filter,
# runs the policy network on a length-1 time axis, samples during training
# and takes the mode during evaluation, then records the chosen action and
# the policy's tensor-valued parameters into per-agent variables so the
# later experience() call can store them. `str` is used as the false-branch
# callable of tf.cond to produce an empty-string summary.
# NOTE(review): relies on tf.cond/tf.scatter_update from TF1; assumes
# observ is batched with the agent dimension first — confirm with callers.
def perform ( self , agent_indices , observ ) : with tf . name_scope ( 'perform/' ) : observ = self . _observ_filter . transform ( observ ) if self . _last_state is None : state = None else : state = tools . nested . map ( lambda x : tf . gather ( x , agent_indices ) , self . _last_state ) with tf . device ( '/gpu:0' if self . _use_gpu else '/cpu:0' ) : output = self . _network ( observ [ : , None ] , tf . ones ( observ . shape [ 0 ] ) , state ) action = tf . cond ( self . _is_training , output . policy . sample , output . policy . mode ) logprob = output . policy . log_prob ( action ) [ : , 0 ] summary = tf . cond ( self . _should_log , lambda : tf . summary . merge ( [ tf . summary . histogram ( 'mode' , output . policy . mode ( ) [ : , 0 ] ) , tf . summary . histogram ( 'action' , action [ : , 0 ] ) , tf . summary . histogram ( 'logprob' , logprob ) ] ) , str ) if self . _last_state is None : assign_state = tf . no_op ( ) else : assign_state = utility . assign_nested_vars ( self . _last_state , output . state , agent_indices ) remember_last_action = tf . scatter_update ( self . _last_action , agent_indices , action [ : , 0 ] ) policy_params = tools . nested . filter ( lambda x : isinstance ( x , tf . Tensor ) , output . policy . parameters ) assert policy_params , 'Policy has no parameters to store.' remember_last_policy = tools . nested . map ( lambda var , val : tf . scatter_update ( var , agent_indices , val [ : , 0 ] ) , self . _last_policy , policy_params , flatten = True ) with tf . control_dependencies ( ( assign_state , remember_last_action ) + remember_last_policy ) : return action [ : , 0 ] , tf . identity ( summary )
Compute batch of actions and a summary for a batch of observation .
50,859
def experience(self, agent_indices, observ, action, reward,
               unused_done, unused_nextob):
    """Process the transition tuple of the current step.

    Only stores experience while training; otherwise returns an empty
    string summary.
    """
    with tf.name_scope('experience/'):
        store = lambda: self._define_experience(
            agent_indices, observ, action, reward)
        return tf.cond(self._is_training, store, str)
Process the transition tuple of the current step .
50,860
def end_episode(self, agent_indices):
    """Add episodes to the memory and run updates if the memory is full.

    Only active while training; otherwise returns an empty string summary.
    """
    with tf.name_scope('end_episode/'):
        finish = lambda: self._define_end_episode(agent_indices)
        return tf.cond(self._is_training, finish, str)
Add episodes to the memory and perform update steps if memory is full .
50,861
# Initialize the policy network as a TF template, run it once on a dummy
# batch to validate that the policy's event shape matches the environment's
# action shape, remember the policy distribution class, and fix the batch
# dimension of the policy parameter tensors (and recurrent state, if any)
# so that later per-agent scatter updates have known shapes.
# Returns the tensor-valued policy parameters and the initial state.
def _initialize_policy ( self ) : with tf . device ( '/gpu:0' if self . _use_gpu else '/cpu:0' ) : network = functools . partial ( self . _config . network , self . _config , self . _batch_env . action_space ) self . _network = tf . make_template ( 'network' , network ) output = self . _network ( tf . zeros_like ( self . _batch_env . observ ) [ : , None ] , tf . ones ( len ( self . _batch_env ) ) ) if output . policy . event_shape != self . _batch_env . action . shape [ 1 : ] : message = 'Policy event shape {} does not match action shape {}.' message = message . format ( output . policy . event_shape , self . _batch_env . action . shape [ 1 : ] ) raise ValueError ( message ) self . _policy_type = type ( output . policy ) is_tensor = lambda x : isinstance ( x , tf . Tensor ) policy_params = tools . nested . filter ( is_tensor , output . policy . parameters ) set_batch_dim = lambda x : utility . set_dimension ( x , 0 , len ( self . _batch_env ) ) tools . nested . map ( set_batch_dim , policy_params ) if output . state is not None : tools . nested . map ( set_batch_dim , output . state ) return policy_params , output . state
Initialize the policy .
50,862
def _initialize_memory(self, policy_params):
    """Initialize temporary and permanent episode memory.

    The per-step buffer of in-progress episodes lives under the
    'ppo_temporary' variable scope; the memory of finished episodes and
    its counter are regular variables.

    Args:
      policy_params: Nested tuple of policy parameter tensors used to
        derive the storage template.
    """
    # Template of one transition: observation, action, policy params, reward.
    template = (
        self._batch_env.observ[0],
        self._batch_env.action[0],
        tools.nested.map(lambda x: x[0, 0], policy_params),
        self._batch_env.reward[0])
    with tf.variable_scope('ppo_temporary'):
        self._current_episodes = parts.EpisodeMemory(
            template, len(self._batch_env), self._config.max_length,
            'episodes')
    self._finished_episodes = parts.EpisodeMemory(
        template, self._config.update_every, self._config.max_length,
        'memory')
    self._num_finished_episodes = tf.Variable(0, False)
Initialize temporary and permanent memory .
50,863
# Perform multiple training iterations of both policy and value baseline.
# Asserts the finished-episode memory is full, reads and masks the stored
# data, normalizes observations and rewards through the running filters,
# runs the PPO update steps followed by the KL penalty adjustment, then
# clears the memory and emits a merged summary. The chained
# tf.control_dependencies blocks enforce this ordering in TF1 graph mode.
def _training ( self ) : with tf . device ( '/gpu:0' if self . _use_gpu else '/cpu:0' ) : with tf . name_scope ( 'training' ) : assert_full = tf . assert_equal ( self . _num_finished_episodes , self . _config . update_every ) with tf . control_dependencies ( [ assert_full ] ) : data = self . _finished_episodes . data ( ) ( observ , action , old_policy_params , reward ) , length = data old_policy_params = tools . nested . map ( lambda param : self . _mask ( param , length , 1 ) , old_policy_params ) with tf . control_dependencies ( [ tf . assert_greater ( length , 0 ) ] ) : length = tf . identity ( length ) observ = self . _observ_filter . transform ( observ ) reward = self . _reward_filter . transform ( reward ) update_summary = self . _perform_update_steps ( observ , action , old_policy_params , reward , length ) with tf . control_dependencies ( [ update_summary ] ) : penalty_summary = self . _adjust_penalty ( observ , old_policy_params , length ) with tf . control_dependencies ( [ penalty_summary ] ) : clear_memory = tf . group ( self . _finished_episodes . clear ( ) , self . _num_finished_episodes . assign ( 0 ) ) with tf . control_dependencies ( [ clear_memory ] ) : weight_summary = utility . variable_summaries ( tf . trainable_variables ( ) , self . _config . weight_summaries ) return tf . summary . merge ( [ update_summary , penalty_summary , weight_summary ] )
Perform multiple training iterations of both policy and value baseline .
50,864
# Perform multiple update steps of value function and policy. Computes
# Monte-Carlo returns and either GAE or plain advantages, normalizes the
# advantages to zero mean / unit variance (1e-8 guards division by zero),
# then iterates chunked mini-batch epochs via parts.iterate_sequences.
# Returns the summary from the middle update epoch as representative.
# tf.Print calls log diagnostics as graph side effects (TF1 idiom).
def _perform_update_steps ( self , observ , action , old_policy_params , reward , length ) : return_ = utility . discounted_return ( reward , length , self . _config . discount ) value = self . _network ( observ , length ) . value if self . _config . gae_lambda : advantage = utility . lambda_advantage ( reward , value , length , self . _config . discount , self . _config . gae_lambda ) else : advantage = return_ - value mean , variance = tf . nn . moments ( advantage , axes = [ 0 , 1 ] , keep_dims = True ) advantage = ( advantage - mean ) / ( tf . sqrt ( variance ) + 1e-8 ) advantage = tf . Print ( advantage , [ tf . reduce_mean ( return_ ) , tf . reduce_mean ( value ) ] , 'return and value: ' ) advantage = tf . Print ( advantage , [ tf . reduce_mean ( advantage ) ] , 'normalized advantage: ' ) episodes = ( observ , action , old_policy_params , reward , advantage ) value_loss , policy_loss , summary = parts . iterate_sequences ( self . _update_step , [ 0. , 0. , '' ] , episodes , length , self . _config . chunk_length , self . _config . batch_size , self . _config . update_epochs , padding_value = 1 ) print_losses = tf . group ( tf . Print ( 0 , [ tf . reduce_mean ( value_loss ) ] , 'value loss: ' ) , tf . Print ( 0 , [ tf . reduce_mean ( policy_loss ) ] , 'policy loss: ' ) ) with tf . control_dependencies ( [ value_loss , policy_loss , print_losses ] ) : return summary [ self . _config . update_epochs // 2 ]
Perform multiple update steps of value function and policy .
50,865
# Compute the current combined loss (policy + value + optional network
# regularization loss) and perform one gradient update step. Reconstructs
# the behavioral policy distribution from the stored parameters, applies
# the optimizer, and returns the losses and a merged summary with the
# optimize op as a control dependency so the update actually runs.
def _update_step ( self , sequence ) : observ , action , old_policy_params , reward , advantage = sequence [ 'sequence' ] length = sequence [ 'length' ] old_policy = self . _policy_type ( ** old_policy_params ) value_loss , value_summary = self . _value_loss ( observ , reward , length ) network = self . _network ( observ , length ) policy_loss , policy_summary = self . _policy_loss ( old_policy , network . policy , action , advantage , length ) network_loss = network . get ( 'loss' , 0.0 ) loss = policy_loss + value_loss + tf . reduce_mean ( network_loss ) gradients , variables = ( zip ( * self . _optimizer . compute_gradients ( loss ) ) ) optimize = self . _optimizer . apply_gradients ( zip ( gradients , variables ) ) summary = tf . summary . merge ( [ value_summary , policy_summary , tf . summary . histogram ( 'network_loss' , network_loss ) , tf . summary . scalar ( 'avg_network_loss' , tf . reduce_mean ( network_loss ) ) , tf . summary . scalar ( 'gradient_norm' , tf . global_norm ( gradients ) ) , utility . gradient_summaries ( zip ( gradients , variables ) ) ] ) with tf . control_dependencies ( [ optimize ] ) : return [ tf . identity ( x ) for x in ( value_loss , policy_loss , summary ) ]
Compute the current combined loss and perform a gradient update step .
50,866
def _value_loss(self, observ, reward, length):
    """Compute the loss for the value baseline.

    The loss is half the squared difference between predicted values and
    discounted Monte-Carlo returns, masked to valid timesteps.

    Returns:
      Tuple of scalar value loss tensor and a merged summary.
    """
    with tf.name_scope('value_loss'):
        value = self._network(observ, length).value
        return_ = utility.discounted_return(
            reward, length, self._config.discount)
        error = return_ - value
        value_loss = 0.5 * self._mask(error ** 2, length)
        summary = tf.summary.merge([
            tf.summary.histogram('value_loss', value_loss),
            tf.summary.scalar('avg_value_loss', tf.reduce_mean(value_loss))])
        value_loss = tf.reduce_mean(value_loss)
        return tf.check_numerics(value_loss, 'value_loss'), summary
Compute the loss function for the value baseline .
50,867
# Compute the policy loss composed of multiple components: the clipped
# importance-sampled surrogate loss, an adaptive KL penalty against the
# behavioral policy, a quadratic KL cutoff penalty beyond a hard threshold,
# and optional entropy regularization. A tf.Print control dependency logs
# when the cutoff fires. Returns the scalar policy loss and a summary.
def _policy_loss ( self , old_policy , policy , action , advantage , length ) : with tf . name_scope ( 'policy_loss' ) : kl = tf . contrib . distributions . kl_divergence ( old_policy , policy ) kl = tf . check_numerics ( kl , 'kl' ) kl = tf . reduce_mean ( self . _mask ( kl , length ) , 1 ) policy_gradient = tf . exp ( policy . log_prob ( action ) - old_policy . log_prob ( action ) ) surrogate_loss = - tf . reduce_mean ( self . _mask ( policy_gradient * tf . stop_gradient ( advantage ) , length ) , 1 ) surrogate_loss = tf . check_numerics ( surrogate_loss , 'surrogate_loss' ) kl_penalty = self . _penalty * kl cutoff_threshold = self . _config . kl_target * self . _config . kl_cutoff_factor cutoff_count = tf . reduce_sum ( tf . cast ( kl > cutoff_threshold , tf . int32 ) ) with tf . control_dependencies ( [ tf . cond ( cutoff_count > 0 , lambda : tf . Print ( 0 , [ cutoff_count ] , 'kl cutoff! ' ) , int ) ] ) : kl_cutoff = ( self . _config . kl_cutoff_coef * tf . cast ( kl > cutoff_threshold , tf . float32 ) * ( kl - cutoff_threshold ) ** 2 ) policy_loss = surrogate_loss + kl_penalty + kl_cutoff entropy = tf . reduce_mean ( policy . entropy ( ) , axis = 1 ) if self . _config . entropy_regularization : policy_loss -= self . _config . entropy_regularization * entropy summary = tf . summary . merge ( [ tf . summary . histogram ( 'entropy' , entropy ) , tf . summary . histogram ( 'kl' , kl ) , tf . summary . histogram ( 'surrogate_loss' , surrogate_loss ) , tf . summary . histogram ( 'kl_penalty' , kl_penalty ) , tf . summary . histogram ( 'kl_cutoff' , kl_cutoff ) , tf . summary . histogram ( 'kl_penalty_combined' , kl_penalty + kl_cutoff ) , tf . summary . histogram ( 'policy_loss' , policy_loss ) , tf . summary . scalar ( 'avg_surr_loss' , tf . reduce_mean ( surrogate_loss ) ) , tf . summary . scalar ( 'avg_kl_penalty' , tf . reduce_mean ( kl_penalty ) ) , tf . summary . scalar ( 'avg_policy_loss' , tf . reduce_mean ( policy_loss ) ) ] ) policy_loss = tf . reduce_mean ( policy_loss , 0 ) return tf . check_numerics ( policy_loss , 'policy_loss' ) , summary
Compute the policy loss composed of multiple components .
50,868
# Adjust the adaptive KL penalty between the behavioral and current
# policy: if the observed KL change exceeds 1.3x the target, multiply the
# penalty by 1.5; if it falls below 0.7x the target, divide by 1.5.
# `float` serves as the no-op false branch of tf.cond. Returns a summary
# of the KL change and the new penalty value.
def _adjust_penalty ( self , observ , old_policy_params , length ) : old_policy = self . _policy_type ( ** old_policy_params ) with tf . name_scope ( 'adjust_penalty' ) : network = self . _network ( observ , length ) print_penalty = tf . Print ( 0 , [ self . _penalty ] , 'current penalty: ' ) with tf . control_dependencies ( [ print_penalty ] ) : kl_change = tf . reduce_mean ( self . _mask ( tf . contrib . distributions . kl_divergence ( old_policy , network . policy ) , length ) ) kl_change = tf . Print ( kl_change , [ kl_change ] , 'kl change: ' ) maybe_increase = tf . cond ( kl_change > 1.3 * self . _config . kl_target , lambda : tf . Print ( self . _penalty . assign ( self . _penalty * 1.5 ) , [ 0 ] , 'increase penalty ' ) , float ) maybe_decrease = tf . cond ( kl_change < 0.7 * self . _config . kl_target , lambda : tf . Print ( self . _penalty . assign ( self . _penalty / 1.5 ) , [ 0 ] , 'decrease penalty ' ) , float ) with tf . control_dependencies ( [ maybe_increase , maybe_decrease ] ) : return tf . summary . merge ( [ tf . summary . scalar ( 'kl_change' , kl_change ) , tf . summary . scalar ( 'penalty' , self . _penalty ) ] )
Adjust the KL policy between the behavioral and current policy .
50,869
def _mask(self, tensor, length, padding_value=0):
    """Set padding elements of a batch of sequences to a constant.

    Args:
      tensor: Batch-major sequence tensor of rank >= 2.
      length: Batch of valid sequence lengths.
      padding_value: Value written into the padded positions.

    Returns:
      Tensor of the same shape with padding replaced.
    """
    with tf.name_scope('mask'):
        timestep = tf.range(tensor.shape[1].value)
        valid = timestep[None, :] < length[:, None]
        extra_dims = tensor.shape.ndims - 2
        if extra_dims > 0:
            for _ in range(extra_dims):
                valid = valid[..., None]
            valid = tf.tile(valid, [1, 1] + tensor.shape[2:].as_list())
        padding = padding_value * tf.ones_like(tensor)
        masked = tf.where(valid, tensor, padding)
        return tf.check_numerics(masked, 'masked')
Set padding elements of a batch of sequences to a constant .
50,870
def main(self, *args, **kwargs):
    """Run the actor main loop: start, then wait forever for messages.

    Each received (body, message) pair is dispatched to the handler
    selected by get_handler(). stop() always runs on exit.
    """
    self.start(*args, **kwargs)
    try:
        while True:
            body, message = yield self.receive()
            self.get_handler(message)(body, message)
    finally:
        self.stop(*args, **kwargs)
Implement the actor main loop by waiting forever for messages .
50,871
def send(self, method, args={}, to=None, nowait=False, **kwargs):
    """Call *method* on the agent listening to the given routing key.

    Defaults to this actor's own routing key. Blocks for the reply
    unless *nowait* is true.
    """
    routing_key = self.routing_key if to is None else to
    result = self.call_or_cast(
        method, args, routing_key=routing_key, nowait=nowait, **kwargs)
    if not nowait:
        return result.get()
Call method on agent listening to routing_key .
50,872
def throw(self, method, args={}, nowait=False, **kwargs):
    """Call *method* on one of the agents, chosen round-robin.

    Returns the (unresolved) reply handle unless *nowait* is true.
    """
    reply = self.call_or_cast(
        method, args, type=ACTOR_TYPE.RR, nowait=nowait, **kwargs)
    if not nowait:
        return reply
Call method on one of the agents in round robin .
50,873
def scatter(self, method, args={}, nowait=False, timeout=None, **kwargs):
    """Broadcast *method* to all agents.

    Gathers replies within *timeout* (falling back to the default
    timeout) unless *nowait* is true.
    """
    if timeout is None:
        timeout = self.default_timeout
    reply = self.call_or_cast(
        method, args, type=ACTOR_TYPE.SCATTER,
        nowait=nowait, timeout=timeout, **kwargs)
    if not nowait:
        return reply.gather(timeout=timeout, **kwargs)
Broadcast method to all agents .
50,874
def call_or_cast(self, method, args={}, nowait=False, **kwargs):
    """Apply a remote method asynchronously (cast) when *nowait* is true,
    otherwise synchronously (call)."""
    dispatch = self.cast if nowait else self.call
    return dispatch(method, args, **kwargs)
Apply remote method asynchronously or synchronously depending on the value of nowait .
50,875
# Send a message to the actor, discarding replies. Serializes the class
# name, method, and args as the message body; validates the delivery type
# (direct/round-robin/scatter, defaulting to direct); resolves the target
# exchange and declares it alongside any caller-supplied declarations,
# then publishes through a pooled producer with the configured retry
# policy. NOTE(review): the merged _retry_policy is computed but the
# original caller-supplied retry_policy is what gets passed to publish —
# looks unintended; confirm against upstream before changing.
def cast ( self , method , args = { } , declare = None , retry = None , retry_policy = None , type = None , exchange = None , ** props ) : retry = self . retry if retry is None else retry body = { 'class' : self . name , 'method' : method , 'args' : args } _retry_policy = self . retry_policy if retry_policy : _retry_policy = dict ( _retry_policy , ** retry_policy ) if type and type not in self . types : raise ValueError ( 'Unsupported type: {0}' . format ( type ) ) elif not type : type = ACTOR_TYPE . DIRECT props . setdefault ( 'routing_key' , self . routing_key ) props . setdefault ( 'serializer' , self . serializer ) exchange = exchange or self . type_to_exchange [ type ] ( ) declare = ( maybe_list ( declare ) or [ ] ) + [ exchange ] with producers [ self . _connection ] . acquire ( block = True ) as producer : return producer . publish ( body , exchange = exchange , declare = declare , retry = retry , retry_policy = retry_policy , ** props )
Send message to actor . Discarding replies .
50,876
def handle_call(self, body, message):
    """Handle a call message, replying unless the dispatcher defers.

    A raised self.Next means another agent should handle the call, so
    no reply is sent.
    """
    try:
        result = self._DISPATCH(body, ticket=message.properties['reply_to'])
    except self.Next:
        pass
    else:
        self.reply(message, result)
Handle call message .
50,877
# Dispatch a received message to handle_call (when a reply is expected)
# or handle_cast. Acknowledgement policy is deliberate: the message is
# acked on success, and also on BaseException (e.g. SystemExit /
# KeyboardInterrupt) before re-raising, but NOT on ordinary Exception —
# leaving the message un-acked so the broker can redeliver it.
def _on_message ( self , body , message ) : if message . properties . get ( 'reply_to' ) : handler = self . handle_call else : handler = self . handle_cast def handle ( ) : try : handler ( body , message ) except Exception : raise except BaseException : message . ack ( ) raise else : message . ack ( ) handle ( )
What to do when a message is received .
50,878
def parse_options(self, prog_name, arguments):
    """Parse the available command line options.

    Exits early with the version string when '--version' is present.

    Returns:
      Tuple of (options, args) from the option parser.
    """
    if '--version' in arguments:
        self.exit_status(self.version, fh=sys.stdout)
    parser = self.create_parser(prog_name)
    return parser.parse_args(arguments)
Parse the available options .
50,879
def get(self, **kwargs):
    """Return the first gathered reply, defaulting the gather limit to one."""
    if 'limit' not in kwargs:
        kwargs['limit'] = 1
    return self._first(self.gather(**kwargs))
What kind of arguments should be passed here
50,880
def _gather(self, *args, **kwargs):
    """Return a generator over the python-converted collected replies."""
    propagate = kwargs.pop('propagate', True)
    replies = self.actor._collect_replies(*args, **kwargs)
    return (self.to_python(reply, propagate=propagate) for reply in replies)
Generator over the results
50,881
def to_python(self, reply, propagate=True):
    """Extract the value out of a reply message.

    Returns the 'ok' payload when present; otherwise builds an error
    from the 'nok' payload and raises it (or returns it when
    *propagate* is false).
    """
    if 'ok' in reply:
        return reply['ok']
    error = self.Error(*reply.get('nok') or ())
    if propagate:
        raise error
    return error
Extracts the value out of the reply message .
50,882
def spawn(self, cls, kwargs={}, nowait=False):
    """Spawn a new actor on a worker via a remote command.

    Returns an ActorProxy bound to the freshly generated actor id.
    The '__builtin__.unicode' check keeps Python 2 string class names
    usable directly.
    """
    actor_id = uuid()
    if str(qualname(cls)) == '__builtin__.unicode':
        name = cls
    else:
        name = qualname(cls)
    res = self.call(
        'spawn', {'cls': name, 'id': actor_id, 'kwargs': kwargs},
        type=ACTOR_TYPE.RR, nowait=nowait)
    return ActorProxy(name, actor_id, res, agent=self,
                      connection=self.connection, **kwargs)
Spawn a new actor on a celery worker by sending a remote command to the worker .
50,883
def select(self, cls, **kwargs):
    """Return a proxy for an already spawned actor of type *cls*."""
    name = qualname(cls)
    actor_id = first_reply(
        self.scatter('select', {'cls': name}, limit=1), cls)
    return ActorProxy(name, actor_id, agent=self,
                      connection=self.connection, **kwargs)
Get the id of already spawned actor
50,884
def process_message(self, actor, body, message):
    """Dispatch an actor message, green-spawning when the pool allows it.

    Warns when a blocking call would run without greenlets enabled.
    """
    if actor is not self and self.is_green():
        self.pool.spawn_n(actor._on_message, body, message)
        return
    if not self.is_green() and message.properties.get('reply_to'):
        warn('Starting a blocking call (%s) on actor (%s) '
             'when greenlets are disabled.',
             itemgetter('method')(body), actor.__class__)
    actor._on_message(body, message)
Process an actor message, dispatching it according to the worker settings .
50,885
def get_outer_frame_variables():
    """Get a dict of local and global variables of the first outer frame
    that lives in a different source file.

    Locals shadow globals of that frame in the returned mapping.
    """
    this_file = inspect.getframeinfo(inspect.currentframe()).filename
    outer = next(frame for frame in inspect.getouterframes(inspect.currentframe())
                 if frame.filename != this_file)
    return {**outer.frame.f_globals, **outer.frame.f_locals}
Get a dict of local and global variables of the first outer frame from another file .
50,886
def extract_table_names(query):
    """Extract the set of table names referenced by FROM/JOIN clauses
    of an SQL query (comma-separated lists included)."""
    names = set()
    blocks = re.findall(r'(?:FROM|JOIN)\s+(\w+(?:\s*,\s*\w+)*)',
                        query, re.IGNORECASE)
    for block in blocks:
        names.update(re.findall(r'\w+', block))
    return names
Extract table names from an SQL query .
50,887
def write_table(df, tablename, conn):
    """Write a dataframe to the database, silencing pandas' warning about
    the table name not matching an existing table exactly.

    The index is written only when all index levels are named.
    """
    keep_index = not any(name is None for name in df.index.names)
    with catch_warnings():
        filterwarnings(
            'ignore',
            message='The provided table name \'%s\' is not found exactly as such in the database' % tablename)
        to_sql(df, name=tablename, con=conn, index=keep_index)
Write a dataframe to the database .
50,888
def make_benchark(n_train, n_test, n_dim=2):
    """Compute the benchmarks for Ordinary Kriging.

    Times model fitting for every variogram model in VARIOGRAM_MODELS,
    then times prediction for each backend / moving-window size on the
    last trained model.

    Args:
      n_train: Number of random training points.
      n_test: Number of random test points.
      n_dim: Dimensionality of the sampled points (default 2; only the
        first two columns are used by OrdinaryKriging).

    Returns:
      Dict mapping 't_train_<model>' and 't_test_<backend>_<window>'
      keys to elapsed seconds.
    """
    X_train = np.random.rand(n_train, n_dim)
    y_train = np.random.rand(n_train)
    X_test = np.random.rand(n_test, n_dim)
    res = {}
    for variogram_model in VARIOGRAM_MODELS:
        tic = time()
        # Bug fix: the variogram model was previously hard-coded to
        # 'linear', so every iteration timed the same model.
        OK = OrdinaryKriging(X_train[:, 0], X_train[:, 1], y_train,
                             variogram_model=variogram_model,
                             verbose=False, enable_plotting=False)
        res['t_train_{}'.format(variogram_model)] = time() - tic
    # Prediction is timed with the last model fitted above.
    for backend in BACKENDS:
        for n_closest_points in N_MOVING_WINDOW:
            if backend == 'vectorized' and n_closest_points is not None:
                # vectorized backend does not support moving windows
                continue
            tic = time()
            OK.execute('points', X_test[:, 0], X_test[:, 1],
                       backend=backend, n_closest_points=n_closest_points)
            res['t_test_{}_{}'.format(backend, n_closest_points)] = time() - tic
    return res
Compute the benchmarks for Ordinary Kriging
50,889
# Print the benchmark results as two aligned ASCII tables: one row of
# training times per variogram model, then one row of prediction times
# per moving-window size across backends. The column widths are fixed at
# 11 characters plus padding; missing timings render as empty cells.
def print_benchmark ( n_train , n_test , n_dim , res ) : print ( '=' * 80 ) print ( ' ' * 10 , 'N_dim={}, N_train={}, N_test={}' . format ( n_dim , n_train , n_test ) ) print ( '=' * 80 ) print ( '\n' , '# Training the model' , '\n' ) print ( '|' . join ( [ '{:>11} ' . format ( el ) for el in [ 't_train (s)' ] + VARIOGRAM_MODELS ] ) ) print ( '-' * ( 11 + 2 ) * ( len ( VARIOGRAM_MODELS ) + 1 ) ) print ( '|' . join ( [ '{:>11} ' . format ( 'Training' ) ] + [ '{:>11.2} ' . format ( el ) for el in [ res [ 't_train_{}' . format ( mod ) ] for mod in VARIOGRAM_MODELS ] ] ) ) print ( '\n' , '# Predicting kriging points' , '\n' ) print ( '|' . join ( [ '{:>11} ' . format ( el ) for el in [ 't_test (s)' ] + BACKENDS ] ) ) print ( '-' * ( 11 + 2 ) * ( len ( BACKENDS ) + 1 ) ) for n_closest_points in N_MOVING_WINDOW : timing_results = [ res . get ( 't_test_{}_{}' . format ( mod , n_closest_points ) , '' ) for mod in BACKENDS ] print ( '|' . join ( [ '{:>11} ' . format ( 'N_nn=' + str ( n_closest_points ) ) ] + [ '{:>11.2} ' . format ( el ) for el in timing_results ] ) )
Print the benchmarks
50,890
def display_variogram_model(self):
    """Display the fitted variogram model together with the binned data.

    Binned semivariance points are drawn as red stars, the fitted model
    as a black line.
    """
    figure = plt.figure()
    axes = figure.add_subplot(111)
    axes.plot(self.lags, self.semivariance, 'r*')
    fitted = self.variogram_function(
        self.variogram_model_parameters, self.lags)
    axes.plot(self.lags, fitted, 'k-')
    plt.show()
Displays variogram model with the actual binned data .
50,891
def plot_epsilon_residuals(self):
    """Plot the epsilon residuals of the variogram fit as a scatter
    around a horizontal zero line."""
    figure = plt.figure()
    axes = figure.add_subplot(111)
    axes.scatter(range(self.epsilon.size), self.epsilon, c='k', marker='*')
    axes.axhline(y=0.0)
    plt.show()
Plots the epsilon residuals for the variogram fit .
50,892
def print_statistics(self):
    """Print the Q1, Q2, and cR statistics for the variogram fit.

    Ideally Q1 is close to zero, Q2 is close to one, and cR is as
    small as possible.
    """
    for label, value in (('Q1', self.Q1), ('Q2', self.Q2), ('cR', self.cR)):
        print(label, '=', value)
Prints out the Q1 Q2 and cR statistics for the variogram fit . NOTE that ideally Q1 is close to zero Q2 is close to 1 and cR is as small as possible .
50,893
def _adjust_for_anisotropy ( X , center , scaling , angle ) : center = np . asarray ( center ) [ None , : ] angle = np . asarray ( angle ) * np . pi / 180 X -= center Ndim = X . shape [ 1 ] if Ndim == 1 : raise NotImplementedError ( 'Not implemnented yet?' ) elif Ndim == 2 : stretch = np . array ( [ [ 1 , 0 ] , [ 0 , scaling [ 0 ] ] ] ) rot_tot = np . array ( [ [ np . cos ( - angle [ 0 ] ) , - np . sin ( - angle [ 0 ] ) ] , [ np . sin ( - angle [ 0 ] ) , np . cos ( - angle [ 0 ] ) ] ] ) elif Ndim == 3 : stretch = np . array ( [ [ 1. , 0. , 0. ] , [ 0. , scaling [ 0 ] , 0. ] , [ 0. , 0. , scaling [ 1 ] ] ] ) rotate_x = np . array ( [ [ 1. , 0. , 0. ] , [ 0. , np . cos ( - angle [ 0 ] ) , - np . sin ( - angle [ 0 ] ) ] , [ 0. , np . sin ( - angle [ 0 ] ) , np . cos ( - angle [ 0 ] ) ] ] ) rotate_y = np . array ( [ [ np . cos ( - angle [ 1 ] ) , 0. , np . sin ( - angle [ 1 ] ) ] , [ 0. , 1. , 0. ] , [ - np . sin ( - angle [ 1 ] ) , 0. , np . cos ( - angle [ 1 ] ) ] ] ) rotate_z = np . array ( [ [ np . cos ( - angle [ 2 ] ) , - np . sin ( - angle [ 2 ] ) , 0. ] , [ np . sin ( - angle [ 2 ] ) , np . cos ( - angle [ 2 ] ) , 0. ] , [ 0. , 0. , 1. ] ] ) rot_tot = np . dot ( rotate_z , np . dot ( rotate_y , rotate_x ) ) else : raise ValueError ( "Adjust for anisotropy function doesn't " "support ND spaces where N>3" ) X_adj = np . dot ( stretch , np . dot ( rot_tot , X . T ) ) . T X_adj += center return X_adj
Adjusts data coordinates to take into account anisotropy . Can also be used to take into account data scaling . Angles are CCW about specified axes . Scaling is applied in rotated coordinate system .
50,894
# Fit a variogram model when parameters were not specified. Builds an
# initial guess and parameter bounds appropriate to the model family
# (linear: slope+nugget; power: scale+exponent+nugget; others:
# sill+range+nugget), then minimizes the (optionally weighted) residuals
# with SciPy's bounded least_squares using the robust soft-L1 loss.
# Returns the fitted parameter vector.
def _calculate_variogram_model ( lags , semivariance , variogram_model , variogram_function , weight ) : if variogram_model == 'linear' : x0 = [ ( np . amax ( semivariance ) - np . amin ( semivariance ) ) / ( np . amax ( lags ) - np . amin ( lags ) ) , np . amin ( semivariance ) ] bnds = ( [ 0. , 0. ] , [ np . inf , np . amax ( semivariance ) ] ) elif variogram_model == 'power' : x0 = [ ( np . amax ( semivariance ) - np . amin ( semivariance ) ) / ( np . amax ( lags ) - np . amin ( lags ) ) , 1.1 , np . amin ( semivariance ) ] bnds = ( [ 0. , 0.001 , 0. ] , [ np . inf , 1.999 , np . amax ( semivariance ) ] ) else : x0 = [ np . amax ( semivariance ) - np . amin ( semivariance ) , 0.25 * np . amax ( lags ) , np . amin ( semivariance ) ] bnds = ( [ 0. , 0. , 0. ] , [ 10. * np . amax ( semivariance ) , np . amax ( lags ) , np . amax ( semivariance ) ] ) res = least_squares ( _variogram_residuals , x0 , bounds = bnds , loss = 'soft_l1' , args = ( lags , semivariance , variogram_function , weight ) ) return res . x
Function that fits a variogram model when parameters are not specified . Returns variogram model parameters that minimize the RMSE between the specified variogram function and the actual calculated variogram points .
50,895
# Set up and solve the ordinary kriging system for a single target
# coordinate (used only for the fit-statistics calculations). Builds the
# pairwise distance matrix (euclidean or great-circle), assembles the
# kriging matrix with the Lagrange-multiplier row/column of ones, zeroes
# the RHS entry for an exactly coincident point (|distance| <= 1e-10),
# and solves the linear system. Returns the interpolated value and the
# kriging variance.
def _krige ( X , y , coords , variogram_function , variogram_model_parameters , coordinates_type ) : zero_index = None zero_value = False if coordinates_type == 'euclidean' : d = squareform ( pdist ( X , metric = 'euclidean' ) ) bd = np . squeeze ( cdist ( X , coords [ None , : ] , metric = 'euclidean' ) ) elif coordinates_type == 'geographic' : x1 , x2 = np . meshgrid ( X [ : , 0 ] , X [ : , 0 ] , sparse = True ) y1 , y2 = np . meshgrid ( X [ : , 1 ] , X [ : , 1 ] , sparse = True ) d = great_circle_distance ( x1 , y1 , x2 , y2 ) bd = great_circle_distance ( X [ : , 0 ] , X [ : , 1 ] , coords [ 0 ] * np . ones ( X . shape [ 0 ] ) , coords [ 1 ] * np . ones ( X . shape [ 0 ] ) ) else : raise ValueError ( "Specified coordinate type '%s' " "is not supported." % coordinates_type ) if np . any ( np . absolute ( bd ) <= 1e-10 ) : zero_value = True zero_index = np . where ( bd <= 1e-10 ) [ 0 ] [ 0 ] n = X . shape [ 0 ] a = np . zeros ( ( n + 1 , n + 1 ) ) a [ : n , : n ] = - variogram_function ( variogram_model_parameters , d ) np . fill_diagonal ( a , 0.0 ) a [ n , : ] = 1.0 a [ : , n ] = 1.0 a [ n , n ] = 0.0 b = np . zeros ( ( n + 1 , 1 ) ) b [ : n , 0 ] = - variogram_function ( variogram_model_parameters , bd ) if zero_value : b [ zero_index , 0 ] = 0.0 b [ n , 0 ] = 1.0 res = np . linalg . solve ( a , b ) zinterp = np . sum ( res [ : n , 0 ] * y ) sigmasq = np . sum ( res [ : , 0 ] * - b [ : , 0 ] ) return zinterp , sigmasq
Sets up and solves the ordinary kriging system for the given coordinate pair . This function is only used for the statistics calculations .
50,896
def _find_statistics(X, y, variogram_function,
                     variogram_model_parameters, coordinates_type):
    """Calculate variogram fit statistics.

    Kriges each point from all preceding points (leave-future-out) and
    returns the delta, sigma, and epsilon arrays used by the Q1/Q2/cR
    statistics, dropping entries with negligible kriging variance.
    """
    delta = np.zeros(y.shape)
    sigma = np.zeros(y.shape)
    # The first point has no predecessors, so start at index 1.
    for i in range(1, y.shape[0]):
        k, ss = _krige(X[:i, :], y[:i], X[i, :], variogram_function,
                       variogram_model_parameters, coordinates_type)
        if np.absolute(ss) < eps:
            continue
        delta[i] = y[i] - k
        sigma[i] = np.sqrt(ss)
    keep = sigma > eps
    delta = delta[keep]
    sigma = sigma[keep]
    epsilon = delta / sigma
    return delta, sigma, epsilon
Calculates variogram fit statistics . Returns the delta sigma and epsilon values for the variogram fit . These arrays are used for statistics calculations .
50,897
def fit(self, p, x, y):
    """Fit the regression model on (p, y), then krige its residuals
    against the coordinates x."""
    self.regression_model.fit(p, y)
    residual = y - self.regression_model.predict(p)
    print('Finished learning regression model')
    self.krige.fit(x=x, y=residual)
    print('Finished kriging residuals')
fit the regression method and also Krige the residual
50,898
def score(self, p, x, y, sample_weight=None):
    """Score the combined model using the coefficient of determination
    (overloads the default regression score method)."""
    predictions = self.predict(p, x)
    return r2_score(y_pred=predictions, y_true=y,
                    sample_weight=sample_weight)
Overloading default regression score method
50,899
# Solve the kriging system point by point using only the n closest
# training points per target (moving window). For each unmasked point it
# slices the relevant sub-matrix out of the precomputed full kriging
# matrix (appending the Lagrange row/column index), zeroes the RHS entry
# for coincident points, and solves the local system with
# scipy.linalg.solve. Memory-light, but the loop runs in pure Python.
# NOTE(review): `import scipy.linalg.lapack` appears to only ensure the
# scipy.linalg submodule is loaded; solve() is what is actually called.
def _exec_loop_moving_window ( self , a_all , bd_all , mask , bd_idx ) : import scipy . linalg . lapack npt = bd_all . shape [ 0 ] n = bd_idx . shape [ 1 ] kvalues = np . zeros ( npt ) sigmasq = np . zeros ( npt ) for i in np . nonzero ( ~ mask ) [ 0 ] : b_selector = bd_idx [ i ] bd = bd_all [ i ] a_selector = np . concatenate ( ( b_selector , np . array ( [ a_all . shape [ 0 ] - 1 ] ) ) ) a = a_all [ a_selector [ : , None ] , a_selector ] if np . any ( np . absolute ( bd ) <= self . eps ) : zero_value = True zero_index = np . where ( np . absolute ( bd ) <= self . eps ) else : zero_value = False zero_index = None b = np . zeros ( ( n + 1 , 1 ) ) b [ : n , 0 ] = - self . variogram_function ( self . variogram_model_parameters , bd ) if zero_value : b [ zero_index [ 0 ] , 0 ] = 0.0 b [ n , 0 ] = 1.0 x = scipy . linalg . solve ( a , b ) kvalues [ i ] = x [ : n , 0 ] . dot ( self . VALUES [ b_selector ] ) sigmasq [ i ] = - x [ : , 0 ] . dot ( b [ : , 0 ] ) return kvalues , sigmasq
Solves the kriging system by looping over all specified points . Uses only a certain number of closest points . Not very memory intensive but the loop is done in pure Python .