idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
50,800
def main ( _ ) : utility . set_up_logging ( ) if not FLAGS . logdir or not FLAGS . outdir : raise KeyError ( 'You must specify logging and outdirs directories.' ) FLAGS . logdir = os . path . expanduser ( FLAGS . logdir ) FLAGS . outdir = os . path . expanduser ( FLAGS . outdir ) visualize ( FLAGS . logdir , FLAGS . ou...
Load a trained algorithm and render videos .
50,801
def reinit_nested_vars ( variables , indices = None ) : if isinstance ( variables , ( tuple , list ) ) : return tf . group ( * [ reinit_nested_vars ( variable , indices ) for variable in variables ] ) if indices is None : return variables . assign ( tf . zeros_like ( variables ) ) else : zeros = tf . zeros ( [ tf . sha...
Reset all variables in a nested tuple to zeros .
50,802
def assign_nested_vars ( variables , tensors , indices = None ) : if isinstance ( variables , ( tuple , list ) ) : return tf . group ( * [ assign_nested_vars ( variable , tensor ) for variable , tensor in zip ( variables , tensors ) ] ) if indices is None : return variables . assign ( tensors ) else : return tf . scatt...
Assign tensors to matching nested tuple of variables .
50,803
def discounted_return ( reward , length , discount ) : timestep = tf . range ( reward . shape [ 1 ] . value ) mask = tf . cast ( timestep [ None , : ] < length [ : , None ] , tf . float32 ) return_ = tf . reverse ( tf . transpose ( tf . scan ( lambda agg , cur : cur + discount * agg , tf . transpose ( tf . reverse ( ma...
Discounted Monte - Carlo returns .
50,804
def fixed_step_return ( reward , value , length , discount , window ) : timestep = tf . range ( reward . shape [ 1 ] . value ) mask = tf . cast ( timestep [ None , : ] < length [ : , None ] , tf . float32 ) return_ = tf . zeros_like ( reward ) for _ in range ( window ) : return_ += reward reward = discount * tf . conca...
N - step discounted return .
50,805
def lambda_return ( reward , value , length , discount , lambda_ ) : timestep = tf . range ( reward . shape [ 1 ] . value ) mask = tf . cast ( timestep [ None , : ] < length [ : , None ] , tf . float32 ) sequence = mask * reward + discount * value * ( 1 - lambda_ ) discount = mask * discount * lambda_ sequence = tf . s...
TD - lambda returns .
50,806
def lambda_advantage ( reward , value , length , discount , gae_lambda ) : timestep = tf . range ( reward . shape [ 1 ] . value ) mask = tf . cast ( timestep [ None , : ] < length [ : , None ] , tf . float32 ) next_value = tf . concat ( [ value [ : , 1 : ] , tf . zeros_like ( value [ : , - 1 : ] ) ] , 1 ) delta = rewar...
Generalized Advantage Estimation .
50,807
def available_gpus ( ) : local_device_protos = device_lib . list_local_devices ( ) return [ x . name for x in local_device_protos if x . device_type == 'GPU' ]
List of GPU device names detected by TensorFlow .
50,808
def gradient_summaries ( grad_vars , groups = None , scope = 'gradients' ) : groups = groups or { r'all' : r'.*' } grouped = collections . defaultdict ( list ) for grad , var in grad_vars : if grad is None : continue for name , pattern in groups . items ( ) : if re . match ( pattern , var . name ) : name = re . sub ( p...
Create histogram summaries of the gradient .
50,809
def variable_summaries ( vars_ , groups = None , scope = 'weights' ) : groups = groups or { r'all' : r'.*' } grouped = collections . defaultdict ( list ) for var in vars_ : for name , pattern in groups . items ( ) : if re . match ( pattern , var . name ) : name = re . sub ( pattern , name , var . name ) grouped [ name ...
Create histogram summaries for the provided variables .
50,810
def set_dimension ( tensor , axis , value ) : shape = tensor . shape . as_list ( ) if shape [ axis ] not in ( value , None ) : message = 'Cannot set dimension {} of tensor {} to {}; is already {}.' raise ValueError ( message . format ( axis , tensor . name , value , shape [ axis ] ) ) shape [ axis ] = value tensor . se...
Set the length of a tensor along the specified dimension .
50,811
def default ( ) : algorithm = algorithms . PPO num_agents = 30 eval_episodes = 30 use_gpu = False normalize_ranges = True network = networks . feed_forward_gaussian weight_summaries = dict ( all = r'.*' , policy = r'.*/policy/.*' , value = r'.*/value/.*' ) policy_layers = 200 , 100 value_layers = 200 , 100 init_output_...
Default configuration for PPO .
50,812
def pendulum ( ) : locals ( ) . update ( default ( ) ) env = 'Pendulum-v0' max_length = 200 steps = 1e6 batch_size = 20 chunk_length = 50 return locals ( )
Configuration for the pendulum classic control task .
50,813
def cartpole ( ) : locals ( ) . update ( default ( ) ) env = 'CartPole-v1' max_length = 500 steps = 2e5 normalize_ranges = False network = networks . feed_forward_categorical return locals ( )
Configuration for the cart pole classic control task .
50,814
def reacher ( ) : locals ( ) . update ( default ( ) ) env = 'Reacher-v2' max_length = 1000 steps = 5e6 discount = 0.985 update_every = 60 return locals ( )
Configuration for MuJoCo's reacher task .
50,815
def bullet_ant ( ) : locals ( ) . update ( default ( ) ) import pybullet_envs env = 'AntBulletEnv-v0' max_length = 1000 steps = 3e7 update_every = 60 return locals ( )
Configuration for PyBullet's ant task .
50,816
def step ( self , actions ) : for index , ( env , action ) in enumerate ( zip ( self . _envs , actions ) ) : if not env . action_space . contains ( action ) : message = 'Invalid action at index {}: {}' raise ValueError ( message . format ( index , action ) ) if self . _blocking : transitions = [ env . step ( action ) f...
Forward a batch of actions to the wrapped environments .
50,817
def call ( self , name , * args , ** kwargs ) : payload = name , args , kwargs self . _conn . send ( ( self . _CALL , payload ) ) return self . _receive
Asynchronously call a method of the external environment .
50,818
def close ( self ) : try : self . _conn . send ( ( self . _CLOSE , None ) ) self . _conn . close ( ) except IOError : pass self . _process . join ( )
Send a close message to the external process and join it .
50,819
def step ( self , action , blocking = True ) : promise = self . call ( 'step' , action ) if blocking : return promise ( ) else : return promise
Step the environment .
50,820
def _receive ( self ) : message , payload = self . _conn . recv ( ) if message == self . _EXCEPTION : stacktrace = payload raise Exception ( stacktrace ) if message == self . _RESULT : return payload raise KeyError ( 'Received message of unexpected type {}' . format ( message ) )
Wait for a message from the worker process and return its payload .
50,821
def _worker ( self , constructor , conn ) : try : env = constructor ( ) while True : try : if not conn . poll ( 0.1 ) : continue message , payload = conn . recv ( ) except ( EOFError , KeyboardInterrupt ) : break if message == self . _ACCESS : name = payload result = getattr ( env , name ) conn . send ( ( self . _RESUL...
The process waits for actions and sends back environment results .
50,822
def step ( self , action ) : observ , reward , done , info = self . _env . step ( action ) observ = self . _convert_observ ( observ ) reward = self . _convert_reward ( reward ) return observ , reward , done , info
Forward action to the wrapped environment .
50,823
def _convert_observ ( self , observ ) : if not np . isfinite ( observ ) . all ( ) : raise ValueError ( 'Infinite observation encountered.' ) if observ . dtype == np . float64 : return observ . astype ( np . float32 ) if observ . dtype == np . int64 : return observ . astype ( np . int32 ) return observ
Convert the observation to 32 bits .
50,824
def _convert_reward ( self , reward ) : if not np . isfinite ( reward ) . all ( ) : raise ValueError ( 'Infinite reward encountered.' ) return np . array ( reward , dtype = np . float32 )
Convert the reward to 32 bits .
50,825
def value ( self ) : return self . _sum / tf . cast ( self . _count , self . _dtype )
The current value of the mean .
50,826
def submit ( self , value ) : if value . shape . ndims == self . _sum . shape . ndims : value = value [ None , ... ] return tf . group ( self . _sum . assign_add ( tf . reduce_sum ( value , 0 ) ) , self . _count . assign_add ( tf . shape ( value ) [ 0 ] ) )
Submit a single or batch tensor to refine the streaming mean .
50,827
def clear ( self ) : value = self . _sum / tf . cast ( self . _count , self . _dtype ) with tf . control_dependencies ( [ value ] ) : reset_value = self . _sum . assign ( tf . zeros_like ( self . _sum ) ) reset_count = self . _count . assign ( 0 ) with tf . control_dependencies ( [ reset_value , reset_count ] ) : retur...
Return the mean estimate and reset the streaming statistics .
50,828
def zip_ ( * structures , ** kwargs ) : flatten = kwargs . pop ( 'flatten' , False ) assert not kwargs , 'zip() got unexpected keyword arguments.' return map ( lambda * x : x if len ( x ) > 1 else x [ 0 ] , * structures , flatten = flatten )
Combine corresponding elements in multiple nested structures to tuples .
50,829
def map_ ( function , * structures , ** kwargs ) : flatten = kwargs . pop ( 'flatten' , False ) assert not kwargs , 'map() got unexpected keyword arguments.' def impl ( function , * structures ) : if len ( structures ) == 0 : return structures if all ( isinstance ( s , ( tuple , list ) ) for s in structures ) : if len ...
Apply a function to every element in a nested structure .
50,830
def flatten_ ( structure ) : if isinstance ( structure , dict ) : if structure : structure = zip ( * sorted ( structure . items ( ) , key = lambda x : x [ 0 ] ) ) [ 1 ] else : structure = ( ) if isinstance ( structure , ( tuple , list ) ) : result = [ ] for element in structure : result += flatten_ ( element ) return t...
Combine all leaves of a nested structure into a tuple .
50,831
def filter_ ( predicate , * structures , ** kwargs ) : flatten = kwargs . pop ( 'flatten' , False ) assert not kwargs , 'filter() got unexpected keyword arguments.' def impl ( predicate , * structures ) : if len ( structures ) == 0 : return structures if all ( isinstance ( s , ( tuple , list ) ) for s in structures ) :...
Select elements of a nested structure based on a predicate function .
50,832
def add_phase ( self , name , done , score , summary , steps , report_every = None , log_every = None , checkpoint_every = None , feed = None ) : done = tf . convert_to_tensor ( done , tf . bool ) score = tf . convert_to_tensor ( score , tf . float32 ) summary = tf . convert_to_tensor ( summary , tf . string ) feed = f...
Add a phase to the loop protocol .
50,833
def run ( self , sess , saver , max_step = None ) : global_step = sess . run ( self . _step ) steps_made = 1 while True : if max_step and global_step >= max_step : break phase , epoch , steps_in = self . _find_current_phase ( global_step ) phase_step = epoch * phase . steps + steps_in if steps_in % phase . steps < step...
Run the loop schedule for a specified number of steps .
50,834
def _is_every_steps ( self , phase_step , batch , every ) : if not every : return False covered_steps = range ( phase_step , phase_step + batch ) return any ( ( step + 1 ) % every == 0 for step in covered_steps )
Determine whether a periodic event should happen at this step .
50,835
def _find_current_phase ( self , global_step ) : epoch_size = sum ( phase . steps for phase in self . _phases ) epoch = int ( global_step // epoch_size ) steps_in = global_step % epoch_size for phase in self . _phases : if steps_in < phase . steps : return phase , epoch , steps_in steps_in -= phase . steps
Determine the current phase based on the global step .
50,836
def _define_step ( self , done , score , summary ) : if done . shape . ndims == 0 : done = done [ None ] if score . shape . ndims == 0 : score = score [ None ] score_mean = streaming_mean . StreamingMean ( ( ) , tf . float32 ) with tf . control_dependencies ( [ done , score , summary ] ) : done_score = tf . gather ( sc...
Combine operations of a phase .
50,837
def _store_checkpoint ( self , sess , saver , global_step ) : if not self . _logdir or not saver : return tf . gfile . MakeDirs ( self . _logdir ) filename = os . path . join ( self . _logdir , 'model.ckpt' ) saver . save ( sess , filename , global_step )
Store a checkpoint if a log directory was provided to the constructor .
50,838
def _define_loop ( graph , logdir , train_steps , eval_steps ) : loop = tools . Loop ( logdir , graph . step , graph . should_log , graph . do_report , graph . force_reset ) loop . add_phase ( 'train' , graph . done , graph . score , graph . summary , train_steps , report_every = train_steps , log_every = train_steps /...
Create and configure a training loop with training and evaluation phases .
50,839
def train ( config , env_processes ) : tf . reset_default_graph ( ) if config . update_every % config . num_agents : tf . logging . warn ( 'Number of agents should divide episodes per update.' ) with tf . device ( '/cpu:0' ) : batch_env = utility . define_batch_env ( lambda : _create_environment ( config ) , config . n...
Training and evaluation entry point yielding scores .
50,840
def main ( _ ) : utility . set_up_logging ( ) if not FLAGS . config : raise KeyError ( 'You must specify a configuration.' ) logdir = FLAGS . logdir and os . path . expanduser ( os . path . join ( FLAGS . logdir , '{}-{}' . format ( FLAGS . timestamp , FLAGS . config ) ) ) try : config = utility . load_config ( logdir ...
Create or load configuration and launch the trainer .
50,841
def iterate_sequences ( consumer_fn , output_template , sequences , length , chunk_length = None , batch_size = None , num_epochs = 1 , padding_value = 0 ) : if not length . shape [ 0 ] . value : raise ValueError ( 'Batch size of length tensor must be set.' ) num_sequences = length . shape [ 0 ] . value sequences = dic...
Iterate over batches of chunks of sequences for multiple epochs .
50,842
def chunk_sequence ( sequence , chunk_length = 200 , padding_value = 0 ) : if 'length' in sequence : length = sequence . pop ( 'length' ) else : length = tf . shape ( tools . nested . flatten ( sequence ) [ 0 ] ) [ 0 ] num_chunks = ( length - 1 ) // chunk_length + 1 padding_length = chunk_length * num_chunks - length p...
Split a nested dict of sequence tensors into a batch of chunks .
50,843
def remove_padding ( sequence ) : length = sequence . pop ( 'length' ) sequence = tools . nested . map ( lambda tensor : tensor [ : length ] , sequence ) return sequence
Selects the used frames of a sequence up to its length .
50,844
def transform ( self , value ) : with tf . name_scope ( self . _name + '/transform' ) : no_batch_dim = value . shape . ndims == self . _mean . shape . ndims if no_batch_dim : value = value [ None , ... ] if self . _center : value -= self . _mean [ None , ... ] if self . _scale : value /= tf . cond ( self . _count > 1 ,...
Normalize a single or batch tensor .
50,845
def update ( self , value ) : with tf . name_scope ( self . _name + '/update' ) : if value . shape . ndims == self . _mean . shape . ndims : value = value [ None , ... ] count = tf . shape ( value ) [ 0 ] with tf . control_dependencies ( [ self . _count . assign_add ( count ) ] ) : step = tf . cast ( self . _count , tf...
Update the mean and variance estimates .
50,846
def reset ( self ) : with tf . name_scope ( self . _name + '/reset' ) : return tf . group ( self . _count . assign ( 0 ) , self . _mean . assign ( tf . zeros_like ( self . _mean ) ) , self . _var_sum . assign ( tf . zeros_like ( self . _var_sum ) ) )
Reset the estimates of mean and variance .
50,847
def summary ( self ) : with tf . name_scope ( self . _name + '/summary' ) : mean_summary = tf . cond ( self . _count > 0 , lambda : self . _summary ( 'mean' , self . _mean ) , str ) std_summary = tf . cond ( self . _count > 1 , lambda : self . _summary ( 'stddev' , self . _std ( ) ) , str ) return tf . summary . merge ...
Summary string of mean and standard deviation .
50,848
def _std ( self ) : variance = tf . cond ( self . _count > 1 , lambda : self . _var_sum / tf . cast ( self . _count - 1 , tf . float32 ) , lambda : tf . ones_like ( self . _var_sum ) * float ( 'nan' ) ) return tf . sqrt ( variance + 1e-4 )
Computes the current estimate of the standard deviation .
50,849
def _summary ( self , name , tensor ) : if tensor . shape . ndims == 0 : return tf . summary . scalar ( name , tensor ) else : return tf . summary . histogram ( name , tensor )
Create a scalar or histogram summary matching the rank of the tensor .
50,850
def length ( self , rows = None ) : rows = tf . range ( self . _capacity ) if rows is None else rows return tf . gather ( self . _length , rows )
Tensor holding the current length of episodes .
50,851
def append ( self , transitions , rows = None ) : rows = tf . range ( self . _capacity ) if rows is None else rows assert rows . shape . ndims == 1 assert_capacity = tf . assert_less ( rows , self . _capacity , message = 'capacity exceeded' ) with tf . control_dependencies ( [ assert_capacity ] ) : assert_max_length = ...
Append a batch of transitions to rows of the memory .
50,852
def replace ( self , episodes , length , rows = None ) : rows = tf . range ( self . _capacity ) if rows is None else rows assert rows . shape . ndims == 1 assert_capacity = tf . assert_less ( rows , self . _capacity , message = 'capacity exceeded' ) with tf . control_dependencies ( [ assert_capacity ] ) : assert_max_le...
Replace full episodes .
50,853
def data ( self , rows = None ) : rows = tf . range ( self . _capacity ) if rows is None else rows assert rows . shape . ndims == 1 episode = tools . nested . map ( lambda var : tf . gather ( var , rows ) , self . _buffers ) length = tf . gather ( self . _length , rows ) return episode , length
Access a batch of episodes from the memory .
50,854
def clear ( self , rows = None ) : rows = tf . range ( self . _capacity ) if rows is None else rows assert rows . shape . ndims == 1 return tf . scatter_update ( self . _length , rows , tf . zeros_like ( rows ) )
Reset episodes in the memory .
50,855
def _parse_shape ( self , space ) : if isinstance ( space , gym . spaces . Discrete ) : return ( ) if isinstance ( space , gym . spaces . Box ) : return space . shape raise NotImplementedError ( )
Get a tensor shape from an OpenAI Gym space .
50,856
def _parse_dtype ( self , space ) : if isinstance ( space , gym . spaces . Discrete ) : return tf . int32 if isinstance ( space , gym . spaces . Box ) : return tf . float32 raise NotImplementedError ( )
Get a tensor dtype from an OpenAI Gym space .
50,857
def begin_episode ( self , agent_indices ) : with tf . name_scope ( 'begin_episode/' ) : if self . _last_state is None : reset_state = tf . no_op ( ) else : reset_state = utility . reinit_nested_vars ( self . _last_state , agent_indices ) reset_buffer = self . _current_episodes . clear ( agent_indices ) with tf . contr...
Reset the recurrent states and stored episode .
50,858
def perform ( self , agent_indices , observ ) : with tf . name_scope ( 'perform/' ) : observ = self . _observ_filter . transform ( observ ) if self . _last_state is None : state = None else : state = tools . nested . map ( lambda x : tf . gather ( x , agent_indices ) , self . _last_state ) with tf . device ( '/gpu:0' i...
Compute a batch of actions and a summary for a batch of observations .
50,859
def experience ( self , agent_indices , observ , action , reward , unused_done , unused_nextob ) : with tf . name_scope ( 'experience/' ) : return tf . cond ( self . _is_training , lambda : self . _define_experience ( agent_indices , observ , action , reward ) , str )
Process the transition tuple of the current step .
50,860
def end_episode ( self , agent_indices ) : with tf . name_scope ( 'end_episode/' ) : return tf . cond ( self . _is_training , lambda : self . _define_end_episode ( agent_indices ) , str )
Add episodes to the memory and perform update steps if memory is full .
50,861
def _initialize_policy ( self ) : with tf . device ( '/gpu:0' if self . _use_gpu else '/cpu:0' ) : network = functools . partial ( self . _config . network , self . _config , self . _batch_env . action_space ) self . _network = tf . make_template ( 'network' , network ) output = self . _network ( tf . zeros_like ( self...
Initialize the policy .
50,862
def _initialize_memory ( self , policy_params ) : template = ( self . _batch_env . observ [ 0 ] , self . _batch_env . action [ 0 ] , tools . nested . map ( lambda x : x [ 0 , 0 ] , policy_params ) , self . _batch_env . reward [ 0 ] ) with tf . variable_scope ( 'ppo_temporary' ) : self . _current_episodes = parts . Epis...
Initialize temporary and permanent memory .
50,863
def _training ( self ) : with tf . device ( '/gpu:0' if self . _use_gpu else '/cpu:0' ) : with tf . name_scope ( 'training' ) : assert_full = tf . assert_equal ( self . _num_finished_episodes , self . _config . update_every ) with tf . control_dependencies ( [ assert_full ] ) : data = self . _finished_episodes . data (...
Perform multiple training iterations of both policy and value baseline .
50,864
def _perform_update_steps ( self , observ , action , old_policy_params , reward , length ) : return_ = utility . discounted_return ( reward , length , self . _config . discount ) value = self . _network ( observ , length ) . value if self . _config . gae_lambda : advantage = utility . lambda_advantage ( reward , value ...
Perform multiple update steps of value function and policy .
50,865
def _update_step ( self , sequence ) : observ , action , old_policy_params , reward , advantage = sequence [ 'sequence' ] length = sequence [ 'length' ] old_policy = self . _policy_type ( ** old_policy_params ) value_loss , value_summary = self . _value_loss ( observ , reward , length ) network = self . _network ( obse...
Compute the current combined loss and perform a gradient update step .
50,866
def _value_loss ( self , observ , reward , length ) : with tf . name_scope ( 'value_loss' ) : value = self . _network ( observ , length ) . value return_ = utility . discounted_return ( reward , length , self . _config . discount ) advantage = return_ - value value_loss = 0.5 * self . _mask ( advantage ** 2 , length ) ...
Compute the loss function for the value baseline .
50,867
def _policy_loss ( self , old_policy , policy , action , advantage , length ) : with tf . name_scope ( 'policy_loss' ) : kl = tf . contrib . distributions . kl_divergence ( old_policy , policy ) kl = tf . check_numerics ( kl , 'kl' ) kl = tf . reduce_mean ( self . _mask ( kl , length ) , 1 ) policy_gradient = tf . exp ...
Compute the policy loss composed of multiple components .
50,868
def _adjust_penalty ( self , observ , old_policy_params , length ) : old_policy = self . _policy_type ( ** old_policy_params ) with tf . name_scope ( 'adjust_penalty' ) : network = self . _network ( observ , length ) print_penalty = tf . Print ( 0 , [ self . _penalty ] , 'current penalty: ' ) with tf . control_dependen...
Adjust the KL policy between the behavioral and current policy .
50,869
def _mask ( self , tensor , length , padding_value = 0 ) : with tf . name_scope ( 'mask' ) : range_ = tf . range ( tensor . shape [ 1 ] . value ) mask = range_ [ None , : ] < length [ : , None ] if tensor . shape . ndims > 2 : for _ in range ( tensor . shape . ndims - 2 ) : mask = mask [ ... , None ] mask = tf . tile (...
Set padding elements of a batch of sequences to a constant .
50,870
def main ( self , * args , ** kwargs ) : self . start ( * args , ** kwargs ) try : while 1 : body , message = yield self . receive ( ) handler = self . get_handler ( message ) handler ( body , message ) finally : self . stop ( * args , ** kwargs )
Implement the actor main loop by waiting forever for messages .
50,871
def send ( self , method , args = { } , to = None , nowait = False , ** kwargs ) : if to is None : to = self . routing_key r = self . call_or_cast ( method , args , routing_key = to , nowait = nowait , ** kwargs ) if not nowait : return r . get ( )
Call method on agent listening to routing_key .
50,872
def throw ( self , method , args = { } , nowait = False , ** kwargs ) : r = self . call_or_cast ( method , args , type = ACTOR_TYPE . RR , nowait = nowait , ** kwargs ) if not nowait : return r
Call method on one of the agents in round robin .
50,873
def scatter ( self , method , args = { } , nowait = False , timeout = None , ** kwargs ) : timeout = timeout if timeout is not None else self . default_timeout r = self . call_or_cast ( method , args , type = ACTOR_TYPE . SCATTER , nowait = nowait , timeout = timeout , ** kwargs ) if not nowait : return r . gather ( ti...
Broadcast method to all agents .
50,874
def call_or_cast ( self , method , args = { } , nowait = False , ** kwargs ) : return ( nowait and self . cast or self . call ) ( method , args , ** kwargs )
Apply remote method asynchronously or synchronously depending on the value of nowait .
50,875
def cast ( self , method , args = { } , declare = None , retry = None , retry_policy = None , type = None , exchange = None , ** props ) : retry = self . retry if retry is None else retry body = { 'class' : self . name , 'method' : method , 'args' : args } _retry_policy = self . retry_policy if retry_policy : _retry_po...
Send message to actor . Discarding replies .
50,876
def handle_call ( self , body , message ) : try : r = self . _DISPATCH ( body , ticket = message . properties [ 'reply_to' ] ) except self . Next : pass else : self . reply ( message , r )
Handle call message .
50,877
def _on_message ( self , body , message ) : if message . properties . get ( 'reply_to' ) : handler = self . handle_call else : handler = self . handle_cast def handle ( ) : try : handler ( body , message ) except Exception : raise except BaseException : message . ack ( ) raise else : message . ack ( ) handle ( )
What to do when a message is received .
50,878
def parse_options ( self , prog_name , arguments ) : if '--version' in arguments : self . exit_status ( self . version , fh = sys . stdout ) parser = self . create_parser ( prog_name ) options , args = parser . parse_args ( arguments ) return options , args
Parse the available options .
50,879
def get ( self , ** kwargs ) : "What kind of arguments should be pass here" kwargs . setdefault ( 'limit' , 1 ) return self . _first ( self . gather ( ** kwargs ) )
What kind of arguments should be passed here
50,880
def _gather ( self , * args , ** kwargs ) : propagate = kwargs . pop ( 'propagate' , True ) return ( self . to_python ( reply , propagate = propagate ) for reply in self . actor . _collect_replies ( * args , ** kwargs ) )
Generator over the results
50,881
def to_python ( self , reply , propagate = True ) : try : return reply [ 'ok' ] except KeyError : error = self . Error ( * reply . get ( 'nok' ) or ( ) ) if propagate : raise error return error
Extracts the value out of the reply message .
50,882
def spawn ( self , cls , kwargs = { } , nowait = False ) : actor_id = uuid ( ) if str ( qualname ( cls ) ) == '__builtin__.unicode' : name = cls else : name = qualname ( cls ) res = self . call ( 'spawn' , { 'cls' : name , 'id' : actor_id , 'kwargs' : kwargs } , type = ACTOR_TYPE . RR , nowait = nowait ) return ActorPr...
Spawn a new actor on a celery worker by sending a remote command to the worker .
50,883
def select ( self , cls , ** kwargs ) : name = qualname ( cls ) id = first_reply ( self . scatter ( 'select' , { 'cls' : name } , limit = 1 ) , cls ) return ActorProxy ( name , id , agent = self , connection = self . connection , ** kwargs )
Get the id of already spawned actor
50,884
def process_message ( self , actor , body , message ) : if actor is not self and self . is_green ( ) : self . pool . spawn_n ( actor . _on_message , body , message ) else : if not self . is_green ( ) and message . properties . get ( 'reply_to' ) : warn ( 'Starting a blocking call (%s) on actor (%s) ' 'when greenlets ar...
Process actor message depending on the worker settings .
50,885
def get_outer_frame_variables ( ) : cur_filename = inspect . getframeinfo ( inspect . currentframe ( ) ) . filename outer_frame = next ( f for f in inspect . getouterframes ( inspect . currentframe ( ) ) if f . filename != cur_filename ) variables = { } variables . update ( outer_frame . frame . f_globals ) variables ....
Get a dict of local and global variables of the first outer frame from another file .
50,886
def extract_table_names ( query ) : tables_blocks = re . findall ( r'(?:FROM|JOIN)\s+(\w+(?:\s*,\s*\w+)*)' , query , re . IGNORECASE ) tables = [ tbl for block in tables_blocks for tbl in re . findall ( r'\w+' , block ) ] return set ( tables )
Extract table names from an SQL query .
50,887
def write_table ( df , tablename , conn ) : with catch_warnings ( ) : filterwarnings ( 'ignore' , message = 'The provided table name \'%s\' is not found exactly as such in the database' % tablename ) to_sql ( df , name = tablename , con = conn , index = not any ( name is None for name in df . index . names ) )
Write a dataframe to the database .
50,888
def make_benchark ( n_train , n_test , n_dim = 2 ) : X_train = np . random . rand ( n_train , n_dim ) y_train = np . random . rand ( n_train ) X_test = np . random . rand ( n_test , n_dim ) res = { } for variogram_model in VARIOGRAM_MODELS : tic = time ( ) OK = OrdinaryKriging ( X_train [ : , 0 ] , X_train [ : , 1 ] , ...
Compute the benchmarks for Ordinary Kriging
50,889
def print_benchmark ( n_train , n_test , n_dim , res ) : print ( '=' * 80 ) print ( ' ' * 10 , 'N_dim={}, N_train={}, N_test={}' . format ( n_dim , n_train , n_test ) ) print ( '=' * 80 ) print ( '\n' , '# Training the model' , '\n' ) print ( '|' . join ( [ '{:>11} ' . format ( el ) for el in [ 't_train (s)' ] + VARIOG...
Print the benchmarks
50,890
def display_variogram_model ( self ) : fig = plt . figure ( ) ax = fig . add_subplot ( 111 ) ax . plot ( self . lags , self . semivariance , 'r*' ) ax . plot ( self . lags , self . variogram_function ( self . variogram_model_parameters , self . lags ) , 'k-' ) plt . show ( )
Displays variogram model with the actual binned data .
50,891
def plot_epsilon_residuals ( self ) : fig = plt . figure ( ) ax = fig . add_subplot ( 111 ) ax . scatter ( range ( self . epsilon . size ) , self . epsilon , c = 'k' , marker = '*' ) ax . axhline ( y = 0.0 ) plt . show ( )
Plots the epsilon residuals for the variogram fit .
50,892
def print_statistics ( self ) : print ( "Q1 =" , self . Q1 ) print ( "Q2 =" , self . Q2 ) print ( "cR =" , self . cR )
Prints out the Q1 Q2 and cR statistics for the variogram fit . NOTE that ideally Q1 is close to zero Q2 is close to 1 and cR is as small as possible .
50,893
def _adjust_for_anisotropy ( X , center , scaling , angle ) : center = np . asarray ( center ) [ None , : ] angle = np . asarray ( angle ) * np . pi / 180 X -= center Ndim = X . shape [ 1 ] if Ndim == 1 : raise NotImplementedError ( 'Not implemnented yet?' ) elif Ndim == 2 : stretch = np . array ( [ [ 1 , 0 ] , [ 0 , s...
Adjusts data coordinates to take into account anisotropy . Can also be used to take into account data scaling . Angles are CCW about specified axes . Scaling is applied in rotated coordinate system .
50,894
def _calculate_variogram_model ( lags , semivariance , variogram_model , variogram_function , weight ) : if variogram_model == 'linear' : x0 = [ ( np . amax ( semivariance ) - np . amin ( semivariance ) ) / ( np . amax ( lags ) - np . amin ( lags ) ) , np . amin ( semivariance ) ] bnds = ( [ 0. , 0. ] , [ np . inf , np...
Function that fits a variogram model when parameters are not specified . Returns variogram model parameters that minimize the RMSE between the specified variogram function and the actual calculated variogram points .
50,895
def _krige ( X , y , coords , variogram_function , variogram_model_parameters , coordinates_type ) : zero_index = None zero_value = False if coordinates_type == 'euclidean' : d = squareform ( pdist ( X , metric = 'euclidean' ) ) bd = np . squeeze ( cdist ( X , coords [ None , : ] , metric = 'euclidean' ) ) elif coordin...
Sets up and solves the ordinary kriging system for the given coordinate pair . This function is only used for the statistics calculations .
50,896
def _find_statistics ( X , y , variogram_function , variogram_model_parameters , coordinates_type ) : delta = np . zeros ( y . shape ) sigma = np . zeros ( y . shape ) for i in range ( y . shape [ 0 ] ) : if i == 0 : continue else : k , ss = _krige ( X [ : i , : ] , y [ : i ] , X [ i , : ] , variogram_function , variog...
Calculates variogram fit statistics . Returns the delta sigma and epsilon values for the variogram fit . These arrays are used for statistics calculations .
50,897
def fit ( self , p , x , y ) : self . regression_model . fit ( p , y ) ml_pred = self . regression_model . predict ( p ) print ( 'Finished learning regression model' ) self . krige . fit ( x = x , y = y - ml_pred ) print ( 'Finished kriging residuals' )
fit the regression method and also Krige the residual
50,898
def score ( self , p , x , y , sample_weight = None ) : return r2_score ( y_pred = self . predict ( p , x ) , y_true = y , sample_weight = sample_weight )
Overloading default regression score method
50,899
def _exec_loop_moving_window ( self , a_all , bd_all , mask , bd_idx ) : import scipy . linalg . lapack npt = bd_all . shape [ 0 ] n = bd_idx . shape [ 1 ] kvalues = np . zeros ( npt ) sigmasq = np . zeros ( npt ) for i in np . nonzero ( ~ mask ) [ 0 ] : b_selector = bd_idx [ i ] bd = bd_all [ i ] a_selector = np . con...
Solves the kriging system by looping over all specified points . Uses only a certain number of closest points . Not very memory intensive but the loop is done in pure Python .