idx: int64 (values 0 to 63k)
question: string (lengths 61 to 4.03k)
target: string (lengths 6 to 1.23k)
8,100
def baby_names ( max_length = 15 ) : names = [ ] lengths = [ ] targets = [ ] with open ( os . path . join ( os . path . dirname ( sys . modules [ __name__ ] . __file__ ) , 'baby_names.csv' ) , 'rb' ) as f : first = True for l in csv . reader ( f , delimiter = ',' ) : if first : first = False continue assert len ( l ) == 4 , l name = l [ 0 ] if max_length < len ( name ) : raise ValueError ( 'Max length is too small: %d > %d' % ( max_length , len ( name ) ) ) chars = [ convert_to_int ( c ) for c in name ] names . append ( chars + ( [ EOS ] * ( max_length - len ( chars ) ) ) ) lengths . append ( [ len ( name ) ] ) values = [ float ( l [ 2 ] ) , float ( l [ 3 ] ) ] if abs ( sum ( values ) - 1 ) > 0.001 : raise ValueError ( 'Each row must sum to 1: %s' % l ) targets . append ( values ) return np . array ( names ) , np . array ( targets ) , np . array ( lengths )
Opens the baby_names.csv file and produces numpy arrays.
8,101
def reshape_data ( tensor , per_example_length = 1 ) : dims = [ 1 , 0 ] for i in xrange ( 2 , tensor . get_shape ( ) . ndims ) : dims . append ( i ) return pt . wrap ( tf . transpose ( tensor , dims ) ) . reshape ( [ - 1 , per_example_length ] )
Reshapes input so that it is appropriate for sequence_lstm.
8,102
def batch_normalize_with_arguments ( x , arguments ) : x = prettytensor . wrap ( x ) if isinstance ( arguments , bool ) : if arguments : return x . batch_normalize ( ) else : return x kwargs = arguments . _asdict ( ) defaults = prettytensor . _defaults for arg in ( 'learned_moments_update_rate' , 'variance_epsilon' , 'scale_after_normalization' ) : if kwargs . get ( arg , None ) is None : if arg in defaults : kwargs [ arg ] = defaults [ arg ] else : del kwargs [ arg ] return x . batch_normalize ( ** kwargs )
Applies batch normalization to x as specified in arguments.
8,103
def multilayer_fully_connected ( images , labels ) : images = pt . wrap ( images ) with pt . defaults_scope ( activation_fn = tf . nn . relu , l2loss = 0.00001 ) : return ( images . flatten ( ) . fully_connected ( 100 ) . fully_connected ( 100 ) . softmax_classifier ( 10 , labels ) )
Creates a multi-layer network of fully_connected layers.
8,104
def lenet5 ( images , labels ) : images = pt . wrap ( images ) with pt . defaults_scope ( activation_fn = tf . nn . relu , l2loss = 0.00001 ) : return ( images . conv2d ( 5 , 20 ) . max_pool ( 2 , 2 ) . conv2d ( 5 , 50 ) . max_pool ( 2 , 2 ) . flatten ( ) . fully_connected ( 500 ) . softmax_classifier ( 10 , labels ) )
Creates a multi-layer convolutional network.
8,105
def _full_map ( self ) : result = { } if self . _parent : result . update ( self . _parent ) result . update ( self . _map ) return result
Creates a full mapping of this and all parent key-value pairs.
8,106
def var_and_name_scope ( names ) : if not names : yield None , None else : name , var_scope = names with tf . name_scope ( name ) as scope : old_vs = tf . get_variable_scope ( ) if var_scope is None : count = len ( name . split ( '/' ) ) scoped_name = '/' . join ( scope . split ( '/' ) [ - count - 1 : - 1 ] ) full_name = ( old_vs . name + '/' + scoped_name ) . lstrip ( '/' ) else : full_name = var_scope . name vs_key = tf . get_collection_ref ( variable_scope . _VARSCOPE_KEY ) try : try : vs_key [ 0 ] = tf . VariableScope ( old_vs . reuse , name = full_name , initializer = old_vs . initializer , regularizer = old_vs . regularizer , caching_device = old_vs . caching_device ) except AttributeError : vs_key [ 0 ] = variable_scope . _VariableScope ( old_vs . reuse , name = full_name , initializer = old_vs . initializer ) vs_key [ 0 ] . name_scope = scope yield scope , vs_key [ 0 ] finally : vs_key [ 0 ] = old_vs
Creates a variable scope and a name scope.
8,107
def get_current_name_scope ( ) : g = tf . get_default_graph ( ) if isinstance ( g . _name_stack , tuple ) : return g . _name_stack [ 0 ] + '/' else : return g . _name_stack + '/'
Gets the current name scope.
8,108
def make_template ( name , func , * args , ** kwargs ) : if args or kwargs : func = functools . partial ( func , * args , ** kwargs ) return Template ( name , func )
Given an arbitrary function, wrap it so that it does parameter sharing.
8,109
def skip_common_stack_elements ( stacktrace , base_case ) : for i , ( trace , base ) in enumerate ( zip ( stacktrace , base_case ) ) : if trace != base : return stacktrace [ i : ] return stacktrace [ - 1 : ]
Skips items that the target stacktrace shares with the base stacktrace.
8,110
def create_model ( text_in , timesteps , phase ) : with pt . defaults_scope ( activation_fn = tf . nn . relu , l2loss = 0.00001 ) : with tf . device ( '/cpu:0' ) : embedded = text_in . embedding_lookup ( CHARS , [ EMBEDDING_SIZE ] ) lstm = ( embedded . cleave_sequence ( timesteps ) . sequence_lstm ( LOWER ) . sequence_lstm ( UPPER ) ) return ( lstm . squash_sequence ( ) . dropout ( keep_prob = 0.8 , phase = phase ) . fully_connected ( CHARS , activation_fn = None ) )
Creates a 2-layer LSTM model with dropout.
8,111
def sample ( input_placeholder , logits , seed = None , max_length = 1024 , temperature = 1.0 ) : assert temperature > 0 , 'Temperature must be greater than 0.' if not seed : seed = chr ( ord ( 'A' ) + random . randint ( 0 , 25 ) ) result = '' recurrent_runner = pt . train . RecurrentRunner ( ) recurrent_runner . reset ( ) for c in seed [ : - 1 ] : recurrent_runner . run ( [ logits ] , { input_placeholder : data_utils . convert_to_int ( c ) } ) result += c ci = ord ( seed [ - 1 ] ) while len ( result ) < max_length and ci != data_utils . EOS : result += chr ( ci ) logit_result = recurrent_runner . run ( [ logits ] , { input_placeholder : ci } ) [ 0 ] [ 0 ] logit_result /= temperature logit_result -= logit_result . max ( ) distribution = numpy . exp ( logit_result ) distribution /= distribution . sum ( ) distribution -= .00000001 ci = numpy . argmax ( numpy . random . multinomial ( 1 , distribution ) ) result += chr ( ci ) return result
Samples from the LSTM model.
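The sampling loop above boils down to temperature-scaled softmax sampling over the logits. A minimal sketch of just that step with numpy; the function name and the example logits are illustrative, not part of the dataset row:

import numpy as np

def sample_index(logits, temperature=1.0):
    # Temperature-scale the logits, then apply a numerically stable softmax.
    scaled = np.asarray(logits, dtype=np.float64) / temperature
    scaled -= scaled.max()
    probs = np.exp(scaled)
    probs /= probs.sum()
    probs -= 1e-8  # guard against rounding pushing the multinomial sum above 1
    # np.random.multinomial returns a one-hot count vector; argmax picks the index.
    return int(np.argmax(np.random.multinomial(1, probs)))

print(sample_index([2.0, 1.0, 0.1], temperature=0.5))

Lower temperatures concentrate probability on the largest logit, which is why the seed-driven generation above exposes temperature as a knob.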
8,112
def reshape ( input_layer , shape_spec ) : old_shape = input_layer . get_shape ( ) . as_list ( ) try : new_shape = _infer_unknown_dims ( old_shape , shape_spec ) except TypeError : return tf . reshape ( input_layer , shape_spec ) reshape_tensor = [ ] runner = [ ] for i , s in enumerate ( new_shape ) : if s is DIM_SAME : new_shape [ i ] = None if runner : reshape_tensor . append ( tf . constant ( runner ) ) runner = [ ] reshape_tensor . append ( tf . gather ( tf . shape ( input_layer ) , [ i ] ) ) else : runner . append ( s ) if s == - 1 : new_shape [ i ] = None if runner : reshape_tensor . append ( tf . constant ( runner ) ) if len ( reshape_tensor ) == 1 : reshape_tensor = reshape_tensor [ 0 ] else : reshape_tensor = tf . concat ( reshape_tensor , 0 ) result = tf . reshape ( input_layer , reshape_tensor ) result . set_shape ( new_shape ) return input_layer . with_tensor ( result )
Reshapes this tensor to the given spec.
8,113
def flatten ( input_layer , preserve_batch = True ) : if preserve_batch : return reshape ( input_layer , [ DIM_SAME , - 1 ] ) else : return reshape ( input_layer , [ - 1 ] )
Flattens this layer, optionally preserving the batch dimension.
8,114
def stop_gradient ( input_layer ) : if input_layer . is_sequence ( ) : result = [ tf . stop_gradient ( t ) for t in input_layer . sequence ] return input_layer . with_sequence ( result ) else : return tf . stop_gradient ( input_layer )
Cuts off the gradient at this point.
8,115
def dropout ( input_layer , keep_prob , phase = Phase . train , name = PROVIDED ) : if phase == Phase . train : return tf . nn . dropout ( input_layer , keep_prob , name = name ) else : return input_layer
Applies dropout if this is in the train phase.
8,116
def apply_with_summary ( input_layer , operation , * op_args , ** op_kwargs ) : return layers . apply_activation ( input_layer . bookkeeper , input_layer . tensor , operation , activation_args = op_args , activation_kwargs = op_kwargs )
Applies the given operation to input_layer and creates a summary.
8,117
def _rapply ( input_layer , operation , * op_args , ** op_kwargs ) : op_args = list ( op_args ) op_args . append ( input_layer . tensor ) return input_layer . with_tensor ( operation ( * op_args , ** op_kwargs ) )
Applies the given operation to this after expanding op_args.
8,118
def apply_op ( input_layer , operation , * op_args , ** op_kwargs ) : return input_layer . with_tensor ( operation ( input_layer . tensor , * op_args , ** op_kwargs ) )
Applies the given operation to this without adding any summaries.
8,119
def join ( input_layer , others , include_self = True , join_function = None ) : if include_self : list_of_tensors = [ input_layer ] list_of_tensors . extend ( others ) else : list_of_tensors = others return prettytensor . join_pretty_tensors ( list_of_tensors , input_layer , join_function )
Joins the provided PrettyTensors with this using the join function.
8,120
def unzip ( input_layer , split_dim = 0 , num_splits = 2 ) : shape = input_layer . shape _check_split_dims ( num_splits , split_dim , shape ) splits = functions . unzip ( input_layer , split_dim , shape [ split_dim ] , num_splits ) return input_layer . with_sequence ( splits )
Unzips this Tensor along the split_dim into num_splits equal chunks.
8,121
def concat ( input_layer , concat_dim , other_tensors = None ) : if input_layer . is_sequence ( ) : all_tensors = input_layer . sequence all_tensors . extend ( other_tensors or [ ] ) else : all_tensors = [ input_layer ] if other_tensors is None : raise ValueError ( 'Other Tensors must be supplied.' ) all_tensors . extend ( other_tensors ) if not all_tensors : return prettytensor . wrap_sequence ( [ ] ) else : return tf . concat ( all_tensors , concat_dim )
Concatenates input PrettyTensor with other_tensors along the specified dim.
8,122
def split ( input_layer , split_dim = 0 , num_splits = 2 ) : shape = input_layer . shape _check_split_dims ( num_splits , split_dim , shape ) splits = tf . split ( value = input_layer , num_or_size_splits = num_splits , axis = split_dim ) return input_layer . with_sequence ( splits )
Splits this Tensor along the split_dim into num_splits equal chunks.
8,123
def _zip_with_scalars ( args ) : zipped = [ ] for arg in args : if isinstance ( arg , prettytensor . PrettyTensor ) : zipped . append ( arg if arg . is_sequence ( ) else itertools . repeat ( arg ) ) elif ( isinstance ( arg , collections . Sequence ) and not isinstance ( arg , tf . compat . bytes_or_text_types ) ) : zipped . append ( arg ) else : zipped . append ( itertools . repeat ( arg ) ) assert len ( args ) == len ( zipped ) return zip ( * zipped )
Zips across args in order and replaces non-iterables with repeats.
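The broadcasting trick above is just zipping while swapping scalars for itertools.repeat. A small plain-Python sketch of the same idea, simplified so that only lists and tuples count as sequences (zip_with_scalars is an illustrative name):

import itertools

def zip_with_scalars(*args):
    # Sequences are zipped element-by-element; scalars are repeated to match.
    zipped = [a if isinstance(a, (list, tuple)) else itertools.repeat(a) for a in args]
    return list(zip(*zipped))

print(zip_with_scalars([1, 2, 3], 10))  # [(1, 10), (2, 10), (3, 10)]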
8,124
def map_ ( input_layer , fn ) : if not input_layer . is_sequence ( ) : raise ValueError ( 'Can only map a sequence.' ) return [ fn ( x ) for x in input_layer ]
Maps the given function across this sequence.
8,125
def _map_or_apply ( input_layer , op , * args , ** kwargs ) : kwargs . pop ( 'name' ) right = kwargs . pop ( 'right_' , False ) if input_layer . is_sequence ( ) : if right : args += ( input_layer , ) else : args = ( ( input_layer , ) + args ) result = [ op ( * x , ** kwargs ) for x in _zip_with_scalars ( args ) ] if len ( result ) != len ( input_layer ) : raise ValueError ( 'Not all arguments were the same length.' ) return result else : if right : my_op = lambda x : op ( * ( args + ( x , ) ) , ** kwargs ) else : my_op = lambda x : op ( x , * args , ** kwargs ) return my_op ( input_layer . tensor )
Map op across the input if it is a sequence; otherwise apply it.
8,126
def feed_numpy ( batch_size , * arrays ) : if not arrays : raise ValueError ( 'Arrays cannot be empty.' ) size = len ( arrays [ 0 ] ) for a in arrays : if size != len ( a ) : raise ValueError ( 'All arrays must be the same size.' ) count = int ( size / batch_size ) for i in xrange ( count ) : start = i * batch_size end = start + batch_size yield [ x [ start : end ] for x in arrays ] if count * batch_size < size : yield [ x [ end : ] for x in arrays ]
Given a set of numpy arrays, produce slices of batch_size.
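A simplified sketch of the slicing pattern above, with the same-length validation omitted (feed_batches is an illustrative name):

import numpy as np

def feed_batches(batch_size, *arrays):
    # Yield aligned slices of batch_size from every array; any smaller
    # remainder is yielded as the final batch.
    size = len(arrays[0])
    for start in range(0, size, batch_size):
        yield [a[start:start + batch_size] for a in arrays]

xs, ys = np.arange(10), np.arange(10) * 2
for batch in feed_batches(4, xs, ys):
    print([b.tolist() for b in batch])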
8,127
def batch ( input_iter , batch_size = 32 ) : input_iter = iter ( input_iter ) next_ = list ( itertools . islice ( input_iter , batch_size ) ) while next_ : yield next_ next_ = list ( itertools . islice ( input_iter , batch_size ) )
Batches data from an iterator that returns single items at a time.
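A standalone usage sketch of the islice-based batching idiom above (batch_iter is an illustrative name):

import itertools

def batch_iter(items, batch_size=3):
    # Pull fixed-size chunks from any iterator until it is exhausted.
    it = iter(items)
    while True:
        chunk = list(itertools.islice(it, batch_size))
        if not chunk:
            return
        yield chunk

print(list(batch_iter(range(7))))  # [[0, 1, 2], [3, 4, 5], [6]]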
8,128
def slice_constant ( data , batch_size = 32 , name = 'constant_data' , global_step = None ) : with tf . name_scope ( name ) : all_data = tf . convert_to_tensor ( data ) global_step = global_step or bookkeeper . global_step ( ) count = len ( data ) / batch_size extra = len ( data ) - count * batch_size if extra : offset = tf . mod ( global_step , count ) return tf . slice ( all_data , offset * batch_size , batch_size ) else : offset = tf . mod ( global_step , count + 1 ) return tf . slice ( all_data , offset * batch_size , tf . where ( tf . equal ( offset , count ) , extra , batch_size ) )
Provide a slice based on the global_step.
8,129
def session ( self , master = '' , config = None ) : session_manager = SESSION_MANAGER_FACTORY ( ) with session_manager . prepare_session ( master , None , config = config , init_fn = lambda _ : None ) as sess : try : yield sess finally : self . stop_queues ( )
Takes care of starting any local servers and stopping queues on exit.
8,130
def prepare_model ( self , sess , allow_initialize = True ) : if self . _follower : self . wait_for_initialization ( ) else : self . _init_model ( sess , allow_initialize ) if sess is not self . _sess : if self . threads : raise ValueError ( 'You must call stop_queues() before ' 'starting a new session with QueueRunners.' ) self . _sess = sess self . _start_threads ( sess )
Initialize the model and, if necessary, launch the queue runners.
8,131
def load_from_checkpoint ( self , sess , latest_filename = None ) : self . _create_initializers ( ) if self . _save_path : ckpt = tf . train . get_checkpoint_state ( os . path . dirname ( self . _save_path ) , latest_filename ) if ckpt and ckpt . all_model_checkpoint_paths : self . _saver = tf . train . Saver ( saver_def = self . _saver . as_saver_def ( ) ) self . _saver . set_last_checkpoints ( list ( ckpt . all_model_checkpoint_paths ) ) if self . _saver . last_checkpoints : self . _saver . restore ( sess , self . _saver . last_checkpoints [ - 1 ] ) return self . _saver . last_checkpoints [ - 1 ] else : return None
Loads the model from the most recent checkpoint.
8,132
def run_model ( self , op_list , num_steps , feed_vars = ( ) , feed_data = None , print_every = 100 , allow_initialize = True ) : feed_data = feed_data or itertools . repeat ( ( ) ) ops = [ bookkeeper . global_step ( ) ] ops . extend ( op_list ) sess = tf . get_default_session ( ) self . prepare_model ( sess , allow_initialize = allow_initialize ) results = [ ] try : if num_steps is None : counter = itertools . count ( 0 ) elif num_steps >= 0 : counter = xrange ( num_steps ) else : raise ValueError ( 'num_steps cannot be negative: %s' % num_steps ) for i , data in zip ( counter , feed_data ) : log_this_time = print_every and i % print_every == 0 if len ( data ) != len ( feed_vars ) : raise ValueError ( 'feed_data and feed_vars must be the same length: %d vs %d' % ( len ( data ) , len ( feed_vars ) ) ) if self . _coord . should_stop ( ) : print ( 'Coordinator stopped' ) sys . stdout . flush ( ) self . stop_queues ( ) break if len ( feed_vars ) != len ( data ) : raise ValueError ( 'Feed vars must be the same length as data.' ) if log_this_time and self . _summary_writer : results = sess . run ( ops + [ self . _summaries ] , dict ( zip ( feed_vars , data ) ) ) self . _summary_writer . add_summary ( results [ - 1 ] , results [ 0 ] ) results = results [ : - 1 ] else : results = sess . run ( ops , dict ( zip ( feed_vars , data ) ) ) if log_this_time : self . _log_and_save ( sess , results ) if print_every and not log_this_time : self . _log_and_save ( sess , results ) except tf . errors . OutOfRangeError as ex : print ( 'Done training -- epoch limit reached %s' % ex . message ) sys . stdout . flush ( ) self . stop_queues ( ) except BaseException as ex : print ( 'Exception -- stopping threads: %s' % ex , file = sys . stderr ) sys . stdout . flush ( ) self . stop_queues ( ) raise return results
Runs op_list for num_steps.
8,133
def train_model ( self , train_op , cost_to_log , num_steps , feed_vars = ( ) , feed_data = None , print_every = 100 ) : costs = [ train_op ] if ( isinstance ( cost_to_log , collections . Sequence ) and not isinstance ( cost_to_log , six . string_types ) ) : costs . extend ( cost_to_log ) else : costs . append ( cost_to_log ) return self . run_model ( costs , num_steps , feed_vars = feed_vars , feed_data = feed_data , print_every = print_every ) [ 2 : ]
Trains the given model.
8,134
def evaluate_model ( self , accuracy , num_steps , feed_vars = ( ) , feed_data = None , summary_tag = None , print_every = 0 ) : if not hasattr ( self , '_saver' ) : raise ValueError ( 'Before evaluating, you must initialize the model with ' 'load_from_checkpoint, prepare or saver.' ) self . _run_init_test_vars_op ( ) if ( not isinstance ( accuracy , collections . Sequence ) or isinstance ( accuracy , six . string_types ) ) : accuracy = ( accuracy , ) if summary_tag : summary_tag = ( summary_tag , ) if summary_tag and len ( summary_tag ) != len ( accuracy ) : raise ValueError ( 'If summaries are requested, there must be a tag per accuracy node.' ) result = self . run_model ( accuracy , num_steps , feed_vars = feed_vars , feed_data = feed_data , print_every = print_every , allow_initialize = False ) assert len ( result ) == len ( accuracy ) + 1 , ( 'results is wrong length, was %s but should be 1 longer than %s' % ( result , accuracy ) ) if summary_tag : self . add_summaries ( result [ 0 ] , * zip ( summary_tag , result [ 1 : ] ) ) return result [ 1 : ]
Evaluates the given model.
8,135
def add_summaries ( self , step , * tags_and_values ) : values = [ ] to_print = [ ] for tag , value in tags_and_values : values . append ( tf . Summary . Value ( tag = tag , simple_value = float ( value ) ) ) to_print . append ( '%s=%g' % ( tag , value ) ) if self . _summary_writer : summary = tf . Summary ( value = values ) event = tf . Event ( wall_time = time . time ( ) , summary = summary , step = int ( step ) ) self . _summary_writer . add_event ( event ) print ( '[%d] %s' % ( step , ', ' . join ( to_print ) ) )
Adds summaries to the writer and prints a log statement.
8,136
def load_new_checkpoint_when_available ( self , sess , current_checkpoint , sleep_seconds = 10 ) : while True : next_checkpoint = self . load_from_checkpoint ( sess ) if not next_checkpoint or next_checkpoint == current_checkpoint : print ( 'Model not yet available, sleeping for %d seconds: ' 'path %s; found: %s' % ( sleep_seconds , os . path . dirname ( self . _save_path ) , current_checkpoint ) ) sys . stdout . flush ( ) time . sleep ( sleep_seconds ) else : return next_checkpoint
Waits for a new checkpoint to be available and then loads it.
8,137
def evaluate_repeatedly ( self , accuracy , num_steps , feed_vars = ( ) , feed_data = None , summary_tag = None , evaluation_times = - 1 ) : current_checkpoint = None try : for i in itertools . count ( 0 ) : with self . session ( ) as sess : current_checkpoint = self . load_new_checkpoint_when_available ( sess , current_checkpoint ) self . _run_init_test_vars_op ( ) accuracy_result = self . evaluate_model ( accuracy , num_steps , summary_tag = summary_tag , print_every = 0 , feed_vars = feed_vars , feed_data = feed_data ) if not summary_tag : print ( '[%d] %s' % ( sess . run ( bookkeeper . global_step ( ) ) , accuracy_result ) ) if ( i + 1 ) == evaluation_times : return accuracy_result finally : print ( 'Shutting down' ) sys . stdout . flush ( ) self . stop_queues ( )
Runs the evaluation in a loop for evaluation_times.
8,138
def create_model ( text_in , labels , timesteps , per_example_weights , phase = pt . Phase . train ) : with pt . defaults_scope ( phase = phase , l2loss = 0.00001 ) : with tf . device ( '/cpu:0' ) : embedded = text_in . embedding_lookup ( CHARS , [ EMBEDDING_SIZE ] ) lstm = ( embedded . cleave_sequence ( timesteps ) . sequence_lstm ( CHARS ) ) return ( lstm . squash_sequence ( ) . fully_connected ( 32 , activation_fn = tf . nn . relu ) . dropout ( 0.7 ) . softmax_classifier ( SEXES , labels , per_example_weights = per_example_weights ) )
Creates a model for running baby names.
8,139
def for_default_graph ( * args , ** kwargs ) : graph = tf . get_default_graph ( ) collection = graph . get_collection ( _BOOKKEEPER ) if collection : if args or kwargs : raise ValueError ( 'Requesting construction of a BookKeeper that already ' 'exists: %s %s' % ( args , kwargs ) ) return collection [ 0 ] else : books = BOOKKEEPER_FACTORY ( * args , g = graph , ** kwargs ) graph . add_to_collection ( _BOOKKEEPER , books ) return books
Creates a bookkeeper for the default graph.
8,140
def for_new_graph ( * args , ** kwargs ) : graph = tf . Graph ( ) with graph . as_default ( ) : return for_default_graph ( * args , ** kwargs )
Creates a Bookkeeper for a new graph.
8,141
def regroup_if_changed ( group , op_list , name = None ) : has_deltas = isinstance ( op_list , sequence_with_deltas . SequenceWithDeltas ) if ( group is None or len ( group . control_inputs ) != len ( op_list ) or ( has_deltas and op_list . has_changed ( ) ) ) : if has_deltas : op_list . mark ( ) if op_list : return tf . group ( * op_list , name = name ) else : return tf . no_op ( name = name ) else : return group
Creates a new group for op_list if it has changed.
8,142
def apply_optimizer ( optimizer , losses , regularize = True , include_marked = True , clip_gradients_by_norm = None , ** kwargs ) : books = for_default_graph ( ) g_step = kwargs . pop ( 'global_step' , books . global_step ) total_loss = books . create_composite_loss ( losses = losses , regularize = regularize , include_marked = include_marked ) grads_and_vars = optimizer . compute_gradients ( total_loss , ** kwargs ) if clip_gradients_by_norm is not None : clipped_grads_and_vars = [ ] for g , v in grads_and_vars : if isinstance ( g , tf . SparseTensor ) : cg = tf . SparseTensor ( tf . clip_by_norm ( g . values , clip_gradients_by_norm ) , g . indices , g . dense_shape ) elif isinstance ( g , tf . IndexedSlices ) : cg = tf . IndexedSlices ( tf . clip_by_norm ( g . values , clip_gradients_by_norm ) , g . indices ) else : cg = tf . clip_by_norm ( g , clip_gradients_by_norm ) clipped_grads_and_vars . append ( ( cg , v ) ) grads_and_vars = clipped_grads_and_vars train_op = optimizer . apply_gradients ( grads_and_vars , global_step = g_step ) return books . with_update_ops ( train_op )
Applies an optimizer to the graph and returns a train_op.
8,143
def _add_global_counter ( self ) : assert self . _global_step is None with self . g . as_default ( ) , self . g . name_scope ( None ) : try : self . _global_step = self . g . get_tensor_by_name ( 'global_step:0' ) except KeyError : self . _global_step = tf . Variable ( 0 , name = 'global_step' , trainable = False )
Adds a global counter; called once during setup.
8,144
def add_scalar_summary ( self , x , tag = None ) : if not self . summary_collections : return with self . g . as_default ( ) : tag = tag or _tag_for ( x . name ) summary = ( tf . summary . scalar ( tag , x , collections = self . summary_collections ) ) return summary
Adds a scalar summary for x.
8,145
def add_histogram_summary ( self , x , tag = None ) : if not self . summary_collections : return with self . g . as_default ( ) : tag = tag or _tag_for ( x . name ) summary = tf . summary . histogram ( tag , x , collections = self . summary_collections ) return summary
Add a summary operation to visualize the histogram of x's values.
8,146
def exponential_moving_average ( self , var , avg_var = None , decay = 0.999 , ignore_nan = False ) : with self . _g . as_default ( ) : if decay < 0 or decay >= 1.0 : raise ValueError ( 'Decay is %5.2f, but has to be in [0, 1).' % decay ) if avg_var is None : avg_name = '%s_average' % _bare_var_name ( var ) with tf . control_dependencies ( None ) : with tf . name_scope ( avg_name + '/Initializer/' ) : if isinstance ( var , tf . Variable ) : init_val = var . initialized_value ( ) elif var . get_shape ( ) . is_fully_defined ( ) : init_val = tf . constant ( 0 , shape = var . get_shape ( ) , dtype = var . dtype . base_dtype ) else : init_val = tf . constant ( 0 , dtype = var . dtype . base_dtype ) avg_var = tf . Variable ( init_val , name = avg_name , trainable = False ) num_updates = tf . cast ( self . global_step , tf . float32 ) decay = tf . minimum ( decay , tf . maximum ( 0.9 , ( 1.0 + num_updates ) / ( 10.0 + num_updates ) ) ) with tf . device ( avg_var . device ) : if ignore_nan : var = tf . where ( tf . is_finite ( var ) , var , avg_var ) if var . get_shape ( ) . is_fully_defined ( ) : avg_update = tf . assign_sub ( avg_var , ( 1 - decay ) * ( avg_var - var ) ) else : avg_update = tf . assign ( avg_var , avg_var - ( 1 - decay ) * ( avg_var - var ) , validate_shape = False ) self . _g . add_to_collection ( GraphKeys . UPDATE_OPS , avg_update ) return avg_update
Calculates the exponential moving average.
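The TensorFlow-free core of the update above is a one-line recurrence. A hedged numpy sketch of that rule, mirroring the warm-up decay schedule in the function; the variable names and the toy sine signal are illustrative:

import numpy as np

def ema_update(avg, x, decay=0.999):
    # Same update as above: avg - (1 - decay) * (avg - x),
    # which is equivalent to decay * avg + (1 - decay) * x.
    return avg - (1.0 - decay) * (avg - x)

avg = 0.0
for step, x in enumerate(np.sin(np.linspace(0.0, 3.0, 50))):
    # Smaller effective decay early on, as the method above does.
    eff_decay = min(0.999, max(0.9, (1.0 + step) / (10.0 + step)))
    avg = ema_update(avg, x, eff_decay)
print(round(float(avg), 4))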
8,147
def add_average_summary ( self , var , tag = None , decay = 0.999 , ignore_nan = True ) : if not self . summary_collections : return with self . g . as_default ( ) : if decay < 0.9 or decay >= 1.0 : raise ValueError ( 'Decay is %5.2f, but has to be in [0, 1).' % decay ) avg_var = self . exponential_moving_average ( var , decay = decay , ignore_nan = ignore_nan ) if tag is None : tag = _bare_var_name ( avg_var ) tag = self . g . unique_name ( tag ) self . add_scalar_summary ( avg_var , tag ) return avg_var
Add a summary with the moving average of var.
8,148
def add_loss ( self , loss , name = None , regularization = False , add_summaries = True ) : _ = name if regularization : self . _g . add_to_collection ( GraphKeys . REGULARIZATION_LOSSES , loss ) tf . add_to_collection ( GraphKeys . LOSSES , loss ) if add_summaries : self . add_scalar_summary ( loss , 'loss' ) self . add_average_summary ( loss , 'loss_average' )
Append a loss to the total loss for the network.
8,149
def add_state ( self , state_name , initial_state , batch_size = None ) : state_shape = initial_state . get_shape ( ) . as_list ( ) full_shape = [ batch_size ] + state_shape if not batch_size : shape_proto = self . _as_shape_proto ( [ 0 ] + state_shape ) batch_size = 1 else : shape_proto = self . _as_shape_proto ( [ batch_size ] + state_shape ) tiles = [ batch_size ] + ( [ 1 ] * len ( initial_state . get_shape ( ) ) ) feed_op = tf . placeholder_with_default ( tf . tile ( tf . expand_dims ( initial_state , [ 0 ] ) , tiles ) , shape = full_shape , name = '%s_feed' % state_name ) s = { 'feed_op' : feed_op , 'feed_type' : initial_state . dtype , 'feed_shape' : shape_proto } self . _states [ state_name ] = s
Adds a state to the state saver.
8,150
def to_dense_one_hot ( labels , class_count ) : if not isinstance ( class_count , tf . compat . integral_types ) : raise TypeError ( 'class_count must be an integer type.' ) if labels . dtype . base_dtype not in ( tf . int32 , tf . int64 ) : raise TypeError ( 'Labels must be an integer: %s' % labels . dtype ) if labels . get_shape ( ) . ndims != 1 : raise ValueError ( 'Labels must be a rank 1 tensor: %s' % labels . get_shape ( ) ) dtype = labels . dtype . base_dtype class_tensor = tf . convert_to_tensor ( class_count , dtype = dtype , name = 'class_count' ) batch = tf . gather ( tf . shape ( labels ) , 0 ) count = tf . expand_dims ( tf . range ( 0 , limit = batch ) , 1 ) labels = tf . expand_dims ( labels , 1 ) batch = tf . gather ( tf . shape ( labels ) , 0 ) if dtype != tf . int32 : count = tf . cast ( count , dtype ) batch = tf . cast ( batch , dtype ) result = tf . sparse_to_dense ( tf . concat ( [ count , labels ] , 1 ) , tf . concat ( [ tf . expand_dims ( batch , 0 ) , tf . expand_dims ( class_tensor , 0 ) ] , 0 ) , 1.0 , 0.0 ) result . set_shape ( [ labels . get_shape ( ) . dims [ 0 ] , class_count ] ) return result
Converts a vector that specifies one label per batch element into a dense one-hot version.
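For reference, the same conversion done eagerly with numpy rather than with sparse_to_dense; this is a sketch, and dense_one_hot is an illustrative name:

import numpy as np

def dense_one_hot(labels, class_count):
    # Integer label vector -> dense one-hot matrix of shape [batch, class_count].
    labels = np.asarray(labels, dtype=np.int64)
    result = np.zeros((labels.shape[0], class_count), dtype=np.float32)
    result[np.arange(labels.shape[0]), labels] = 1.0
    return result

print(dense_one_hot([2, 0, 1], 3))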
8,151
def _convert_and_assert_per_example_weights_compatible ( input_ , per_example_weights , dtype ) : per_example_weights = tf . convert_to_tensor ( per_example_weights , name = 'per_example_weights' , dtype = dtype ) if input_ . get_shape ( ) . ndims : expected_length = input_ . get_shape ( ) . dims [ 0 ] message = ( 'per_example_weights must have rank 1 and length %s, but was: %s' % ( expected_length , per_example_weights . get_shape ( ) ) ) else : expected_length = None message = ( 'per_example_weights must have rank 1 and length equal to the ' 'first dimension of inputs (unknown), but was: %s' % per_example_weights . get_shape ( ) ) if per_example_weights . get_shape ( ) . ndims not in ( 1 , None ) : raise ValueError ( message ) if not per_example_weights . get_shape ( ) . is_compatible_with ( ( expected_length , ) ) : raise ValueError ( message ) return per_example_weights
Converts per_example_weights to a tensor and validates the shape.
8,152
def apply_regression ( input_ , regression_fn , target , regression_args = ( ) , regression_kwargs = None , name = PROVIDED , loss_weight = None , per_example_weights = None ) : if regression_kwargs is None : regression_kwargs = { } if name is not None and 'name' not in regression_kwargs : regression_kwargs [ 'name' ] = name elif name is None : name = input_ . tensor . op . name tensor = input_ . tensor loss = regression_fn ( tensor , target , * regression_args , ** regression_kwargs ) if loss_weight is not None : loss *= loss_weight if per_example_weights is not None : per_example_weights = _convert_and_assert_per_example_weights_compatible ( input_ , per_example_weights , dtype = loss . dtype ) loss *= per_example_weights if name is None : name = loss . op . name if tensor . get_shape ( ) [ 0 ] . value is not None : avg_loss = tf . reduce_sum ( loss ) / tensor . get_shape ( ) [ 0 ] . value else : avg_loss = tf . reduce_mean ( loss ) return input_ . add_loss ( avg_loss , name = name )
Applies the given regression and adds the loss to the bookkeeper.
8,153
def binary_cross_entropy_with_logits ( input_ , target , name = PROVIDED , loss_weight = None , per_example_weights = None , per_output_weights = None ) : if target is None : raise ValueError ( 'target must be set' ) target = _convert_and_assert_tensors_compatible ( input_ , target ) with tf . name_scope ( 'stats' ) : selected , sum_retrieved , sum_relevant = _compute_precision_recall ( input_ , target , 0 , per_example_weights ) precision = selected / sum_retrieved recall = selected / sum_relevant if precision . get_shape ( ) . is_fully_defined ( ) : input_ . bookkeeper . add_average_summary ( precision , 'average_precision_%s' % name ) if recall . get_shape ( ) . is_fully_defined ( ) : input_ . bookkeeper . add_average_summary ( recall , 'average_recall_%s' % name ) input_ . bookkeeper . add_scalar_summary ( tf . reduce_sum ( tf . to_float ( tf . greater ( input_ , 0 ) ) ) , 'activations' ) if per_output_weights is not None : per_output_weights = tf . convert_to_tensor ( per_output_weights , name = 'per_output_weights' , dtype = input_ . dtype . base_dtype ) input_ . get_shape ( ) . assert_is_compatible_with ( per_output_weights . get_shape ( ) ) def _batch_sum_bce ( x , target , name = 'binary_cross_entropy' ) : logits = functions . binary_cross_entropy_loss_with_logits ( x , target , name = name ) if per_output_weights is not None : logits *= per_output_weights return functions . reduce_batch_sum ( logits ) return apply_regression ( input_ , _batch_sum_bce , target , [ ] , name = '%s_bce_loss' % name , loss_weight = loss_weight , per_example_weights = per_example_weights )
Calculates the binary cross entropy of the input_ vs the target.
8,154
def softmax_classifier_with_sampled_loss ( inputs , num_classes , labels , num_sampled , num_true = None , sampled_values = None , remove_accidental_hits = True , loss_weight = None , per_example_weights = None , weights = None , bias = tf . zeros_initializer ( ) , parameter_modifier = parameters . identity , name = 'softmax_classifier' ) : input_copy = inputs . as_layer ( ) with tf . name_scope ( 'sampled_softmax' ) : full = inputs . fully_connected ( num_classes , activation_fn = None , name = name , transpose_weights = True , weights = weights , bias = bias , parameter_modifier = parameter_modifier ) if labels is not None : labels = tf . convert_to_tensor ( labels , dtype = tf . int64 , name = 'labels' ) labels . get_shape ( ) . assert_is_compatible_with ( [ input_copy . get_shape ( ) [ 0 ] , num_true ] ) if num_true is None : if labels . get_shape ( ) . ndims and labels . get_shape ( ) . dims [ 1 ] : num_true = labels . get_shape ( ) . dims [ 1 ] . value else : num_true = 1 def _loss ( input_ , labels , name = None ) : return tf . nn . sampled_softmax_loss ( weights = full . layer_parameters [ 'weights' ] , biases = full . layer_parameters [ 'bias' ] , labels = labels , inputs = input_ , num_sampled = num_sampled , num_classes = num_classes , num_true = num_true , sampled_values = sampled_values , remove_accidental_hits = remove_accidental_hits , name = name ) loss = apply_regression ( input_copy , _loss , labels , [ ] , name = '%s_sampled_loss' % name , loss_weight = loss_weight , per_example_weights = per_example_weights ) else : loss = None return SampledSoftmaxResult ( full , loss )
Applies softmax and, if labels is not None, adds a sampled loss.
8,155
def softmax_classifier ( input_ , num_classes , labels = None , loss_weight = None , per_example_weights = None , weights = None , bias = tf . zeros_initializer ( ) , parameter_modifier = parameters . identity , name = PROVIDED ) : full = input_ . fully_connected ( num_classes , activation_fn = None , name = name , weights = weights , bias = bias , parameter_modifier = parameter_modifier ) return full . softmax ( labels = labels , loss_weight = loss_weight , per_example_weights = per_example_weights , name = name )
Creates a fully-connected linear layer followed by a softmax.
8,156
def softmax ( input_ , labels = None , name = PROVIDED , loss_weight = None , per_example_weights = None ) : if labels is not None : full = input_ . as_layer ( ) return SoftmaxResult ( input_ . softmax_activation ( ) , full . cross_entropy ( labels , name = name , loss_weight = loss_weight , per_example_weights = per_example_weights ) ) else : return SoftmaxResult ( input_ . softmax_activation ( ) , None )
Applies softmax and, if labels is not None, also adds a loss.
8,157
def evaluate_precision_recall ( input_ , labels , threshold = 0.5 , per_example_weights = None , name = PROVIDED , phase = Phase . train ) : _ = name selected , sum_retrieved , sum_relevant = _compute_precision_recall ( input_ , labels , threshold , per_example_weights ) if phase != Phase . train : dtype = tf . float32 relevant_count = tf . get_variable ( 'relevant_count' , [ ] , dtype , tf . zeros_initializer ( ) , collections = [ bookkeeper . GraphKeys . TEST_VARIABLES ] , trainable = False ) retrieved_count = tf . get_variable ( 'retrieved_count' , [ ] , dtype , tf . zeros_initializer ( ) , collections = [ bookkeeper . GraphKeys . TEST_VARIABLES ] , trainable = False ) selected_count = tf . get_variable ( 'selected_count' , [ ] , dtype , tf . zeros_initializer ( ) , collections = [ bookkeeper . GraphKeys . TEST_VARIABLES ] , trainable = False ) with input_ . g . device ( selected_count . device ) : selected = tf . assign_add ( selected_count , selected ) with input_ . g . device ( retrieved_count . device ) : sum_retrieved = tf . assign_add ( retrieved_count , sum_retrieved ) with input_ . g . device ( relevant_count . device ) : sum_relevant = tf . assign_add ( relevant_count , sum_relevant ) return ( tf . where ( tf . equal ( sum_retrieved , 0 ) , tf . zeros_like ( selected ) , selected / sum_retrieved ) , tf . where ( tf . equal ( sum_relevant , 0 ) , tf . zeros_like ( selected ) , selected / sum_relevant ) )
Computes the precision and recall of the prediction vs the labels.
8,158
def _eval_metric ( input_ , topk , correct_predictions , examples , phase ) : my_parameters = { } if phase in ( Phase . test , Phase . infer ) : dtype = tf . float32 count = tf . Variable ( tf . constant ( 0 , dtype = dtype ) , name = 'count_%d' % topk , collections = [ bookkeeper . GraphKeys . TEST_VARIABLES ] , trainable = False ) correct = tf . Variable ( tf . constant ( 0 , dtype = dtype ) , name = 'correct_%d' % topk , collections = [ bookkeeper . GraphKeys . TEST_VARIABLES ] , trainable = False ) my_parameters [ 'count' ] = count my_parameters [ 'correct' ] = correct with input_ . g . device ( count . device ) : examples = tf . assign_add ( count , examples ) with input_ . g . device ( correct . device ) : correct_predictions = tf . assign_add ( correct , correct_predictions ) return correct_predictions , examples , my_parameters
Creates the standard tracking variables if in test and returns accuracy.
8,159
def _compute_precision_recall ( input_ , labels , threshold , per_example_weights ) : labels . get_shape ( ) . assert_is_compatible_with ( input_ . get_shape ( ) ) relevant = tf . to_float ( tf . greater ( labels , 0 ) ) retrieved = tf . to_float ( tf . greater ( input_ , threshold ) ) selected = relevant * retrieved if per_example_weights is not None : per_example_weights = _convert_and_assert_per_example_weights_compatible ( input_ , per_example_weights , dtype = None ) per_example_weights = tf . to_float ( tf . greater ( per_example_weights , 0 ) ) selected = functions . reduce_batch_sum ( selected ) * per_example_weights relevant = functions . reduce_batch_sum ( relevant ) * per_example_weights retrieved = functions . reduce_batch_sum ( retrieved ) * per_example_weights sum_relevant = tf . reduce_sum ( relevant ) sum_retrieved = tf . reduce_sum ( retrieved ) selected = tf . reduce_sum ( selected ) return selected , sum_retrieved , sum_relevant
Returns the shared numerator and the denominators of precision and recall.
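The three values above combine into precision and recall as follows; a small numpy sketch under the assumption of a single unweighted batch (precision_recall is an illustrative name):

import numpy as np

def precision_recall(scores, labels, threshold=0.5):
    relevant = (np.asarray(labels) > 0).astype(float)
    retrieved = (np.asarray(scores) > threshold).astype(float)
    selected = float((relevant * retrieved).sum())  # shared numerator
    precision = selected / max(retrieved.sum(), 1e-12)
    recall = selected / max(relevant.sum(), 1e-12)
    return precision, recall

print(precision_recall([0.9, 0.2, 0.7, 0.4], [1, 0, 0, 1]))  # (0.5, 0.5)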
8,160
def unroll_state_saver ( input_layer , name , state_shapes , template , lengths = None ) : state_saver = input_layer . bookkeeper . recurrent_state state_names = [ STATE_NAME % name + '_%d' % i for i in xrange ( len ( state_shapes ) ) ] if hasattr ( state_saver , 'add_state' ) : for state_name , state_shape in zip ( state_names , state_shapes ) : initial_state = tf . zeros ( state_shape [ 1 : ] , dtype = input_layer . dtype ) state_saver . add_state ( state_name , initial_state = initial_state , batch_size = state_shape [ 0 ] ) if lengths is not None : max_length = tf . reduce_max ( lengths ) else : max_length = None results = [ ] prev_states = [ ] for state_name , state_shape in zip ( state_names , state_shapes ) : my_shape = list ( state_shape ) my_shape [ 0 ] = - 1 prev_states . append ( tf . reshape ( state_saver . state ( state_name ) , my_shape ) ) my_parameters = None for i , layer in enumerate ( input_layer . sequence ) : with input_layer . g . name_scope ( 'unroll_%00d' % i ) : if i > 0 and max_length is not None : result = control_flow_ops . cond ( i < max_length , lambda : unwrap_all ( * template ( layer , * prev_states ) . flatten ( ) ) , lambda : unwrap_all ( out , * prev_states ) ) out = result [ 0 ] prev_states = result [ 1 : ] else : out , prev_states = template ( layer , * prev_states ) if my_parameters is None : my_parameters = out . layer_parameters results . append ( prettytensor . unwrap ( out ) ) updates = [ state_saver . save_state ( state_name , prettytensor . unwrap ( prev_state ) ) for state_name , prev_state in zip ( state_names , prev_states ) ] with tf . control_dependencies ( updates ) : results [ 0 ] = tf . identity ( results [ 0 ] ) return input_layer . with_sequence ( results , parameters = my_parameters )
Unrolls the given function with state taken from the state saver.
8,161
def cleave_sequence ( input_layer , unroll = None ) : if unroll is None : raise ValueError ( 'You must set unroll either here or in the defaults.' ) shape = input_layer . shape if shape [ 0 ] is not None and shape [ 0 ] % unroll != 0 : raise ValueError ( 'Must divide the split dimension evenly: %d mod %d != 0' % ( shape [ 0 ] , unroll ) ) if unroll <= 0 : raise ValueError ( 'Unroll must be > 0: %s' % unroll ) elif unroll == 1 : splits = [ input_layer . tensor ] else : splits = tf . split ( value = input_layer . tensor , num_or_size_splits = unroll , axis = 0 ) result = input_layer . with_sequence ( splits ) defaults = result . defaults if 'unroll' in defaults : del defaults [ 'unroll' ] return result
Cleaves a tensor into a sequence; this is the inverse of squash.
8,162
def create_sequence_pretty_tensor ( sequence_input , shape = None , save_state = True ) : inputs = prettytensor . wrap_sequence ( sequence_input . inputs , tensor_shape = shape ) targets = prettytensor . wrap_sequence ( sequence_input . targets ) if save_state : bookkeeper . set_recurrent_state_saver ( sequence_input ) return inputs , targets
Creates a PrettyTensor object for the given sequence.
8,163
def flatten ( self ) : ls = [ self . output ] ls . extend ( self . state ) return ls
Create a flattened version by putting output first and then states.
8,164
def run ( self , fetch_list , feed_dict = None , sess = None ) : if tf . get_default_graph ( ) != self . _graph : raise ValueError ( 'The current default graph is different from the graph' ' used at construction time of RecurrentRunner.' ) if feed_dict is None : all_feeds_dict = { } else : all_feeds_dict = dict ( feed_dict ) all_feeds_dict . update ( self . _state_feeds ) all_fetches_list = list ( fetch_list ) all_fetches_list += self . _state_fetches sess = sess or tf . get_default_session ( ) fetches = sess . run ( all_fetches_list , all_feeds_dict ) states = fetches [ len ( fetch_list ) : ] for i , s in enumerate ( states ) : self . _state_feeds [ self . _state_feed_names [ i ] ] = s return fetches [ : len ( fetch_list ) ]
Runs the graph with the provided feeds and fetches.
8,165
def __get_vpc_info ( self , ifarr ) : if ( self . vpc_vbtbl == None ) : self . vpc_vbtbl = self . snmpobj . get_bulk ( OID_VPC_PEERLINK_IF ) if ( ( self . vpc_vbtbl == None ) | ( len ( self . vpc_vbtbl ) == 0 ) ) : return ( None , None ) domain = natlas_snmp . get_last_oid_token ( self . vpc_vbtbl [ 0 ] [ 0 ] [ 0 ] ) ifidx = str ( self . vpc_vbtbl [ 0 ] [ 0 ] [ 1 ] ) ifname = self . snmpobj . cache_lookup ( ifarr , OID_ETH_IF_DESC + '.' + ifidx ) ifname = self . shorten_port_name ( ifname ) return ( domain , ifname )
If VPC is enabled, return the VPC domain and interface name of the VPC peerlink.
8,166
def get_macs ( self , ip , display_progress ) : if ( ip == '0.0.0.0' ) : return None ret_macs = [ ] snmpobj = natlas_snmp ( ip ) if ( snmpobj . get_cred ( self . config . snmp_creds ) == 0 ) : return None system_name = util . shorten_host_name ( snmpobj . get_val ( OID_SYSNAME ) , self . config . host_domains ) vlan_vbtbl = snmpobj . get_bulk ( OID_VLANS ) ifname_vbtbl = snmpobj . get_bulk ( OID_IFNAME ) for vlan_row in vlan_vbtbl : for vlan_n , vlan_v in vlan_row : vlan = natlas_snmp . get_last_oid_token ( vlan_n ) if ( vlan >= 1002 ) : continue vmacs = self . get_macs_for_vlan ( ip , vlan , display_progress , snmpobj , system_name , ifname_vbtbl ) if ( vmacs != None ) : ret_macs . extend ( vmacs ) if ( display_progress == 1 ) : print ( '' ) return ret_macs
Return an array of MAC addresses from a single node at the given IP.
8,167
def get_macs_for_vlan ( self , ip , vlan , display_progress = 0 , snmpobj = None , system_name = None , ifname_vbtbl = None ) : ret_macs = [ ] if ( snmpobj == None ) : snmpobj = natlas_snmp ( ip ) if ( snmpobj . get_cred ( self . config . snmp_creds ) == 0 ) : return None if ( ifname_vbtbl == None ) : ifname_vbtbl = snmpobj . get_bulk ( OID_IFNAME ) if ( system_name == None ) : system_name = util . shorten_host_name ( snmpobj . get_val ( OID_SYSNAME ) , self . config . host_domains ) old_cred = snmpobj . v2_community snmpobj . v2_community = old_cred + '@' + str ( vlan ) if ( display_progress == 1 ) : sys . stdout . write ( str ( vlan ) ) sys . stdout . flush ( ) cam_vbtbl = snmpobj . get_bulk ( OID_VLAN_CAM ) portnum_vbtbl = snmpobj . get_bulk ( OID_BRIDGE_PORTNUMS ) ifindex_vbtbl = snmpobj . get_bulk ( OID_IFINDEX ) cam_match = None if ( cam_vbtbl == None ) : return None for cam_row in cam_vbtbl : for cam_n , cam_v in cam_row : cam_entry = natlas_mac . mac_format_ascii ( cam_v , 0 ) p = cam_n . getOid ( ) portnum_oid = '%s.%i.%i.%i.%i.%i.%i' % ( OID_BRIDGE_PORTNUMS , p [ 11 ] , p [ 12 ] , p [ 13 ] , p [ 14 ] , p [ 15 ] , p [ 16 ] ) bridge_portnum = snmpobj . cache_lookup ( portnum_vbtbl , portnum_oid ) try : ifidx = snmpobj . cache_lookup ( ifindex_vbtbl , OID_IFINDEX + '.' + bridge_portnum ) port = snmpobj . cache_lookup ( ifname_vbtbl , OID_IFNAME + '.' + ifidx ) except TypeError : port = 'None' mac_addr = natlas_mac . mac_format_ascii ( cam_v , 1 ) if ( display_progress == 1 ) : sys . stdout . write ( '.' ) sys . stdout . flush ( ) entry = natlas_mac . mac_object ( system_name , ip , vlan , mac_addr , port ) ret_macs . append ( entry ) snmpobj . v2_community = old_cred return ret_macs
Return an array of MAC addresses for a single VLAN from a single node at an IP.
8,168
def mac_hex_to_ascii ( mac_hex , inc_dots ) : v = mac_hex [ 2 : ] ret = '' for i in range ( 0 , len ( v ) , 4 ) : ret += v [ i : i + 4 ] if ( ( inc_dots ) & ( ( i + 4 ) < len ( v ) ) ) : ret += '.' return ret
Format a hex MAC string to ASCII.
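A standalone sketch of the same grouping logic, assuming the input carries a two-character prefix such as '0x' exactly as the function above does (mac_hex_to_dotted is an illustrative name):

def mac_hex_to_dotted(mac_hex):
    # Drop the leading prefix, then join the hex digits in groups of four
    # with dots (Cisco-style notation).
    digits = mac_hex[2:]
    return '.'.join(digits[i:i + 4] for i in range(0, len(digits), 4))

print(mac_hex_to_dotted('0x001122334455'))  # 0011.2233.4455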
8,169
def get_switch_macs ( self , switch_ip = None , node = None , vlan = None , mac = None , port = None , verbose = 0 ) : if ( switch_ip == None ) : if ( node == None ) : raise Exception ( 'get_switch_macs() requires switch_ip or node parameter' ) return None switch_ip = node . get_ipaddr ( ) mac_obj = natlas_mac ( self . config ) if ( vlan == None ) : macs = mac_obj . get_macs ( switch_ip , verbose ) else : macs = mac_obj . get_macs_for_vlan ( switch_ip , vlan , verbose ) if ( ( mac == None ) & ( port == None ) ) : return macs if macs else [ ] ret = [ ] for m in macs : if ( mac != None ) : if ( re . match ( mac , m . mac ) == None ) : continue if ( port != None ) : if ( re . match ( port , m . port ) == None ) : continue ret . append ( m ) return ret
Get the CAM table from a switch.
8,170
def get_arp_table ( self , switch_ip , ip = None , mac = None , interf = None , arp_type = None ) : node = natlas_node ( switch_ip ) if ( node . try_snmp_creds ( self . config . snmp_creds ) == 0 ) : return [ ] arp = node . get_arp_table ( ) if ( arp == None ) : return [ ] if ( ( ip == None ) & ( mac == None ) & ( interf == None ) & ( arp_type == None ) ) : return arp interf = str ( interf ) if vlan else None ret = [ ] for a in arp : if ( ip != None ) : if ( re . match ( ip , a . ip ) == None ) : continue if ( mac != None ) : if ( re . match ( mac , a . mac ) == None ) : continue if ( interf != None ) : if ( re . match ( interf , str ( a . interf ) ) == None ) : continue if ( arp_type != None ) : if ( re . match ( arp_type , a . arp_type ) == None ) : continue ret . append ( a ) return ret
Get the ARP table from a switch.
8,171
def __query_node ( self , ip , host ) : host = util . shorten_host_name ( host , self . config . host_domains ) node , node_updated = self . __get_known_node ( ip , host ) if ( node == None ) : node = natlas_node ( ) node . name = host node . ip = [ ip ] state = NODE_NEW else : if ( node . snmpobj . success == 1 ) : return ( node , NODE_KNOWN ) if ( node_updated == 1 ) : state = NODE_NEWIP else : state = NODE_KNOWN node . name = host if ( ip == 'UNKNOWN' ) : return ( node , state ) if ( ( ip == '0.0.0.0' ) | ( ip == '' ) ) : return ( node , state ) if ( node . try_snmp_creds ( self . config . snmp_creds ) == 0 ) : return ( node , state ) node . name = node . get_system_name ( self . config . host_domains ) if ( node . name != host ) : if ( state == NODE_NEW ) : node2 , node_updated2 = self . __get_known_node ( ip , host ) if ( ( node2 != None ) & ( node_updated2 == 0 ) ) : return ( node , NODE_KNOWN ) if ( node_updated2 == 1 ) : state = NODE_NEWIP if ( ( node . name == None ) | ( node . name == '' ) ) : node . name = node . get_ipaddr ( ) node . opts . get_serial = True node . query_node ( ) return ( node , state )
Query this node. Return node details and whether we already knew about it or whether it is a new node. Don't save the node to the known list; just return info about it.
8,172
def __get_known_node ( self , ip , host ) : for ex in self . nodes : for exip in ex . ip : if ( exip == '0.0.0.0' ) : continue if ( exip == ip ) : return ( ex , 0 ) node = self . __get_known_node_by_host ( host ) if ( node != None ) : if ( ip not in node . ip ) : node . ip . append ( ip ) return ( node , 1 ) return ( node , 0 ) return ( None , 0 )
Look for known nodes by IP and HOST. If found by HOST, add the IP if not already known.
8,173
def __get_known_node_by_host ( self , hostname ) : for n in self . nodes : if ( n . name == hostname ) : return n return None
Determine if the node is already known by hostname. If it is, return it.
8,174
def create_file_and_add_volume ( self , runtime , volume , host_outdir_tgt , secret_store , tmpdir_prefix ) : if not host_outdir_tgt : tmp_dir , tmp_prefix = os . path . split ( tmpdir_prefix ) new_file = os . path . join ( tempfile . mkdtemp ( prefix = tmp_prefix , dir = tmp_dir ) , os . path . basename ( volume . resolved ) ) writable = True if volume . type == "CreateWritableFile" else False if secret_store : contents = secret_store . retrieve ( volume . resolved ) else : contents = volume . resolved dirname = os . path . dirname ( host_outdir_tgt or new_file ) if not os . path . exists ( dirname ) : os . makedirs ( dirname ) with open ( host_outdir_tgt or new_file , "wb" ) as file_literal : file_literal . write ( contents . encode ( "utf-8" ) ) if not host_outdir_tgt : self . append_volume ( runtime , new_file , volume . target , writable = writable ) if writable : ensure_writable ( host_outdir_tgt or new_file ) else : ensure_non_writable ( host_outdir_tgt or new_file ) return host_outdir_tgt or new_file
Create the file and add a mapping.
8,175
def add_volumes ( self , pathmapper , runtime , tmpdir_prefix , secret_store = None , any_path_okay = False ) : container_outdir = self . builder . outdir for key , vol in ( itm for itm in pathmapper . items ( ) if itm [ 1 ] . staged ) : host_outdir_tgt = None if vol . target . startswith ( container_outdir + "/" ) : host_outdir_tgt = os . path . join ( self . outdir , vol . target [ len ( container_outdir ) + 1 : ] ) if not host_outdir_tgt and not any_path_okay : raise WorkflowException ( "No mandatory DockerRequirement, yet path is outside " "the designated output directory, also know as " "$(runtime.outdir): {}" . format ( vol ) ) if vol . type in ( "File" , "Directory" ) : self . add_file_or_directory_volume ( runtime , vol , host_outdir_tgt ) elif vol . type == "WritableFile" : self . add_writable_file_volume ( runtime , vol , host_outdir_tgt , tmpdir_prefix ) elif vol . type == "WritableDirectory" : self . add_writable_directory_volume ( runtime , vol , host_outdir_tgt , tmpdir_prefix ) elif vol . type in [ "CreateFile" , "CreateWritableFile" ] : new_path = self . create_file_and_add_volume ( runtime , vol , host_outdir_tgt , secret_store , tmpdir_prefix ) pathmapper . update ( key , new_path , vol . target , vol . type , vol . staged )
Append volume mappings to the runtime option list.
8,176
def docker_monitor ( self , cidfile , tmpdir_prefix , cleanup_cidfile , process ) : cid = None while cid is None : time . sleep ( 1 ) if process . returncode is not None : if cleanup_cidfile : os . remove ( cidfile ) return try : with open ( cidfile ) as cidhandle : cid = cidhandle . readline ( ) . strip ( ) except ( OSError , IOError ) : cid = None max_mem = self . docker_get_memory ( cid ) tmp_dir , tmp_prefix = os . path . split ( tmpdir_prefix ) stats_file = tempfile . NamedTemporaryFile ( prefix = tmp_prefix , dir = tmp_dir ) with open ( stats_file . name , mode = "w" ) as stats_file_handle : stats_proc = subprocess . Popen ( [ 'docker' , 'stats' , '--no-trunc' , '--format' , '{{.MemPerc}}' , cid ] , stdout = stats_file_handle , stderr = subprocess . DEVNULL ) process . wait ( ) stats_proc . kill ( ) max_mem_percent = 0 with open ( stats_file . name , mode = "r" ) as stats : for line in stats : try : mem_percent = float ( re . sub ( CONTROL_CODE_RE , '' , line ) . replace ( '%' , '' ) ) if mem_percent > max_mem_percent : max_mem_percent = mem_percent except ValueError : break _logger . info ( u"[job %s] Max memory used: %iMiB" , self . name , int ( ( max_mem_percent * max_mem ) / ( 2 ** 20 ) ) ) if cleanup_cidfile : os . remove ( cidfile )
Record memory usage of the running Docker container.
8,177
def adjustFiles ( rec , op ) : if isinstance ( rec , MutableMapping ) : if rec . get ( "class" ) == "File" : rec [ "path" ] = op ( rec [ "path" ] ) for d in rec : adjustFiles ( rec [ d ] , op ) if isinstance ( rec , MutableSequence ) : for d in rec : adjustFiles ( d , op )
Apply a mapping function to each File path in the object rec.
8,178
def check_types ( srctype , sinktype , linkMerge , valueFrom ) : if valueFrom is not None : return "pass" if linkMerge is None : if can_assign_src_to_sink ( srctype , sinktype , strict = True ) : return "pass" if can_assign_src_to_sink ( srctype , sinktype , strict = False ) : return "warning" return "exception" if linkMerge == "merge_nested" : return check_types ( { "items" : _get_type ( srctype ) , "type" : "array" } , _get_type ( sinktype ) , None , None ) if linkMerge == "merge_flattened" : return check_types ( merge_flatten_type ( _get_type ( srctype ) ) , _get_type ( sinktype ) , None , None ) raise WorkflowException ( u"Unrecognized linkMerge enum '{}'" . format ( linkMerge ) )
Check if the source and sink types are pass, warning, or exception.
8,179
def merge_flatten_type ( src ) : if isinstance ( src , MutableSequence ) : return [ merge_flatten_type ( t ) for t in src ] if isinstance ( src , MutableMapping ) and src . get ( "type" ) == "array" : return src return { "items" : src , "type" : "array" }
Return the merge-flattened type of the source type.
8,180
def can_assign_src_to_sink ( src , sink , strict = False ) : if src == "Any" or sink == "Any" : return True if isinstance ( src , MutableMapping ) and isinstance ( sink , MutableMapping ) : if sink . get ( "not_connected" ) and strict : return False if src [ "type" ] == "array" and sink [ "type" ] == "array" : return can_assign_src_to_sink ( src [ "items" ] , sink [ "items" ] , strict ) if src [ "type" ] == "record" and sink [ "type" ] == "record" : return _compare_records ( src , sink , strict ) if src [ "type" ] == "File" and sink [ "type" ] == "File" : for sinksf in sink . get ( "secondaryFiles" , [ ] ) : if not [ 1 for srcsf in src . get ( "secondaryFiles" , [ ] ) if sinksf == srcsf ] : if strict : return False return True return can_assign_src_to_sink ( src [ "type" ] , sink [ "type" ] , strict ) if isinstance ( src , MutableSequence ) : if strict : for this_src in src : if not can_assign_src_to_sink ( this_src , sink ) : return False return True for this_src in src : if can_assign_src_to_sink ( this_src , sink ) : return True return False if isinstance ( sink , MutableSequence ) : for this_sink in sink : if can_assign_src_to_sink ( src , this_sink ) : return True return False return src == sink
Check for identical type specifications, ignoring extra keys like inputBinding.
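The list (union type) branches above are the subtle part. A reduced sketch covering only Any, unions, and plain type names, with the record, array, and secondaryFiles handling omitted (compatible is an illustrative name):

def compatible(src, sink, strict=False):
    # Union handling mirrored from the checker above: in strict mode every
    # source alternative must fit the sink; otherwise one match is enough.
    if src == "Any" or sink == "Any":
        return True
    if isinstance(src, list):
        results = [compatible(s, sink, strict) for s in src]
        return all(results) if strict else any(results)
    if isinstance(sink, list):
        return any(compatible(src, s, strict) for s in sink)
    return src == sink

print(compatible(["File", "null"], "File"))        # True  (lenient)
print(compatible(["File", "null"], "File", True))  # False (strict)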
8,181
def _compare_records ( src , sink , strict = False ) : def _rec_fields ( rec ) : out = { } for field in rec [ "fields" ] : name = shortname ( field [ "name" ] ) out [ name ] = field [ "type" ] return out srcfields = _rec_fields ( src ) sinkfields = _rec_fields ( sink ) for key in six . iterkeys ( sinkfields ) : if ( not can_assign_src_to_sink ( srcfields . get ( key , "null" ) , sinkfields . get ( key , "null" ) , strict ) and sinkfields . get ( key ) is not None ) : _logger . info ( "Record comparison failure for %s and %s\n" "Did not match fields for %s: %s and %s" , src [ "name" ] , sink [ "name" ] , key , srcfields . get ( key ) , sinkfields . get ( key ) ) return False return True
Compare two records ensuring they have compatible fields.
8,182
def check_all_types(src_dict, sinks, sourceField):
    validation = {"warning": [], "exception": []}
    for sink in sinks:
        if sourceField in sink:
            valueFrom = sink.get("valueFrom")
            if isinstance(sink[sourceField], MutableSequence):
                srcs_of_sink = [src_dict[parm_id] for parm_id in sink[sourceField]]
                linkMerge = sink.get(
                    "linkMerge",
                    ("merge_nested" if len(sink[sourceField]) > 1 else None))
            else:
                parm_id = sink[sourceField]
                srcs_of_sink = [src_dict[parm_id]]
                linkMerge = None
            for src in srcs_of_sink:
                check_result = check_types(src, sink, linkMerge, valueFrom)
                if check_result == "warning":
                    validation["warning"].append(SrcSink(src, sink, linkMerge))
                elif check_result == "exception":
                    validation["exception"].append(SrcSink(src, sink, linkMerge))
    return validation

Given a list of sinks, check if their types match with the types of their sources.
8,183
def output_callback(self, out, process_status):
    self.final_status.append(process_status)
    self.final_output.append(out)

Collect the final status and outputs.
8,184
def _runner(self, job, runtime_context):
    try:
        job.run(runtime_context)
    except WorkflowException as err:
        _logger.exception("Got workflow error")
        self.exceptions.append(err)
    except Exception as err:
        _logger.exception("Got workflow error")
        self.exceptions.append(WorkflowException(Text(err)))
    finally:
        with runtime_context.workflow_eval_lock:
            self.threads.remove(threading.current_thread())
            if isinstance(job, JobBase):
                self.allocated_ram -= job.builder.resources["ram"]
                self.allocated_cores -= job.builder.resources["cores"]
            runtime_context.workflow_eval_lock.notifyAll()

Job running thread.
8,185
def run_job(self, job, runtime_context):
    if job is not None:
        with self.pending_jobs_lock:
            self.pending_jobs.append(job)
    with self.pending_jobs_lock:
        n = 0
        while (n + 1) <= len(self.pending_jobs):
            job = self.pending_jobs[n]
            if isinstance(job, JobBase):
                if ((job.builder.resources["ram"]) > self.max_ram
                        or (job.builder.resources["cores"]) > self.max_cores):
                    _logger.error(
                        'Job "%s" cannot be run, requests more resources (%s) '
                        'than available on this host (already allocated ram is %d, '
                        'allocated cores is %d, max ram %d, max cores %d)',
                        job.name, job.builder.resources,
                        self.allocated_ram, self.allocated_cores,
                        self.max_ram, self.max_cores)
                    self.pending_jobs.remove(job)
                    return
                if ((self.allocated_ram + job.builder.resources["ram"]) > self.max_ram
                        or (self.allocated_cores + job.builder.resources["cores"]) > self.max_cores):
                    _logger.debug(
                        'Job "%s" cannot run yet, resources (%s) are not '
                        'available (already allocated ram is %d, allocated cores is %d, '
                        'max ram %d, max cores %d)',
                        job.name, job.builder.resources,
                        self.allocated_ram, self.allocated_cores,
                        self.max_ram, self.max_cores)
                    n += 1
                    continue
            thread = threading.Thread(target=self._runner, args=(job, runtime_context))
            thread.daemon = True
            self.threads.add(thread)
            if isinstance(job, JobBase):
                self.allocated_ram += job.builder.resources["ram"]
                self.allocated_cores += job.builder.resources["cores"]
            thread.start()
            self.pending_jobs.remove(job)

Execute a single Job in a separate thread.
8,186
def wait_for_next_completion(self, runtime_context):
    if runtime_context.workflow_eval_lock is not None:
        runtime_context.workflow_eval_lock.wait()
    if self.exceptions:
        raise self.exceptions[0]

Wait for jobs to finish.
8,187
def append_volume(runtime, source, target, writable=False):
    runtime.append(u"--volume={}:{}:{}".format(
        docker_windows_path_adjust(source), target,
        "rw" if writable else "ro"))

Add binding arguments to the runtime list.
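A sketch of the effect of append_volume, calling it as a plain function for illustration and assuming a POSIX host where docker_windows_path_adjust returns the path unchanged; the paths are placeholders:

runtime = ["docker", "run", "-i"]
append_volume(runtime, "/home/me/data", "/var/lib/cwl/data")
append_volume(runtime, "/home/me/out", "/var/spool/cwl", writable=True)
# runtime now ends with:
#   "--volume=/home/me/data:/var/lib/cwl/data:ro"
#   "--volume=/home/me/out:/var/spool/cwl:rw"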
8,188
def add_writable_file_volume(self, runtime, volume, host_outdir_tgt, tmpdir_prefix):
    if self.inplace_update:
        self.append_volume(runtime, volume.resolved, volume.target, writable=True)
    else:
        if host_outdir_tgt:
            if not os.path.exists(os.path.dirname(host_outdir_tgt)):
                os.makedirs(os.path.dirname(host_outdir_tgt))
            shutil.copy(volume.resolved, host_outdir_tgt)
        else:
            tmp_dir, tmp_prefix = os.path.split(tmpdir_prefix)
            tmpdir = tempfile.mkdtemp(prefix=tmp_prefix, dir=tmp_dir)
            file_copy = os.path.join(tmpdir, os.path.basename(volume.resolved))
            shutil.copy(volume.resolved, file_copy)
            self.append_volume(runtime, file_copy, volume.target, writable=True)
        ensure_writable(host_outdir_tgt or file_copy)

Append a writable file mapping to the runtime option list.
8,189
def add_writable_directory_volume(self, runtime, volume, host_outdir_tgt, tmpdir_prefix):
    if volume.resolved.startswith("_:"):
        if not host_outdir_tgt:
            tmp_dir, tmp_prefix = os.path.split(tmpdir_prefix)
            new_dir = os.path.join(
                tempfile.mkdtemp(prefix=tmp_prefix, dir=tmp_dir),
                os.path.basename(volume.target))
            self.append_volume(runtime, new_dir, volume.target, writable=True)
        elif not os.path.exists(host_outdir_tgt):
            os.makedirs(host_outdir_tgt)
    else:
        if self.inplace_update:
            self.append_volume(runtime, volume.resolved, volume.target, writable=True)
        else:
            if not host_outdir_tgt:
                tmp_dir, tmp_prefix = os.path.split(tmpdir_prefix)
                tmpdir = tempfile.mkdtemp(prefix=tmp_prefix, dir=tmp_dir)
                new_dir = os.path.join(tmpdir, os.path.basename(volume.resolved))
                shutil.copytree(volume.resolved, new_dir)
                self.append_volume(runtime, new_dir, volume.target, writable=True)
            else:
                shutil.copytree(volume.resolved, host_outdir_tgt)
            ensure_writable(host_outdir_tgt or new_dir)

Append a writable directory mapping to the runtime option list.
8,190
def make(self, cwl):
    load = load_tool.load_tool(cwl, self.loading_context)
    if isinstance(load, int):
        raise Exception("Error loading tool")
    return Callable(load, self)

Instantiate a CWL object from a CWL document.
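The factory above is typically used roughly like this (a sketch: "echo.cwl" and its "inp" input parameter are hypothetical, and the exact Factory constructor arguments vary between cwltool releases):

import cwltool.factory

fac = cwltool.factory.Factory()
echo = fac.make("echo.cwl")   # loads the document and wraps it in a Callable
result = echo(inp="foo")      # executes the tool and returns its output object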
8,191
def realize_input_schema(input_types, schema_defs):
    for index, entry in enumerate(input_types):
        if isinstance(entry, string_types):
            if '#' in entry:
                _, input_type_name = entry.split('#')
            else:
                input_type_name = entry
            if input_type_name in schema_defs:
                entry = input_types[index] = schema_defs[input_type_name]
        if isinstance(entry, collections.Mapping):
            if isinstance(entry['type'], string_types) and '#' in entry['type']:
                _, input_type_name = entry['type'].split('#')
                if input_type_name in schema_defs:
                    input_types[index]['type'] = realize_input_schema(
                        schema_defs[input_type_name], schema_defs)
            if isinstance(entry['type'], collections.MutableSequence):
                input_types[index]['type'] = realize_input_schema(
                    entry['type'], schema_defs)
            if isinstance(entry['type'], collections.Mapping):
                input_types[index]['type'] = realize_input_schema(
                    [input_types[index]['type']], schema_defs)
            if entry['type'] == 'array':
                items = entry['items'] if not isinstance(entry['items'], string_types) else [entry['items']]
                input_types[index]['items'] = realize_input_schema(items, schema_defs)
            if entry['type'] == 'record':
                input_types[index]['fields'] = realize_input_schema(
                    entry['fields'], schema_defs)
    return input_types

Replace references to named types with the actual types.
8,192
def generate_input_template(tool):
    template = yaml.comments.CommentedMap()
    for inp in realize_input_schema(tool.tool["inputs"], tool.schemaDefs):
        name = shortname(inp["id"])
        value, comment = generate_example_input(inp['type'], inp.get('default', None))
        template.insert(0, name, value, comment)
    return template

Generate an example input object for the given CWL process.
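A sketch of how the template might be rendered, assuming `tool` is an already-loaded cwltool Process object and that ruamel.yaml's legacy round_trip_dump API is available (this mirrors what `cwltool --make-template` prints):

import sys
from ruamel import yaml

template = generate_input_template(tool)     # CommentedMap of input name -> example value
yaml.round_trip_dump(template, sys.stdout)   # preserves the per-field comments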
8,193
def make_relative(base, obj):
    uri = obj.get("location", obj.get("path"))
    if ":" in uri.split("/")[0] and not uri.startswith("file://"):
        pass
    else:
        if uri.startswith("file://"):
            uri = uri_file_path(uri)
        obj["location"] = os.path.relpath(uri, base)

Relativize the location URI of a File or Directory object.
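An illustrative call (the paths are made up; uri_file_path comes from schema_salad and is assumed to be importable):

obj = {"class": "File", "location": "file:///home/me/project/data/input.txt"}
make_relative("/home/me/project", obj)
obj["location"]   # -> "data/input.txt"

remote = {"class": "File", "location": "http://example.com/input.txt"}
make_relative("/home/me/project", remote)   # left untouched: not a file:// URI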
8,194
def printdeps(obj, document_loader, stdout, relative_deps, uri,
              basedir=None, nestdirs=True):
    deps = find_deps(obj, document_loader, uri, basedir=basedir, nestdirs=nestdirs)
    if relative_deps == "primary":
        base = basedir if basedir else os.path.dirname(uri_file_path(str(uri)))
    elif relative_deps == "cwd":
        base = os.getcwd()
    visit_class(deps, ("File", "Directory"), functools.partial(make_relative, base))
    stdout.write(json_dumps(deps, indent=4))

Print a JSON representation of the dependencies of the CWL document.
8,195
def find_deps(obj, document_loader, uri, basedir=None, nestdirs=True):
    deps = {"class": "File",
            "location": uri,
            "format": CWL_IANA}

    def loadref(base, uri):
        return document_loader.fetch(document_loader.fetcher.urljoin(base, uri))

    sfs = scandeps(
        basedir if basedir else uri, obj, {"$import", "run"},
        {"$include", "$schemas", "location"}, loadref, nestdirs=nestdirs)
    if sfs is not None:
        deps["secondaryFiles"] = sfs
    return deps

Find the dependencies of the CWL document.
8,196
def print_pack(document_loader, processobj, uri, metadata):
    packed = pack(document_loader, processobj, uri, metadata)
    if len(packed["$graph"]) > 1:
        return json_dumps(packed, indent=4)
    return json_dumps(packed["$graph"][0], indent=4)

Return a CWL serialization of the CWL document in JSON.
8,197
def find_default_container(builder, default_container=None, use_biocontainers=None):
    if not default_container and use_biocontainers:
        default_container = get_container_from_software_requirements(
            use_biocontainers, builder)
    return default_container

Default finder for default containers.
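A minimal sketch of the precedence: an explicitly supplied default container short-circuits the biocontainers lookup (the image name is a placeholder):

find_default_container(builder=None, default_container="docker.io/alpine:latest")
# -> "docker.io/alpine:latest"
find_default_container(builder=None)
# -> None: nothing requested and the biocontainers lookup is disabled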
8,198
def run(*args, **kwargs):
    signal.signal(signal.SIGTERM, _signal_handler)
    try:
        sys.exit(main(*args, **kwargs))
    finally:
        _terminate_processes()

Run cwltool.
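Because run() simply forwards its arguments to main() and exits with its return code, it can be invoked programmatically much like the command line (a sketch; the file names are placeholders and sys.exit will terminate the calling process):

# Roughly equivalent to running: cwltool echo.cwl job.yml
run(["echo.cwl", "job.yml"])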
8,199
def add(self, value):
    if not isinstance(value, string_types):
        raise Exception("Secret store only accepts strings")
    if value not in self.secrets:
        placeholder = "(secret-%s)" % Text(uuid.uuid4())
        self.secrets[placeholder] = value
        return placeholder
    return value

Add the given value to the store.
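A short sketch of the behaviour, assuming a SecretStore class whose constructor initialises an empty self.secrets mapping (as in cwltool.secrets):

store = SecretStore()
token = store.add("hunter2")   # e.g. "(secret-9b1c...)": an opaque placeholder
# The placeholder is what appears in job objects; the real value stays in the
# store and is substituted back when the command line is actually built.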