idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
48,800
def get_prediction_results(model_dir_or_id, data, headers, img_cols=None,
                           cloud=False, with_source=True, show_image=True):
    """Predict with a local or cloud model and return results as a DataFrame.

    Args:
      model_dir_or_id: local model directory, or "model.version" when cloud=True.
      data: a pandas DataFrame, a list of dicts, or a list of CSV strings.
      headers: CSV field names used when `data` is a list of CSV strings.
      img_cols: names of columns holding image URLs to download; defaults to [].
      cloud: if True, call the CloudML online-prediction service.
      with_source: if True, join the source data onto the prediction results.
      show_image: if True, embed downloaded images in the displayed source data.

    Returns:
      A pandas DataFrame of prediction results (optionally joined with source).

    Raises:
      ValueError: if cloud=True and model_dir_or_id is not "model.version".
    """
    if img_cols is None:
        img_cols = []
    if isinstance(data, pd.DataFrame):
        # Convert each DataFrame row into a dict keyed by column name.
        data = list(data.T.to_dict().values())
    elif isinstance(data[0], six.string_types):
        data = list(csv.DictReader(data, fieldnames=headers))

    images = _download_images(data, img_cols)
    predict_data = _get_predicton_csv_lines(data, headers, images)

    if cloud:
        parts = model_dir_or_id.split('.')
        if len(parts) != 2:
            raise ValueError('Invalid model name for cloud prediction. Use "model.version".')
        predict_results = ml.ModelVersions(parts[0]).predict(parts[1], predict_data)
    else:
        # Silence noisy TensorFlow INFO logging during local prediction,
        # restoring the previous level afterwards.
        tf_logging_level = logging.getLogger("tensorflow").level
        logging.getLogger("tensorflow").setLevel(logging.WARNING)
        try:
            predict_results = _tf_predict(model_dir_or_id, predict_data)
        finally:
            logging.getLogger("tensorflow").setLevel(tf_logging_level)

    df_r = pd.DataFrame(predict_results)
    if not with_source:
        return df_r

    display_data = data
    if show_image:
        display_data = _get_display_data_with_images(data, images)
    df_s = pd.DataFrame(display_data)
    df = pd.concat([df_r, df_s], axis=1)
    # Drop duplicated column names that can appear after the concat.
    df = df.loc[:, ~df.columns.duplicated()]
    return df
Predict with a specified model .
48,801
def get_probs_for_labels(labels, prediction_results):
    """Given ML Workbench prediction results, get probs of each label per instance.

    Args:
      labels: a list of label names; output columns follow this order.
      prediction_results: a pandas DataFrame of prediction results. Either it
        contains a 'probability' column alongside 'predicted*' columns, or one
        column per label holding that label's probability directly.

    Returns:
      A list (one entry per row) of lists of probabilities, one per label.
    """
    probs = []
    if 'probability' in prediction_results:
        # Columns look like: predicted, probability, predicted_2, probability_2, ...
        for _, row in prediction_results.iterrows():
            probs_one = [0.0] * len(labels)
            for k, v in row.items():
                if v in labels and k.startswith('predicted'):
                    # 'predicted' -> 'probability', 'predicted_2' -> 'probability_2'.
                    # (The original also special-cased k == 'predict', but that is
                    # unreachable under the startswith('predicted') guard, so the
                    # dead branch has been removed.)
                    prob_name = 'probability' + k[9:]
                    probs_one[labels.index(v)] = row[prob_name]
            probs.append(probs_one)
        return probs
    else:
        # One column per label: copy each label's probability directly.
        for _, row in prediction_results.iterrows():
            probs_one = [0.0] * len(labels)
            for k, v in row.items():
                if k in labels:
                    probs_one[labels.index(k)] = v
            probs.append(probs_one)
        return probs
Given ML Workbench prediction results get probs of each label for each instance .
48,802
def local_batch_predict(model_dir, csv_file_pattern, output_dir, output_format, batch_size=100):
    """Batch predict with a specified TensorFlow model over CSV files.

    Args:
      model_dir: directory containing the exported TensorFlow model.
      csv_file_pattern: glob pattern of input CSV files (local or GCS).
      output_dir: directory to write prediction results into (created if needed).
      output_format: extension/format of the result files (e.g. 'csv', 'json').
      batch_size: number of CSV lines fed to the model per session.run call.

    Raises:
      ValueError: if no files match csv_file_pattern.
    """
    file_io.recursive_create_dir(output_dir)
    csv_files = file_io.get_matching_files(csv_file_pattern)
    if len(csv_files) == 0:
        raise ValueError('No files found given ' + csv_file_pattern)

    with tf.Graph().as_default(), tf.Session() as sess:
        input_alias_map, output_alias_map = _tf_load_model(sess, model_dir)
        # The model is assumed to take a single CSV-line input tensor.
        csv_tensor_name = list(input_alias_map.values())[0]
        output_schema = _get_output_schema(sess, output_alias_map)
        for csv_file in csv_files:
            # One result file per input file: predict_results_<name>.<format>.
            output_file = os.path.join(
                output_dir,
                'predict_results_' +
                os.path.splitext(os.path.basename(csv_file))[0] + '.' + output_format)
            with file_io.FileIO(output_file, 'w') as f:
                prediction_source = _batch_csv_reader(csv_file, batch_size)
                for batch in prediction_source:
                    # Drop empty lines and trailing whitespace before feeding.
                    batch = [l.rstrip() for l in batch if l]
                    predict_results = sess.run(fetches=output_alias_map,
                                               feed_dict={csv_tensor_name: batch})
                    formatted_results = _format_results(output_format, output_schema,
                                                        predict_results)
                    f.write('\n'.join(formatted_results) + '\n')

    file_io.write_string_to_file(os.path.join(output_dir, 'predict_results_schema.json'),
                                 json.dumps(output_schema, indent=2))
Batch Predict with a specified model .
48,803
def submit_training(job_request, job_id=None):
    """Submit a training job to CloudML.

    Args:
      job_request: a dict describing the CloudML training input. If its 'args'
        entry is a dict, it is flattened to a ['--key', 'value', ...] list
        (list values produce one '--key item' pair per item).
      job_id: id for the job; when None one is generated from the current
        timestamp, prefixed with the python module name if present.

    Returns:
      A Job wrapping the submitted CloudML job.
    """
    new_job_request = dict(job_request)
    # Convert args from a dict to a flat command-line-style list.
    if 'args' in job_request and isinstance(job_request['args'], dict):
        job_args = job_request['args']
        args = []
        for k, v in six.iteritems(job_args):
            if isinstance(v, list):
                for item in v:
                    args.append('--' + str(k))
                    args.append(str(item))
            else:
                args.append('--' + str(k))
                args.append(str(v))
        new_job_request['args'] = args

    if job_id is None:
        job_id = datetime.datetime.now().strftime('%y%m%d_%H%M%S')
        if 'python_module' in new_job_request:
            job_id = new_job_request['python_module'].replace('.', '_') + '_' + job_id

    job = {
        'job_id': job_id,
        'training_input': new_job_request,
    }
    context = datalab.Context.default()
    cloudml = discovery.build('ml', 'v1', credentials=context.credentials)
    request = cloudml.projects().jobs().create(
        body=job, parent='projects/' + context.project_id)
    # Identify Datalab as the client in the request headers.
    request.headers['user-agent'] = 'GoogleCloudDataLab/1.0'
    request.execute()
    return Job(job_id)
Submit a training job .
48,804
def submit_batch_prediction(job_request, job_id=None):
    """Submit a batch prediction job to CloudML.

    Args:
      job_request: a dict describing the CloudML prediction input.
      job_id: id for the job; autogenerated from a timestamp when None.

    Returns:
      A Job wrapping the submitted CloudML job.
    """
    if job_id is None:
        timestamp = datetime.datetime.now().strftime('%y%m%d_%H%M%S')
        job_id = 'prediction_' + timestamp
    body = {
        'job_id': job_id,
        'prediction_input': job_request,
    }
    context = datalab.Context.default()
    cloudml = discovery.build('ml', 'v1', credentials=context.credentials)
    request = cloudml.projects().jobs().create(
        body=body, parent='projects/' + context.project_id)
    # Identify Datalab as the client in the request headers.
    request.headers['user-agent'] = 'GoogleCloudDataLab/1.0'
    request.execute()
    return Job(job_id)
Submit a batch prediction job .
48,805
def _reduced_kernel_size_for_small_input ( input_tensor , kernel_size ) : shape = input_tensor . get_shape ( ) . as_list ( ) if shape [ 1 ] is None or shape [ 2 ] is None : kernel_size_out = kernel_size else : kernel_size_out = [ min ( shape [ 1 ] , kernel_size [ 0 ] ) , min ( shape [ 2 ] , kernel_size [ 1 ] ) ] return kernel_size_out
Define kernel size which is automatically reduced for small input .
48,806
def inception_v3_arg_scope(weight_decay=0.00004, stddev=0.1,
                           batch_norm_var_collection='moving_vars'):
    """Defines the default InceptionV3 arg scope.

    Args:
      weight_decay: L2 regularization weight applied to conv/FC weights.
      stddev: stddev of the truncated-normal initializer for conv weights.
      batch_norm_var_collection: graph collection that the batch-norm moving
        mean/variance variables are added to.

    Returns:
      A slim arg_scope with batch-norm + relu defaults for conv layers.
    """
    batch_norm_params = {
        # Decay for the moving averages.
        'decay': 0.9997,
        # Epsilon to prevent division by a zero variance.
        'epsilon': 0.001,
        'updates_collections': tf.GraphKeys.UPDATE_OPS,
        'variables_collections': {
            'beta': None,
            'gamma': None,
            'moving_mean': [batch_norm_var_collection],
            'moving_variance': [batch_norm_var_collection],
        }
    }
    # Apply weight decay to the weights in Conv and FC layers.
    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                        weights_regularizer=slim.l2_regularizer(weight_decay)):
        with slim.arg_scope(
                [slim.conv2d],
                weights_initializer=tf.truncated_normal_initializer(stddev=stddev),
                activation_fn=tf.nn.relu,
                normalizer_fn=slim.batch_norm,
                normalizer_params=batch_norm_params) as sc:
            return sc
Defines the default InceptionV3 arg scope .
48,807
def preprocess(train_dataset, output_dir, eval_dataset, checkpoint):
    """Preprocess data locally with an in-process (DirectRunner) Beam pipeline.

    Args:
      train_dataset: training data source to preprocess.
      output_dir: directory where preprocessed outputs are written.
      eval_dataset: optional evaluation data source.
      checkpoint: inception checkpoint GCS URL; the default is used when None.

    Returns:
      A LambdaJob that runs the pipeline when executed.
    """
    import apache_beam as beam
    from google.datalab.utils import LambdaJob
    from . import _preprocess

    if checkpoint is None:
        checkpoint = _util._DEFAULT_CHECKPOINT_GSURL
    job_id = ('preprocess-image-classification-' +
              datetime.datetime.now().strftime('%y%m%d-%H%M%S'))
    # The project id is passed into the pipeline options even for a local run.
    options = {
        'project': _util.default_project(),
    }
    opts = beam.pipeline.PipelineOptions(flags=[], **options)
    p = beam.Pipeline('DirectRunner', options=opts)
    _preprocess.configure_pipeline(p, train_dataset, eval_dataset, checkpoint,
                                   output_dir, job_id)
    # Defer execution: the pipeline only runs when the LambdaJob is started.
    job = LambdaJob(lambda: p.run().wait_until_finish(), job_id)
    return job
Preprocess data locally .
48,808
def train(input_dir, batch_size, max_steps, output_dir, checkpoint):
    """Train the model locally as a background LambdaJob.

    Args:
      input_dir: directory holding preprocessed training data.
      batch_size: training batch size.
      max_steps: maximum number of training steps.
      output_dir: directory where the trained model is written.
      checkpoint: inception checkpoint GCS URL; the default is used when None.

    Returns:
      A LambdaJob that runs the trainer when executed.
    """
    from google.datalab.utils import LambdaJob

    if checkpoint is None:
        checkpoint = _util._DEFAULT_CHECKPOINT_GSURL
    labels = _util.get_labels(input_dir)
    model = _model.Model(labels, 0.5, checkpoint)
    # Single-process run: present ourselves as the master task of a cluster.
    task = type('TaskSpec', (object,), {'type': 'master', 'index': 0})

    def _run_training():
        trainer = _trainer.Trainer(input_dir, batch_size, max_steps,
                                   output_dir, model, None, task)
        return trainer.run_training()

    return LambdaJob(_run_training, 'training')
Train model locally .
48,809
def predict(model_dir, image_files, resize, show_image):
    """Predict using a model in a local or GCS directory.

    Args:
      model_dir: directory containing the model.
      image_files: list of image files to classify.
      resize: whether to resize the images before prediction.
      show_image: whether to include the images in the processed results.

    Returns:
      Processed prediction results for display.
    """
    from . import _predictor

    loaded_images = _util.load_images(image_files, resize=resize)
    labels_and_scores = _predictor.predict(model_dir, loaded_images)
    combined = zip(image_files, loaded_images, labels_and_scores)
    return _util.process_prediction_results(combined, show_image)
Predict using a model in a local or GCS directory.
48,810
def batch_predict(dataset, model_dir, output_csv, output_bq_table):
    """Batch predict running locally with a DirectRunner Beam pipeline.

    Args:
      dataset: data source to predict over.
      model_dir: directory containing the model.
      output_csv: CSV results destination (may be None).
      output_bq_table: BigQuery results destination (may be None).

    Returns:
      A LambdaJob that runs the pipeline when executed.

    Raises:
      ValueError: if both output_csv and output_bq_table are None.
    """
    import apache_beam as beam
    from google.datalab.utils import LambdaJob
    from . import _predictor

    if output_csv is None and output_bq_table is None:
        raise ValueError('output_csv and output_bq_table cannot both be None.')
    job_id = ('batch-predict-image-classification-' +
              datetime.datetime.now().strftime('%y%m%d-%H%M%S'))
    # The project id is passed into the pipeline options even for a local run.
    options = {
        'project': _util.default_project(),
    }
    opts = beam.pipeline.PipelineOptions(flags=[], **options)
    p = beam.Pipeline('DirectRunner', options=opts)
    _predictor.configure_pipeline(p, dataset, model_dir, output_csv, output_bq_table)
    # Defer execution: the pipeline only runs when the LambdaJob is started.
    job = LambdaJob(lambda: p.run().wait_until_finish(), job_id)
    return job
Batch predict running locally .
48,811
def result(self):
    """Get the result for a job, blocking until the job is complete.

    Returns:
      The result produced by the job.

    Raises:
      The job's fatal error, if one occurred.
    """
    self.wait()
    error = self._fatal_error
    if error:
        raise error
    return self._result
Get the result for a job . This will block if the job is incomplete .
48,812
def _refresh_state ( self ) : if self . _is_complete : return if not self . _future : raise Exception ( 'Please implement this in the derived class' ) if self . _future . done ( ) : self . _is_complete = True self . _end_time = datetime . datetime . utcnow ( ) try : self . _result = self . _future . result ( ) except Exception as e : message = str ( e ) self . _fatal_error = JobError ( location = traceback . format_exc ( ) , message = message , reason = str ( type ( e ) ) )
Get the state of a job. Must be overridden by derived Job classes for jobs that don't use a Future.
48,813
def state(self):
    """Describe the state of the Job as a human-readable string."""
    if not self.is_complete:
        return 'in progress'
    if self.failed:
        return 'failed with error: %s' % str(self._fatal_error)
    if self._errors:
        return 'completed with some non-fatal errors'
    return 'completed'
Describe the state of a Job .
48,814
def wait_any(jobs, timeout=None):
    """Return when at least one of the specified jobs has completed or timeout expires.

    Args:
      jobs: the jobs to wait on.
      timeout: optional timeout in seconds; wait indefinitely when None.

    Returns:
      The result of Job._wait for the FIRST_COMPLETED condition.
    """
    condition = concurrent.futures.FIRST_COMPLETED
    return Job._wait(jobs, timeout, condition)
Return when at least one of the specified jobs has completed or timeout expires .
48,815
def wait_all(jobs, timeout=None):
    """Return when all of the specified jobs have completed or timeout expires.

    Args:
      jobs: the jobs to wait on.
      timeout: optional timeout in seconds; wait indefinitely when None.

    Returns:
      The result of Job._wait for the ALL_COMPLETED condition.
    """
    condition = concurrent.futures.ALL_COMPLETED
    return Job._wait(jobs, timeout, condition)
Return when all of the specified jobs have completed or the timeout expires.
48,816
def evaluate(self, num_eval_batches=None):
    """Run one round of evaluation and return the metric values.

    Builds the eval graph, restores the latest checkpoint from
    self.checkpoint_path, runs metric updates over the cached example
    batches, and writes a summary for TensorBoard.

    Args:
      num_eval_batches: number of batches to evaluate; defaults to
        self.num_eval_batches.

    Returns:
      The evaluated metric values (e.g. loss and accuracy).
    """
    num_eval_batches = num_eval_batches or self.num_eval_batches
    with tf.Graph().as_default() as graph:
        self.tensors = self.model.build_eval_graph(self.eval_data_paths,
                                                   self.batch_size)
        self.summary = tf.summary.merge_all()
        self.saver = tf.train.Saver()

    self.summary_writer = tf.summary.FileWriter(self.output_path)
    # summary_op=None and global_step=None: summaries and stepping are
    # managed manually below rather than by the Supervisor.
    self.sv = tf.train.Supervisor(graph=graph,
                                  logdir=self.output_path,
                                  summary_op=None,
                                  global_step=None,
                                  saver=self.saver)

    last_checkpoint = tf.train.latest_checkpoint(self.checkpoint_path)
    with self.sv.managed_session(master='',
                                 start_standard_services=False) as session:
        self.sv.saver.restore(session, last_checkpoint)

        # Cache the first num_eval_batches of examples so subsequent calls
        # evaluate on the same data.
        if not self.batch_of_examples:
            self.sv.start_queue_runners(session)
            for i in range(num_eval_batches):
                self.batch_of_examples.append(session.run(self.tensors.examples))

        for i in range(num_eval_batches):
            session.run(self.tensors.metric_updates,
                        {self.tensors.examples: self.batch_of_examples[i]})

        metric_values = session.run(self.tensors.metric_values)
        global_step = tf.train.global_step(session, self.tensors.global_step)
        summary = session.run(self.summary)
        self.summary_writer.add_summary(summary, global_step)
        self.summary_writer.flush()
        return metric_values
Run one round of evaluation return loss and accuracy .
48,817
def log(self, session):
    """Log training progress rates, then roll the bookkeeping counters forward."""
    elapsed = self.now - self.start_time
    global_rate = ((self.global_step - self.last_global_step) /
                   (self.now - self.last_global_time))
    local_rate = ((self.local_step - self.last_local_step) /
                  (self.now - self.last_local_time))
    logging.info('Train [%s/%d], step %d (%.3f sec) %.1f global steps/s, '
                 '%.1f local steps/s',
                 self.task.type, self.task.index, self.global_step, elapsed,
                 global_rate, local_rate)
    # Remember where and when this log line was emitted so the next call
    # computes rates over the most recent interval only.
    self.last_log = self.now
    self.last_global_step, self.last_global_time = self.global_step, self.now
    self.last_local_step, self.last_local_time = self.local_step, self.now
Logs training progress .
48,818
def eval(self, session):
    """Runs one evaluation pass: checkpoint, evaluate, log, and adapt cadence.

    Saves a checkpoint, evaluates on both the train and eval sets, and slows
    the evaluation interval down if evaluation consumed too large a fraction
    of training time.
    """
    eval_start = time.time()
    self.saver.save(session, self.sv.save_path, self.tensors.global_step)
    logging.info('Eval, step %d:\n- on train set %s\n-- on eval set %s',
                 self.global_step,
                 self.model.format_metric_values(self.train_evaluator.evaluate()),
                 self.model.format_metric_values(self.evaluator.evaluate()))
    now = time.time()

    # Make sure evaluation does not consume too much of total runtime:
    # if the train/eval time ratio fell below min_train_eval_rate, stretch
    # the interval so the ratio is restored.
    eval_time = now - eval_start
    train_eval_rate = self.eval_interval / eval_time
    if train_eval_rate < self.min_train_eval_rate and self.last_save > 0:
        logging.info('Adjusting eval interval from %.2fs to %.2fs',
                     self.eval_interval, self.min_train_eval_rate * eval_time)
        self.eval_interval = self.min_train_eval_rate * eval_time

    self.last_save = now
    self.last_log = now
Runs evaluation loop .
48,819
def plot(self, data):
    """Plots a feature slice view on given data.

    Args:
      data: a SQL query string, a bq.Query, or a pandas DataFrame.

    Raises:
      Exception: if data is none of the supported types.
    """
    import IPython

    # Accept raw SQL as str (py3) / basestring (py2) and wrap it in a Query.
    if ((sys.version_info.major > 2 and isinstance(data, str)) or
            (sys.version_info.major <= 2 and isinstance(data, basestring))):
        data = bq.Query(data)

    if isinstance(data, bq.Query):
        df = data.execute().result().to_dataframe()
        data = self._get_lantern_format(df)
    elif isinstance(data, pd.core.frame.DataFrame):
        data = self._get_lantern_format(data)
    else:
        raise Exception('data needs to be a sql query, or a pandas DataFrame.')

    # NOTE(review): the HTML template literal appears to have been lost here --
    # as written, HTML_TEMPLATE merely aliases metrics_str, so the .format()
    # call below cannot produce valid lantern HTML. Restore the original
    # template string -- TODO confirm against upstream source.
    HTML_TEMPLATE = metrics_str = str(map(str, data[0]['metricValues'].keys()))
    # NOTE(review): elem.iteritems() is Python-2-only, although the version
    # check above suggests Python 3 is also supported -- verify.
    data_str = str([{str(k): json.dumps(v) for k, v in elem.iteritems()}
                    for elem in data])
    html_id = 'l' + datalab.utils.commands.Html.next_id()
    html = HTML_TEMPLATE.format(html_id=html_id, metrics=metrics_str, data=data_str)
    IPython.display.display(IPython.display.HTML(html))
Plots a feature slice view on given data.
48,820
def run_analysis(args):
    """Builds an analysis file for training from BigQuery or CSV input.

    Runs numerical and categorical analysis over the source table/files and
    writes the schema JSON into args.output_dir.

    Args:
      args: parsed arguments providing either bigquery_table, or
        input_file_pattern + schema_file, plus output_dir.

    Raises:
      ValueError: if the schema contains a type other than
        string/integer/float.
    """
    import google.datalab.bigquery as bq
    if args.bigquery_table:
        table = bq.Table(args.bigquery_table)
        schema_list = table.schema._bq_schema
    else:
        schema_list = json.loads(
            file_io.read_file_to_string(args.schema_file).decode())
        table = bq.ExternalDataSource(source=args.input_file_pattern,
                                      schema=bq.Schema(schema_list))

    # Check the schema is supported before doing any analysis work.
    for col_schema in schema_list:
        col_type = col_schema['type'].lower()
        if col_type != 'string' and col_type != 'integer' and col_type != 'float':
            raise ValueError('Schema contains an unsupported type %s.' % col_type)

    run_numerical_analysis(table, schema_list, args)
    run_categorical_analysis(table, schema_list, args)

    # Save a copy of the schema alongside the analysis output.
    file_io.write_string_to_file(
        os.path.join(args.output_dir, SCHEMA_FILE),
        json.dumps(schema_list, indent=2, separators=(',', ': ')))
Builds an analysis file for training .
48,821
def from_csv(input_csv, headers=None, schema_file=None):
    """Create a ConfusionMatrix from a csv file.

    Args:
      input_csv: path or glob of headerless CSV files; local or GCS.
      headers: CSV column names; must include 'target' and 'predicted'.
      schema_file: path to a JSON schema file, used when headers is None;
        its column names must include 'target' and 'predicted'.

    Returns:
      A ConfusionMatrix built from the target/predicted columns.

    Raises:
      ValueError: if both headers and schema_file are None, or the data has
        no 'target' or 'predicted' column.
    """
    if headers is not None:
        names = headers
    elif schema_file is not None:
        with _util.open_local_or_gcs(schema_file, mode='r') as f:
            schema = json.load(f)
        names = [x['name'] for x in schema]
    else:
        raise ValueError('Either headers or schema_file is needed')

    # Concatenate all matching files into one DataFrame.
    all_files = _util.glob_files(input_csv)
    all_df = []
    for file_name in all_files:
        with _util.open_local_or_gcs(file_name, mode='r') as f:
            all_df.append(pd.read_csv(f, names=names))
    df = pd.concat(all_df, ignore_index=True)

    if 'target' not in df or 'predicted' not in df:
        raise ValueError('Cannot find "target" or "predicted" column')

    # Include every label seen in either column so the matrix is square.
    labels = sorted(set(df['target']) | set(df['predicted']))
    cm = confusion_matrix(df['target'], df['predicted'], labels=labels)
    return ConfusionMatrix(cm, labels)
Create a ConfusionMatrix from a csv file .
48,822
def from_bigquery(sql):
    """Create a ConfusionMatrix from a BigQuery table or query.

    Args:
      sql: a SQL query string, a "dataset.table"/"project.dataset.table"
        table name, or a bq.Query. The results must include 'target' and
        'predicted' columns.

    Returns:
      A ConfusionMatrix built from the grouped target/predicted counts.
    """
    if isinstance(sql, bq.Query):
        sql = sql._expanded_sql()

    parts = sql.split('.')
    if len(parts) == 1 or len(parts) > 3 or any(' ' in x for x in parts):
        sql = '(' + sql + ')'  # looks like a query: parenthesize as a subquery
    else:
        sql = '`' + sql + '`'  # looks like a table name: quote it

    query = bq.Query(
        'SELECT target, predicted, count(*) as count FROM %s group by target, predicted' % sql)
    df = query.execute().result().to_dataframe()
    # Include every label seen in either column so the matrix is square.
    labels = sorted(set(df['target']) | set(df['predicted']))
    labels_count = len(labels)

    # Map labels to matrix indices and fill the count matrix.
    df['target'] = [labels.index(x) for x in df['target']]
    df['predicted'] = [labels.index(x) for x in df['predicted']]
    cm = [[0] * labels_count for i in range(labels_count)]
    for index, row in df.iterrows():
        cm[row['target']][row['predicted']] = row['count']
    return ConfusionMatrix(cm, labels)
Create a ConfusionMatrix from a BigQuery table or query .
48,823
def to_dataframe(self):
    """Convert the confusion matrix to a dataframe.

    Returns:
      A DataFrame with 'target', 'predicted' and 'count' columns, one row
      per (target, predicted) cell of the matrix.
    """
    rows = [
        (self._labels[t], self._labels[p], cell_count)
        for t, target_row in enumerate(self._cm)
        for p, cell_count in enumerate(target_row)
    ]
    return pd.DataFrame(rows, columns=['target', 'predicted', 'count'])
Convert the confusion matrix to a dataframe .
48,824
def plot(self, figsize=None, rotation=45):
    """Plot the confusion matrix.

    Args:
      figsize: optional (width, height) of the figure.
      rotation: rotation angle of the x-axis labels, in degrees.
    """
    fig, ax = plt.subplots(figsize=figsize)
    plt.imshow(self._cm, interpolation='nearest', cmap=plt.cm.Blues, aspect='auto')
    plt.title('Confusion matrix')
    plt.colorbar()
    tick_marks = np.arange(len(self._labels))
    plt.xticks(tick_marks, self._labels, rotation=rotation)
    plt.yticks(tick_marks, self._labels)
    # self._cm may be a plain nested list or an array-like with 2-D indexing
    # and .max() (the two construction paths elsewhere in this module differ);
    # annotate each cell accordingly.
    if isinstance(self._cm, list):
        thresh = max(max(self._cm)) / 2.
        for i, j in itertools.product(range(len(self._labels)),
                                      range(len(self._labels))):
            plt.text(j, i, self._cm[i][j], horizontalalignment="center",
                     color="white" if self._cm[i][j] > thresh else "black")
    else:
        thresh = self._cm.max() / 2.
        for i, j in itertools.product(range(len(self._labels)),
                                      range(len(self._labels))):
            plt.text(j, i, self._cm[i, j], horizontalalignment="center",
                     color="white" if self._cm[i, j] > thresh else "black")
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
Plot the confusion matrix .
48,825
def get_environment_details(zone, environment):
    """Issues a request to Composer to get the environment details.

    Args:
      zone: GCP zone of the Composer environment.
      environment: name of the Composer environment.

    Returns:
      The parsed result of the request.
    """
    context = google.datalab.Context.default()
    path = Api._ENVIRONMENTS_PATH_FORMAT % (context.project_id, zone, environment)
    return google.datalab.utils.Http.request(Api._ENDPOINT + path,
                                             credentials=context.credentials)
Issues a request to Composer to get the environment details .
48,826
def buckets_delete(self, bucket):
    """Issues a request to delete a bucket.

    Args:
      bucket: the name of the bucket to delete.
    """
    url = Api._ENDPOINT + (Api._BUCKET_PATH % bucket)
    # raw_response: the DELETE response has no JSON body to parse.
    google.datalab.utils.Http.request(url,
                                      method='DELETE',
                                      credentials=self._credentials,
                                      raw_response=True)
Issues a request to delete a bucket .
48,827
def buckets_get(self, bucket, projection='noAcl'):
    """Issues a request to retrieve information about a bucket.

    Args:
      bucket: the name of the bucket.
      projection: the projection of bucket information to retrieve
        ('noAcl' by default).

    Returns:
      The parsed bucket information.
    """
    url = Api._ENDPOINT + (Api._BUCKET_PATH % bucket)
    return google.datalab.utils.Http.request(
        url, credentials=self._credentials, args={'projection': projection})
Issues a request to retrieve information about a bucket .
48,828
def buckets_list(self, projection='noAcl', max_results=0, page_token=None, project_id=None):
    """Issues a request to retrieve the list of buckets.

    Args:
      projection: the projection of bucket information to retrieve.
      max_results: maximum number of results; Api._MAX_RESULTS when 0.
      page_token: optional token to continue a previous listing.
      project_id: the project whose buckets are listed; defaults to this
        Api object's project.

    Returns:
      The parsed list of bucket information.
    """
    query = {
        'project': project_id if project_id else self._project_id,
        'maxResults': max_results if max_results != 0 else Api._MAX_RESULTS,
    }
    if projection is not None:
        query['projection'] = projection
    if page_token is not None:
        query['pageToken'] = page_token
    url = Api._ENDPOINT + (Api._BUCKET_PATH % '')
    return google.datalab.utils.Http.request(url, args=query,
                                             credentials=self._credentials)
Issues a request to retrieve the list of buckets .
48,829
def object_download(self, bucket, key, start_offset=0, byte_count=None):
    """Reads the contents of an object.

    Args:
      bucket: the name of the bucket containing the object.
      key: the key (name) of the object to read.
      start_offset: byte offset to start reading from.
      byte_count: number of bytes to read; reads to the end when None.

    Returns:
      The raw response (object content) of the request.
    """
    args = {'alt': 'media'}
    headers = {}
    if start_offset > 0 or byte_count is not None:
        header = 'bytes=%d-' % start_offset
        if byte_count is not None:
            # HTTP Range end bytes are inclusive (RFC 7233), so the last byte
            # requested is start_offset + byte_count - 1. The original used
            # byte_count alone as the end byte, which ignored start_offset
            # and fetched one extra byte.
            header += '%d' % (start_offset + byte_count - 1)
        headers['Range'] = header
    url = Api._DOWNLOAD_ENDPOINT + (Api._OBJECT_PATH % (bucket, Api._escape_key(key)))
    return google.datalab.utils.Http.request(url, args=args, headers=headers,
                                             credentials=self._credentials,
                                             raw_response=True)
Reads the contents of an object as text .
48,830
def object_upload(self, bucket, key, content, content_type):
    """Writes content to the specified object.

    Args:
      bucket: the name of the bucket receiving the object.
      key: the key (name) of the object to write.
      content: the content to upload.
      content_type: the MIME type of the content.

    Returns:
      The raw response of the upload request.
    """
    url = Api._UPLOAD_ENDPOINT + (Api._OBJECT_PATH % (bucket, ''))
    return google.datalab.utils.Http.request(
        url,
        args={'uploadType': 'media', 'name': key},
        data=content,
        headers={'Content-Type': content_type},
        credentials=self._credentials,
        raw_response=True)
Writes text content to the object .
48,831
def preprocess_async(train_dataset, output_dir, eval_dataset=None, checkpoint=None, cloud=None):
    """Preprocess data, producing output that training can consume efficiently.

    Args:
      train_dataset: the training data source.
      output_dir: where the preprocessed output is written.
      eval_dataset: optional evaluation data source.
      checkpoint: optional inception checkpoint URL.
      cloud: None for a local run; otherwise cloud run options (any non-dict
        value is treated as an empty options dict).

    Returns:
      A job object for the asynchronous preprocessing run.
    """
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        if cloud is None:
            return _local.Local.preprocess(train_dataset, output_dir,
                                           eval_dataset, checkpoint)
        cloud_options = cloud if isinstance(cloud, dict) else {}
        return _cloud.Cloud.preprocess(train_dataset, output_dir, eval_dataset,
                                       checkpoint, cloud_options)
Preprocess data . Produce output that can be used by training efficiently .
48,832
def train_async(input_dir, batch_size, max_steps, output_dir, checkpoint=None, cloud=None):
    """Train a model; output can be used for batch prediction or deployment.

    Args:
      input_dir: directory of preprocessed data.
      batch_size: training batch size.
      max_steps: maximum number of training steps.
      output_dir: where the trained model is written.
      checkpoint: optional inception checkpoint URL.
      cloud: None for a local run; otherwise cloud run options.

    Returns:
      A job object for the asynchronous training run.
    """
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        run_locally = cloud is None
        if run_locally:
            return _local.Local.train(input_dir, batch_size, max_steps,
                                      output_dir, checkpoint)
        return _cloud.Cloud.train(input_dir, batch_size, max_steps,
                                  output_dir, checkpoint, cloud)
Train model . The output can be used for batch prediction or online deployment .
48,833
def batch_predict_async(dataset, model_dir, output_csv=None, output_bq_table=None, cloud=None):
    """Batch prediction with an offline model.

    Args:
      dataset: the data to predict over.
      model_dir: directory containing the model.
      output_csv: optional CSV output destination.
      output_bq_table: optional BigQuery output destination.
      cloud: None for a local run; otherwise cloud run options (any non-dict
        value is treated as an empty options dict).

    Returns:
      A job object for the asynchronous batch prediction run.
    """
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        if cloud is None:
            return _local.Local.batch_predict(dataset, model_dir,
                                              output_csv, output_bq_table)
        cloud_options = cloud if isinstance(cloud, dict) else {}
        return _cloud.Cloud.batch_predict(dataset, model_dir, output_csv,
                                          output_bq_table, cloud_options)
Batch prediction with an offline model .
48,834
def _refresh_state ( self ) : if self . _is_complete : return try : response = self . _api . jobs_get ( self . _job_id ) except Exception as e : raise e if 'status' in response : status = response [ 'status' ] if 'state' in status and status [ 'state' ] == 'DONE' : self . _end_time = datetime . datetime . utcnow ( ) self . _is_complete = True self . _process_job_status ( status ) if 'statistics' in response : statistics = response [ 'statistics' ] start_time = statistics . get ( 'creationTime' , None ) end_time = statistics . get ( 'endTime' , None ) if start_time and end_time and end_time >= start_time : self . _start_time = datetime . datetime . fromtimestamp ( float ( start_time ) / 1000.0 ) self . _end_time = datetime . datetime . fromtimestamp ( float ( end_time ) / 1000.0 )
Get the state of a job . If the job is complete this does nothing ; otherwise it gets a refreshed copy of the job resource .
48,835
def monitoring(line, cell=None):
    """Implements the monitoring cell magic for ipython notebooks.

    Args:
      line: the contents of the monitoring magic line.
      cell: optional cell contents.

    Returns:
      The result of dispatching the parsed command.
    """
    parser = datalab.utils.commands.CommandParser(
        prog='monitoring',
        description=('Execute various Monitoring-related operations. Use "%monitoring '
                     '<command> -h" for help on a specific command.'))

    # %monitoring list {metrics, resource_types, groups} ...
    list_parser = parser.subcommand(
        'list', 'List the metrics or resource types in a monitored project.')

    list_metric_parser = list_parser.subcommand(
        'metrics',
        'List the metrics that are available through the Monitoring API.')
    list_metric_parser.add_argument(
        '-t', '--type',
        help='The type of metric(s) to list; can include wildchars.')
    list_metric_parser.add_argument(
        '-p', '--project', help='The project on which to execute the request.')
    list_metric_parser.set_defaults(func=_list_metric_descriptors)

    list_resource_parser = list_parser.subcommand(
        'resource_types',
        ('List the monitored resource types that are available through the '
         'Monitoring API.'))
    list_resource_parser.add_argument(
        '-p', '--project', help='The project on which to execute the request.')
    list_resource_parser.add_argument(
        '-t', '--type',
        help='The resource type(s) to list; can include wildchars.')
    list_resource_parser.set_defaults(func=_list_resource_descriptors)

    list_group_parser = list_parser.subcommand(
        'groups', ('List the Stackdriver groups in this project.'))
    list_group_parser.add_argument(
        '-p', '--project', help='The project on which to execute the request.')
    list_group_parser.add_argument(
        '-n', '--name',
        help='The name of the group(s) to list; can include wildchars.')
    list_group_parser.set_defaults(func=_list_groups)

    return datalab.utils.commands.handle_magic_line(line, cell, parser)
Implements the monitoring cell magic for ipython notebooks .
48,836
def _render_dataframe(dataframe):
    """Helper to render a dataframe as an HTML table.

    Args:
      dataframe: the pandas DataFrame to render.

    Returns:
      An IPython HTML display object containing the table.
    """
    records = dataframe.to_dict(orient='records')
    columns = dataframe.columns.tolist()
    table_html = datalab.utils.commands.HtmlBuilder.render_table(records, columns)
    return IPython.core.display.HTML(table_html)
Helper to render a dataframe as an HTML table .
48,837
def _get_job_status(line):
    """Magic used as an endpoint for the client to get a local job's status.

    Args:
      line: the magic line; its first token is the job name.

    Returns:
      An IPython JSON object with 'exists'/'done'/'error' fields.
    """
    try:
        args = line.strip().split()
        job_name = args[0]
        job = None
        if job_name in _local_jobs:
            job = _local_jobs[job_name]
        else:
            # Unknown job: handled by the except clause below.
            raise Exception('invalid job %s' % job_name)

        if job is not None:
            error = '' if job.fatal_error is None else str(job.fatal_error)
            data = {'exists': True, 'done': job.is_complete, 'error': error}
        else:
            data = {'exists': False}
    except Exception as e:
        google.datalab.utils.print_exception_with_last_stack(e)
        data = {'done': True, 'error': str(e)}
    return IPython.core.display.JSON(data)
magic used as an endpoint for client to get job status .
48,838
def updated_on(self):
    """The updated timestamp of the object as a datetime.datetime."""
    timestamp = self._info.get('updated', None)
    if not timestamp:
        return None
    return dateutil.parser.parse(timestamp)
The updated timestamp of the object as a datetime . datetime .
48,839
def delete(self, wait_for_deletion=True):
    """Deletes this object from its bucket.

    Args:
      wait_for_deletion: if True, poll the bucket listing until the object
        no longer appears (or a poll limit is reached) before returning.
    """
    if self.exists():
        try:
            self._api.objects_delete(self._bucket, self._key)
        except Exception as e:
            raise e
        if wait_for_deletion:
            for _ in range(_MAX_POLL_ATTEMPTS):
                objects = Objects(self._bucket, prefix=self.key, delimiter='/',
                                  context=self._context)
                if any(o.key == self.key for o in objects):
                    # Object is still listed; wait and poll again.
                    time.sleep(_POLLING_SLEEP)
                    continue
                break
            else:
                # for/else: loop exhausted without the object disappearing.
                logging.error('Failed to see object deletion after %d attempts.',
                              _MAX_POLL_ATTEMPTS)
Deletes this object from its bucket .
48,840
def metadata(self):
    """Retrieves metadata about the object.

    Returns:
      An ObjectMetadata instance with information about this object, or None
      if the API returned no information.
    """
    if self._info is None:
        # The original wrapped this call in `try/except Exception as e:
        # raise e`, a no-op that only destroyed the traceback on Python 2;
        # errors now propagate unmodified. The result is cached on self._info.
        self._info = self._api.objects_get(self._bucket, self._key)
    return ObjectMetadata(self._info) if self._info else None
Retrieves metadata about the object .
48,841
def read_stream(self, start_offset=0, byte_count=None):
    """Reads the content of this object.

    Args:
      start_offset: the byte offset to start reading from.
      byte_count: the number of bytes to read; reads to the end when None.

    Returns:
      The downloaded object content.
    """
    # The original wrapped this call in `try/except Exception as e: raise e`,
    # a no-op that only destroyed the traceback on Python 2; errors now
    # propagate unmodified.
    return self._api.object_download(self._bucket, self._key,
                                     start_offset=start_offset,
                                     byte_count=byte_count)
Reads the content of this object as text .
48,842
def read_lines(self, max_lines=None):
    """Reads the content of this object as text, returning up to max_lines lines.

    Starts by fetching a small prefix of the object (~100 bytes per requested
    line) and grows the read tenfold until enough lines are present or the
    whole object has been read.

    Args:
      max_lines: maximum number of lines to return; all lines when None.

    Returns:
      A list of lines.
    """
    if max_lines is None:
        return self.read_stream().split('\n')

    object_size = self.metadata.size
    bytes_to_read = min(100 * max_lines, object_size)
    while True:
        lines = self.read_stream(byte_count=bytes_to_read).split('\n')
        enough_lines = len(lines) > max_lines
        whole_object = bytes_to_read >= object_size
        if enough_lines or whole_object:
            break
        bytes_to_read = min(bytes_to_read * 10, object_size)

    # Drop the final (possibly truncated) fragment, then trim to max_lines.
    del lines[-1]
    return lines[0:max_lines]
Reads the content of this object as text and return a list of lines up to some max .
48,843
def sample_to(self, count, skip_header_rows, strategy, target):
    """Sample rows from a GCS or local file and save results to a target file.

    Args:
      count: number of rows to sample.
      skip_header_rows: True if the source CSV has a header row to skip.
      strategy: 'BIGQUERY' (sample via a BigQuery query; source must be on
        GCS) or 'LOCAL' (download if needed and sample with pandas).
      target: output path, local or 'gs://...'.

    Raises:
      Exception: if the strategy is invalid, or BIGQUERY is requested for a
        non-GCS source.
    """
    # Pick the right range builder for this interpreter. The original did
    # `if sys.version_info.major > 2: xrange = range`, which makes `xrange`
    # local to the function and raises UnboundLocalError on Python 2 (where
    # the branch never assigns it). A conditional expression avoids shadowing
    # the builtin name.
    row_range = range if sys.version_info.major > 2 else xrange  # noqa: F821

    if strategy == 'BIGQUERY':
        import datalab.bigquery as bq
        if not self.path.startswith('gs://'):
            raise Exception('Cannot use BIGQUERY if data is not in GCS')
        federated_table = self._create_federated_table(skip_header_rows)
        row_count = self._get_gcs_csv_row_count(federated_table)
        query = bq.Query('SELECT * from data',
                         data_sources={'data': federated_table})
        sampling = bq.Sampling.random(count * 100 / float(row_count))
        sample = query.sample(sampling=sampling)
        df = sample.to_dataframe()
    elif strategy == 'LOCAL':
        local_file = self.path
        if self.path.startswith('gs://'):
            local_file = tempfile.mktemp()
            datalab.utils.gcs_copy_file(self.path, local_file)
        with open(local_file) as f:
            row_count = sum(1 for line in f)
        start_row = 1 if skip_header_rows is True else 0
        skip_count = (row_count - count - 1 if skip_header_rows is True
                      else row_count - count)
        # Choose which rows to *skip* so that exactly `count` rows remain.
        skip = sorted(random.sample(row_range(start_row, row_count), skip_count))
        header_row = 0 if skip_header_rows is True else None
        df = pd.read_csv(local_file, skiprows=skip, header=header_row,
                         delimiter=self._delimiter)
        if self.path.startswith('gs://'):
            os.remove(local_file)
    else:
        raise Exception('strategy must be BIGQUERY or LOCAL')

    if target.startswith('gs://'):
        with tempfile.NamedTemporaryFile() as f:
            df.to_csv(f, header=False, index=False)
            f.flush()
            datalab.utils.gcs_copy_file(f.name, target)
    else:
        with open(target, 'w') as f:
            df.to_csv(f, header=False, index=False, sep=str(self._delimiter))
Sample rows from GCS or local file and save results to target file .
48,844
def _ParseExample(self, example_features, example_feature_lists, entries, index):
  """Parses data from one (Sequence)Example, populating `entries` with feature values.

  `entries` maps feature name -> dict with 'vals', 'counts', 'feat_lens',
  'missing' and (once known) 'type'. `index` is the number of examples seen
  before this one, so a feature first seen now was "missing" from all of them.
  """
  features_seen = set()
  # Context features (plain Feature) and sequence feature-lists are handled
  # in one pass; `is_feature` distinguishes the two proto layouts.
  for feature_list, is_feature in zip([example_features, example_feature_lists],
                                      [True, False]):
    sequence_length = None
    for feature_name in feature_list:
      if feature_name not in entries:
        # New feature: it was absent from all `index` previous examples.
        entries[feature_name] = {'vals': [], 'counts': [], 'feat_lens': [],
                                 'missing': index}
      feature_entry = entries[feature_name]
      feature = feature_list[feature_name]
      value_type = None
      value_list = []
      if is_feature:
        # Plain Feature: exactly one of the three value kinds may be set.
        if feature.HasField('float_list'):
          value_list = feature.float_list.value
          value_type = self.fs_proto.FLOAT
        elif feature.HasField('bytes_list'):
          value_list = feature.bytes_list.value
          value_type = self.fs_proto.STRING
        elif feature.HasField('int64_list'):
          value_list = feature.int64_list.value
          value_type = self.fs_proto.INT
      else:
        # FeatureList: flatten the values of every step; the value kind is
        # inferred from the first step.
        sequence_length = len(feature.feature)
        if sequence_length != 0 and feature.feature[0].HasField('float_list'):
          for feat in feature.feature:
            for value in feat.float_list.value:
              value_list.append(value)
          value_type = self.fs_proto.FLOAT
        elif sequence_length != 0 and feature.feature[0].HasField('bytes_list'):
          for feat in feature.feature:
            for value in feat.bytes_list.value:
              value_list.append(value)
          value_type = self.fs_proto.STRING
        elif sequence_length != 0 and feature.feature[0].HasField('int64_list'):
          for feat in feature.feature:
            for value in feat.int64_list.value:
              value_list.append(value)
          value_type = self.fs_proto.INT
      if value_type is not None:
        if 'type' not in feature_entry:
          feature_entry['type'] = value_type
        elif feature_entry['type'] != value_type:
          # A feature must have one consistent value type across examples.
          raise TypeError('type mismatch for feature ' + feature_name)
      feature_entry['counts'].append(len(value_list))
      feature_entry['vals'].extend(value_list)
      if sequence_length is not None:
        feature_entry['feat_lens'].append(sequence_length)
      if value_list:
        features_seen.add(feature_name)
  # Any previously-known feature absent from this example is missing once more.
  for f in entries:
    fv = entries[f]
    if f not in features_seen:
      fv['missing'] += 1
Parses data from an example populating a dictionary of feature values .
48,845
def _GetEntries(self, paths, max_entries, iterator_from_file, is_sequence=False):
  """Extract examples from the given files into a dict of feature values.

  Stops after `max_entries` examples; returns (entries, number_of_examples).
  """
  entries = {}
  index = 0
  for filepath in paths:
    for record in iterator_from_file(filepath):
      if is_sequence:
        seq_ex = tf.train.SequenceExample.FromString(record)
        self._ParseExample(seq_ex.context.feature,
                           seq_ex.feature_lists.feature_list, entries, index)
      else:
        self._ParseExample(tf.train.Example.FromString(record).features.feature,
                           [], entries, index)
      index += 1
      if index == max_entries:
        return entries, index
  return entries, index
Extracts examples into a dictionary of feature values .
48,846
def _GetTfRecordEntries(self, path, max_entries, is_sequence, iterator_options):
  """Extract TFRecord examples from `path` into a dict of feature values."""
  record_iterator = partial(tf.python_io.tf_record_iterator,
                            options=iterator_options)
  return self._GetEntries([path], max_entries, record_iterator, is_sequence)
Extracts TFRecord examples into a dictionary of feature values .
48,847
def buckets_insert(self, bucket, project_id=None):
  """Issue a request to create a new bucket, returning the parsed response."""
  url = Api._ENDPOINT + (Api._BUCKET_PATH % '')
  # Fall back to the Api object's default project when none is given.
  return datalab.utils.Http.request(
      url,
      args={'project': project_id if project_id else self._project_id},
      data={'name': bucket},
      credentials=self._credentials)
Issues a request to create a new bucket .
48,848
def objects_delete(self, bucket, key):
  """Delete the specified object from the given bucket."""
  url = Api._ENDPOINT + (Api._OBJECT_PATH % (bucket, Api._escape_key(key)))
  # DELETE returns no JSON body, hence raw_response.
  datalab.utils.Http.request(url, method='DELETE',
                             credentials=self._credentials, raw_response=True)
Deletes the specified object .
48,849
def list(self, pattern='*'):
  """Return metric descriptors matching the filters and the glob `pattern`."""
  if self._descriptors is None:
    # Cache the API response; later calls only re-filter locally.
    self._descriptors = self._client.list_metric_descriptors(
        filter_string=self._filter_string, type_prefix=self._type_prefix)
  return [descriptor for descriptor in self._descriptors
          if fnmatch.fnmatch(descriptor.type, pattern)]
Returns a list of metric descriptors that match the filters .
48,850
def _from_dataframe ( dataframe , default_type = 'STRING' ) : type_mapping = { 'i' : 'INTEGER' , 'b' : 'BOOLEAN' , 'f' : 'FLOAT' , 'O' : 'STRING' , 'S' : 'STRING' , 'U' : 'STRING' , 'M' : 'TIMESTAMP' } fields = [ ] for column_name , dtype in dataframe . dtypes . iteritems ( ) : fields . append ( { 'name' : column_name , 'type' : type_mapping . get ( dtype . kind , default_type ) } ) return fields
Infer a BigQuery table schema from a Pandas dataframe . Note that if you don t explicitly set the types of the columns in the dataframe they may be of a type that forces coercion to STRING so even though the fields in the dataframe themselves may be numeric the type in the derived schema may not be . Hence it is prudent to make sure the Pandas dataframe is typed correctly .
48,851
def _from_dict_record(data):
  """Infer a BigQuery table schema from a dictionary record.

  Nested OrderedDict entries become RECORD types. Ideally `data` is an
  OrderedDict, but that is not required.
  """
  fields = []
  for name, value in data.items():
    fields.append(Schema._get_field_entry(name, value))
  return fields
Infer a BigQuery table schema from a dictionary . If the dictionary has entries that are in turn OrderedDicts these will be turned into RECORD types . Ideally this will be an OrderedDict but it is not required .
48,852
def _from_list_record(data):
  """Infer a BigQuery table schema from a list of values (Column1, Column2, ...)."""
  return [Schema._get_field_entry('Column%d' % idx, value)
          for idx, value in enumerate(data, start=1)]
Infer a BigQuery table schema from a list of values .
48,853
def _from_record(data):
  """Infer a BigQuery table schema from a single record (dict or list).

  The type of each element is used. For a list, field names are simply
  Column1, Column2, etc.
  """
  if isinstance(data, dict):
    return Schema._from_dict_record(data)
  if isinstance(data, list):
    return Schema._from_list_record(data)
  raise Exception('Cannot create a schema from record %s' % str(data))
Infer a BigQuery table schema from a list of fields or a dictionary . The type of the elements is used . For a list the field names are simply Column1 Column2 etc .
48,854
def create_args(line, namespace):
  """Expand any $variable (meta-variable) references in the argument list."""
  expanded = []
  for token in shlex.split(line):
    if not token:
      continue
    if token.startswith('$'):
      name = token[1:]
      if name not in namespace:
        raise Exception('Undefined variable referenced in command line: %s' % token)
      # Substitute the value from the namespace (may be a non-string object).
      expanded.append(namespace[name])
    else:
      expanded.append(token)
  return expanded
Expand any meta - variable references in the argument list .
48,855
def parse(self, line, namespace=None):
  """Parse a line into arguments, expanding meta-variables from a namespace.

  Returns the parsed arguments, or None (after printing the error) on failure.
  """
  try:
    if namespace is None:
      # Default to the interactive IPython user namespace.
      namespace = IPython.get_ipython().user_ns
    return self.parse_args(CommandParser.create_args(line, namespace))
  except Exception as e:
    print(str(e))
    return None
Parses a line into a dictionary of arguments expanding meta - variables from a namespace .
48,856
def subcommand(self, name, help):
  """Create and return an argument parser for a sub-command."""
  if self._subcommands is None:
    # Lazily create the subparsers group on first use.
    self._subcommands = self.add_subparsers(help='commands')
  return self._subcommands.add_parser(name, description=help, help=help)
Creates a parser for a sub - command .
48,857
def render_text(text, preformatted=False):
  """Return the given text rendered as an HTML display object."""
  html = _html.HtmlBuilder.render_text(text, preformatted)
  return IPython.core.display.HTML(html)
Return text formatted as a HTML
48,858
def _get_cols ( fields , schema ) : typemap = { 'STRING' : 'string' , 'INT64' : 'number' , 'INTEGER' : 'number' , 'FLOAT' : 'number' , 'FLOAT64' : 'number' , 'BOOL' : 'boolean' , 'BOOLEAN' : 'boolean' , 'DATE' : 'date' , 'TIME' : 'timeofday' , 'DATETIME' : 'datetime' , 'TIMESTAMP' : 'datetime' } cols = [ ] for col in fields : if schema : f = schema [ col ] t = 'string' if f . mode == 'REPEATED' else typemap . get ( f . data_type , 'string' ) cols . append ( { 'id' : f . name , 'label' : f . name , 'type' : t } ) else : cols . append ( { 'id' : col , 'label' : col , 'type' : 'number' } ) return cols
Get column metadata for Google Charts based on field list and schema .
48,859
def _get_data_from_empty_list(source, fields='*', first_row=0, count=-1, schema=None):
  """Helper for _get_data that returns empty chart data for an empty list."""
  field_list = get_field_list(fields, schema)
  return {'cols': _get_cols(field_list, schema), 'rows': []}, 0
Helper function for _get_data that handles empty lists .
48,860
def _get_data_from_table(source, fields='*', first_row=0, count=-1, schema=None):
  """Helper for _get_data that builds Google Charts data from a BQ table."""
  if not source.exists():
    return _get_data_from_empty_list(source, fields, first_row, count)
  if schema is None:
    schema = source.schema
  fields = get_field_list(fields, schema)
  # Only materialize the requested range when a row count was given.
  gen = source.range(first_row, count) if count >= 0 else source
  rows = []
  for row in gen:
    cells = [{'v': row[c]} if c in row else {} for c in fields]
    rows.append({'c': cells})
  return {'cols': _get_cols(fields, schema), 'rows': rows}, source.length
Helper function for _get_data that handles BQ Tables .
48,861
def replace_vars(config, env):
  """Recursively replace variable references in `config` using `env`, in place."""
  if isinstance(config, dict):
    for key, value in list(config.items()):
      if isinstance(value, (dict, list, tuple)):
        replace_vars(value, env)
      elif isinstance(value, basestring):
        config[key] = expand_var(value, env)
  elif isinstance(config, list):
    for idx, value in enumerate(config):
      if isinstance(value, (dict, list, tuple)):
        replace_vars(value, env)
      elif isinstance(value, basestring):
        config[idx] = expand_var(value, env)
  elif isinstance(config, tuple):
    # Tuples are immutable: only recurse into nested mutable containers.
    for value in config:
      if isinstance(value, (dict, list, tuple)):
        replace_vars(value, env)
Replace variable references in config using the supplied env dictionary .
48,862
def parse_config(config, env, as_dict=True):
  """Parse a config from a magic cell body.

  The body can be JSON or YAML; it is turned into a Python dictionary and any
  variable references are recursively replaced using the supplied env dict.

  Args:
    config: the raw config text (or None).
    env: dictionary used to expand $variable references.
    as_dict: if True, coerce the parsed result to a plain dict.

  Returns:
    The parsed config, or None if `config` was None.
  """
  if config is None:
    return None
  stripped = config.strip()
  if len(stripped) == 0:
    config = {}
  elif stripped[0] == '{':
    config = json.loads(config)
  else:
    # safe_load avoids arbitrary Python object construction from notebook
    # input; yaml.load without an explicit Loader is unsafe and deprecated.
    config = yaml.safe_load(config)
  if as_dict:
    config = dict(config)
  replace_vars(config, env)
  return config
Parse a config from a magic cell body . This could be JSON or YAML . We turn it into a Python dictionary then recursively replace any variable references using the supplied env dictionary .
48,863
def validate_config(config, required_keys, optional_keys=None):
  """Check that `config` is a dict with all required keys and no unexpected ones."""
  if optional_keys is None:
    optional_keys = []
  if not isinstance(config, dict):
    raise Exception('config is not dict type')
  allowed = set(required_keys) | set(optional_keys)
  invalid_keys = set(config) - allowed
  if invalid_keys:
    raise Exception('Invalid config with unexpected keys "%s"' %
                    ', '.join(e for e in invalid_keys))
  missing_keys = set(required_keys) - set(config)
  if missing_keys:
    raise Exception('Invalid config with missing keys "%s"' %
                    ', '.join(missing_keys))
Validate a config dictionary to make sure it includes all required keys and does not include any unexpected keys .
48,864
def validate_config_must_have(config, required_keys):
  """Check that `config` includes every one of the specified keys."""
  missing_keys = set(required_keys).difference(config)
  if missing_keys:
    raise Exception('Invalid config with missing keys "%s"' %
                    ', '.join(missing_keys))
Validate a config dictionary to make sure it has all of the specified keys
48,865
def validate_config_has_one_of(config, one_of_keys):
  """Check that `config` has exactly one of the keys in `one_of_keys`."""
  present = set(config).intersection(one_of_keys)
  if len(present) > 1:
    raise Exception('Only one of the values in "%s" is needed' %
                    ', '.join(present))
  if len(present) == 0:
    raise Exception('One of the values in "%s" is needed' %
                    ', '.join(one_of_keys))
Validate a config dictionary to make sure it has one and only one key in one_of_keys .
48,866
def validate_config_value(value, possible_values):
  """Check that a config value is one of the possible values."""
  if value in possible_values:
    return
  raise Exception('Invalid config value "%s". Possible values are '
                  '%s' % (value, ', '.join(e for e in possible_values)))
Validate a config value to make sure it is one of the possible values .
48,867
def validate_gcs_path(path, require_object):
  """Check whether a given path is a valid GCS path, optionally requiring an object.

  Raises:
    Exception: if the path is not a GCS path, or is only a bucket path while
      `require_object` is True.
  """
  bucket, key = datalab.storage._bucket.parse_name(path)
  if bucket is None:
    raise Exception('Invalid GCS path "%s"' % path)
  if require_object and key is None:
    raise Exception('It appears the GCS path "%s" is a bucket path but not an object path'
                    % path)
Check whether a given path is a valid GCS path .
48,868
def profile_df(df):
  """Generate an HTML profile report of the data in a dataframe."""
  report_html = pandas_profiling.ProfileReport(df).html
  # Rename the embedded bootstrap references so the report's CSS does not
  # clash with the notebook's own styles.
  return IPython.core.display.HTML(report_html.replace('bootstrap', 'nonexistent'))
Generate a profile of data in a dataframe .
48,869
def _package_to_staging(staging_package_url):
  """Repackage this package from its local install location and copy it to GCS.

  Returns the GCS path of the uploaded tarball.
  """
  import google.datalab.ml as ml
  here = os.path.dirname(__file__)
  package_root = os.path.abspath(os.path.join(here, '../../'))
  setup_path = os.path.abspath(os.path.join(here, 'master_setup.py'))
  tar_gz_path = os.path.join(staging_package_url, 'staging', 'trainer.tar.gz')
  print('Building package and uploading to %s' % tar_gz_path)
  ml.package_and_copy(package_root, setup_path, tar_gz_path)
  return tar_gz_path
Repackage this package from local installed location and copy it to GCS .
48,870
def analyze(output_dir, dataset, cloud=False, project_id=None):
  """Blocking version of analyze_async. See documentation of analyze_async."""
  job = analyze_async(output_dir=output_dir,
                      dataset=dataset,
                      cloud=cloud,
                      project_id=project_id)
  job.wait()
  print('Analyze: ' + str(job.state))
Blocking version of analyze_async . See documentation of analyze_async .
48,871
def analyze_async(output_dir, dataset, cloud=False, project_id=None):
  """Analyze data locally or in the cloud with BigQuery; returns a Job."""
  import google.datalab.utils as du

  def _run():
    return _analyze(output_dir, dataset, cloud, project_id)

  with warnings.catch_warnings():
    warnings.simplefilter('ignore')
    return du.LambdaJob(_run, job_id=None)
Analyze data locally or in the cloud with BigQuery .
48,872
def cloud_train(train_dataset, eval_dataset, analysis_dir, output_dir, features,
                model_type, max_steps, num_epochs, train_batch_size,
                eval_batch_size, min_eval_frequency, top_n, layer_sizes,
                learning_rate, epsilon, job_name, job_name_prefix, config):
  """Train a structured-data model using CloudML.

  Validates inputs (single-file-pattern datasets, fresh GCS output dir,
  a CloudTrainingConfig), serializes `features` if it is an in-memory dict,
  assembles the trainer command line, and submits the training job.
  Returns the submitted ml.Job.
  """
  import google.datalab.ml as ml
  if len(train_dataset.input_files) != 1 or len(eval_dataset.input_files) != 1:
    raise ValueError('CsvDataSets must be built with a file pattern, not list '
                     'of files.')
  if file_io.file_exists(output_dir):
    raise ValueError('output_dir already exist. Use a new output path.')
  if isinstance(features, dict):
    # Write the in-memory features dict to a file so the trainer can read it.
    if not file_io.file_exists(output_dir):
      file_io.recursive_create_dir(output_dir)
    features_file = os.path.join(output_dir, 'features_file.json')
    file_io.write_string_to_file(features_file, json.dumps(features))
  else:
    features_file = features
  if not isinstance(config, ml.CloudTrainingConfig):
    raise ValueError('cloud should be an instance of '
                     'google.datalab.ml.CloudTrainingConfig for cloud training.')
  # All referenced paths must be on GCS for cloud training.
  _assert_gcs_files([output_dir, train_dataset.input_files[0],
                     eval_dataset.input_files[0], features_file, analysis_dir])
  args = ['--train-data-paths=%s' % train_dataset.input_files[0],
          '--eval-data-paths=%s' % eval_dataset.input_files[0],
          '--preprocess-output-dir=%s' % analysis_dir,
          '--transforms-file=%s' % features_file,
          '--model-type=%s' % model_type,
          '--max-steps=%s' % str(max_steps),
          '--train-batch-size=%s' % str(train_batch_size),
          '--eval-batch-size=%s' % str(eval_batch_size),
          '--min-eval-frequency=%s' % str(min_eval_frequency),
          '--learning-rate=%s' % str(learning_rate),
          '--epsilon=%s' % str(epsilon)]
  # Optional trainer flags are only passed when provided.
  if num_epochs:
    args.append('--num-epochs=%s' % str(num_epochs))
  if top_n:
    args.append('--top-n=%s' % str(top_n))
  if layer_sizes:
    for i in range(len(layer_sizes)):
      args.append('--layer-size%s=%s' % (i + 1, str(layer_sizes[i])))
  job_request = {'package_uris': [_package_to_staging(output_dir), _TF_GS_URL,
                                  _PROTOBUF_GS_URL],
                 'python_module': 'mltoolbox._structured_data.trainer.task',
                 'job_dir': output_dir,
                 'args': args}
  # Merge the user-supplied cloud training configuration into the request.
  job_request.update(dict(config._asdict()))
  if not job_name:
    # Derive a unique, timestamped job name when none was given.
    job_name = job_name_prefix or 'structured_data_train'
    job_name += '_' + datetime.datetime.now().strftime('%y%m%d_%H%M%S')
  job = ml.Job.submit_training(job_request, job_name)
  print('Job request send. View status of job at')
  print('https://console.developers.google.com/ml/jobs?project=%s' %
        _default_project())
  return job
Train model using CloudML .
48,873
def predict(data, training_dir=None, model_name=None, model_version=None, cloud=False):
  """Run prediction locally or on the cloud.

  Cloud prediction requires model_name/model_version and no training_dir;
  local prediction requires training_dir and no model_name/model_version.
  """
  if cloud:
    if not model_version or not model_name:
      raise ValueError('model_version or model_name is not set')
    if training_dir:
      raise ValueError('training_dir not needed when cloud is True')
  else:
    if not training_dir:
      raise ValueError('training_dir is not set')
    if model_version or model_name:
      raise ValueError('model_name and model_version not needed when cloud is '
                       'False.')
  with warnings.catch_warnings():
    warnings.simplefilter('ignore')
    if cloud:
      return cloud_predict(model_name, model_version, data)
    return local_predict(training_dir, data)
Runs prediction locally or on the cloud .
48,874
def local_predict(training_dir, data):
  """Run local prediction on the prediction graph.

  Writes `data` (DataFrame or iterable of csv lines) to a temp csv file,
  runs the batch-prediction module against the trained model in
  `training_dir/model`, and returns the predictions as a DataFrame.
  """
  from .prediction import predict as predict_module
  tmp_dir = tempfile.mkdtemp()
  _, input_file_path = tempfile.mkstemp(dir=tmp_dir, suffix='.csv',
                                        prefix='input')
  try:
    # Stage the input data as a headerless csv file for the batch runner.
    if isinstance(data, pd.DataFrame):
      data.to_csv(input_file_path, header=False, index=False)
    else:
      with open(input_file_path, 'w') as f:
        for line in data:
          f.write(line + '\n')
    model_dir = os.path.join(training_dir, 'model')
    if not file_io.file_exists(model_dir):
      raise ValueError('training_dir should contain the folder model')
    cmd = ['predict.py',
           '--predict-data=%s' % input_file_path,
           '--trained-model-dir=%s' % model_dir,
           '--output-dir=%s' % tmp_dir,
           '--output-format=csv',
           '--batch-size=16',
           '--mode=prediction',
           '--no-shard-files']
    runner_results = predict_module.main(cmd)
    runner_results.wait_until_finish()
    # Read the output schema produced by the prediction run.
    schema_file = os.path.join(tmp_dir, 'csv_schema.json')
    with open(schema_file, 'r') as f:
      schema = json.loads(f.read())
    # Surface (but do not raise on) per-row prediction errors.
    errors_file = glob.glob(os.path.join(tmp_dir, 'errors*'))
    if errors_file and os.path.getsize(errors_file[0]) > 0:
      print('Warning: there are errors. See below:')
      with open(errors_file[0], 'r') as f:
        text = f.read()
        print(text)
    prediction_file = glob.glob(os.path.join(tmp_dir, 'predictions*'))
    if not prediction_file:
      raise FileNotFoundError('Prediction results not found')
    predictions = pd.read_csv(prediction_file[0],
                              header=None,
                              names=[col['name'] for col in schema])
    return predictions
  finally:
    # Always clean up the temporary working directory.
    shutil.rmtree(tmp_dir)
Runs local prediction on the prediction graph .
48,875
def cloud_predict(model_name, model_version, data):
  """Use online prediction on Cloud ML.

  Args:
    model_name: deployed model name.
    model_version: deployed model version.
    data: a pandas DataFrame (converted to headerless csv lines) or a list of
      csv line strings.

  Returns:
    A DataFrame of the prediction results, with columns sorted by name.
  """
  import google.datalab.ml as ml
  if isinstance(data, pd.DataFrame):
    # Convert the dataframe to a list of non-empty csv lines.
    string_buffer = io.StringIO()
    data.to_csv(string_buffer, header=None, index=False)
    input_data = string_buffer.getvalue().split('\n')
    input_data = [line for line in input_data if line]
  else:
    input_data = data
  predictions = ml.ModelVersions(model_name).predict(model_version, input_data)
  df = pd.DataFrame(columns=sorted(predictions[0].keys()))
  for i in range(len(predictions)):
    # dict.items() works on both Python 2 and 3; the original iteritems()
    # was removed in Python 3 and raised AttributeError here.
    for k, v in predictions[i].items():
      df.loc[i, k] = v
  return df
Use Online prediction .
48,876
def batch_predict(training_dir, prediction_input_file, output_dir, mode,
                  batch_size=16, shard_files=True, output_format='csv',
                  cloud=False):
  """Blocking version of batch_predict_async."""
  job = batch_predict_async(training_dir=training_dir,
                            prediction_input_file=prediction_input_file,
                            output_dir=output_dir,
                            mode=mode,
                            batch_size=batch_size,
                            shard_files=shard_files,
                            output_format=output_format,
                            cloud=cloud)
  job.wait()
  print('Batch predict: ' + str(job.state))
Blocking version of batch_predict .
48,877
def batch_predict_async(training_dir, prediction_input_file, output_dir, mode,
                        batch_size=16, shard_files=True, output_format='csv',
                        cloud=False):
  """Local or cloud batch prediction; returns a Job wrapping the runner."""
  import google.datalab.utils as du
  with warnings.catch_warnings():
    warnings.simplefilter('ignore')
    if cloud:
      runner_results = cloud_batch_predict(training_dir, prediction_input_file,
                                           output_dir, mode, batch_size,
                                           shard_files, output_format)
      return du.DataflowJob(runner_results)
    runner_results = local_batch_predict(training_dir, prediction_input_file,
                                         output_dir, mode, batch_size,
                                         shard_files, output_format)
    return du.LambdaJob(lambda: runner_results.wait_until_finish(), job_id=None)
Local and cloud batch prediction .
48,878
def make_prediction_pipeline ( pipeline , args ) : predicted_values , errors = ( pipeline | 'Read CSV Files' >> beam . io . ReadFromText ( str ( args . predict_data ) , strip_trailing_newlines = True ) | 'Batch Input' >> beam . ParDo ( EmitAsBatchDoFn ( args . batch_size ) ) | 'Run TF Graph on Batches' >> beam . ParDo ( RunGraphDoFn ( args . trained_model_dir ) ) . with_outputs ( 'errors' , main = 'main' ) ) ( ( predicted_values , errors ) | 'Format and Save' >> FormatAndSave ( args ) )
Builds the prediction pipeline .
48,879
def process(self, element):
  """Run batch prediction on the TF graph for one batch of csv lines.

  Yields one dict of alias -> value per input row. Any failure is emitted on
  the 'errors' tagged output as (message, element) instead of raising.
  """
  import collections
  import apache_beam as beam

  num_in_batch = 0
  try:
    assert self._session is not None
    # There is exactly one input tensor; hoist its alias out of the loop.
    # list() is required on Python 3, where dict_values is not subscriptable
    # (the original .values()[0] raised TypeError there).
    input_alias = list(self._input_alias_map.values())[0]
    feed_dict = collections.defaultdict(list)
    for line in element:
      # Remove a trailing newline, if present.
      if line.endswith('\n'):
        line = line[:-1]
      feed_dict[input_alias].append(line)
      num_in_batch += 1
    batch_result = self._session.run(fetches=self._tensor_names,
                                     feed_dict=feed_dict)
    if num_in_batch > 1:
      # Transpose per-tensor results into per-row prediction dicts.
      for result in zip(*batch_result):
        predictions = {}
        for name, value in zip(self._aliases, result):
          predictions[name] = (value.tolist()
                               if getattr(value, 'tolist', None) else value)
        yield predictions
    else:
      # A batch of one: results are already per-row, no transpose needed.
      predictions = {}
      for i in range(len(self._aliases)):
        value = batch_result[i]
        value = value.tolist() if getattr(value, 'tolist', None) else value
        predictions[self._aliases[i]] = value
      yield predictions
  except Exception as e:  # pylint: disable=broad-except
    # Batch prediction is best-effort: route failures to the error output.
    yield beam.pvalue.TaggedOutput('errors', (str(e), element))
Run batch prediction on a TF graph .
48,880
def encode(self, tf_graph_predictions):
  """Encode one graph prediction dict as a csv line, ordered by the header."""
  return ','.join(str(tf_graph_predictions[col]) for col in self._header)
Encodes the graph json prediction into csv .
48,881
def as_dataframe(self, max_rows=None):
  """Creates a pandas dataframe from the query metadata.

  One row per time series (up to `max_rows`), with a two-level column
  MultiIndex over resource type, resource labels, and metric labels.
  """
  max_rows = len(self._timeseries_list) if max_rows is None else max_rows
  headers = [{'resource': ts.resource._asdict(), 'metric': ts.metric._asdict()}
             for ts in self._timeseries_list[:max_rows]]
  if not headers:
    return pandas.DataFrame()
  dataframe = pandas.io.json.json_normalize(headers)
  # Turn flattened "a.b.c" column names into a two-level MultiIndex;
  # 'resource.type' stays a single label paired with an empty second level.
  dataframe.columns = pandas.MultiIndex.from_tuples(
      [(col, '') if col == 'resource.type' else col.rsplit('.', 1)
       for col in dataframe.columns])
  # Order resource label keys canonically, then metric labels alphabetically.
  resource_keys = google.cloud.monitoring._dataframe._sorted_resource_labels(
      dataframe['resource.labels'].columns)
  sorted_columns = [('resource.type', '')]
  sorted_columns += [('resource.labels', key) for key in resource_keys]
  sorted_columns += sorted(col for col in dataframe.columns
                           if col[0] == 'metric.labels')
  dataframe = dataframe[sorted_columns]
  # Sort the rows by the column values, left to right.
  dataframe = dataframe.sort_values(sorted_columns)
  dataframe = dataframe.reset_index(drop=True).fillna('')
  return dataframe
Creates a pandas dataframe from the query metadata .
48,882
def get_train_eval_files(input_dir):
  """Return (train_files, eval_files) from the latest preprocessed data dir."""
  data_dir = _get_latest_data_dir(input_dir)
  train_files = file_io.get_matching_files(
      os.path.join(data_dir, 'train*.tfrecord.gz'))
  eval_files = file_io.get_matching_files(
      os.path.join(data_dir, 'eval*.tfrecord.gz'))
  return train_files, eval_files
Get preprocessed training and eval files .
48,883
def get_labels(input_dir):
  """Return the list of labels from the latest preprocessed output dir."""
  labels_file = os.path.join(_get_latest_data_dir(input_dir), 'labels')
  with file_io.FileIO(labels_file, 'r') as f:
    # One label per line; strip the trailing newline before splitting.
    return f.read().rstrip().split('\n')
Get a list of labels from preprocessed output dir .
48,884
def override_if_not_in_args(flag, argument, args):
  """Append `flag` and `argument` to `args` (in place) unless flag is present."""
  if flag in args:
    return
  args.extend([flag, argument])
Checks if flags is in args and if not it adds the flag to args .
48,885
def loss(loss_value):
  """Track an aggregated mean loss.

  Returns ([update_ops], mean_loss_op): run the update ops to accumulate,
  evaluate the op to read the running mean.
  """
  total_loss = tf.Variable(0.0, False)
  loss_count = tf.Variable(0, False)
  updates = [tf.assign_add(total_loss, loss_value),
             tf.assign_add(loss_count, 1)]
  mean_loss = total_loss / tf.cast(loss_count, tf.float32)
  return updates, mean_loss
Calculates aggregated mean loss .
48,886
def accuracy(logits, labels):
  """Track aggregated top-1 accuracy.

  Returns ([update_ops], accuracy_op): run the update ops to accumulate
  correct/incorrect counts, evaluate the op to read the running accuracy.
  """
  is_correct = tf.nn.in_top_k(logits, labels, 1)
  correct = tf.reduce_sum(tf.cast(is_correct, tf.int32))
  incorrect = tf.reduce_sum(tf.cast(tf.logical_not(is_correct), tf.int32))
  correct_count = tf.Variable(0, False)
  incorrect_count = tf.Variable(0, False)
  updates = [tf.assign_add(correct_count, correct),
             tf.assign_add(incorrect_count, incorrect)]
  accuracy_op = (tf.cast(correct_count, tf.float32) /
                 tf.cast(correct_count + incorrect_count, tf.float32))
  return updates, accuracy_op
Calculates aggregated accuracy .
48,887
def check_dataset(dataset, mode):
  """Validate that the dataset schema matches the expectations for `mode`."""
  names = [x['name'] for x in dataset.schema]
  types = [x['type'] for x in dataset.schema]
  all_string = all(t == 'STRING' for t in types)
  if mode == 'train':
    # Training requires both the image url and its label.
    if set(names) != set(['image_url', 'label']) or not all_string:
      raise ValueError('Invalid dataset. Expect only "image_url,label" STRING columns.')
  else:
    # Prediction accepts the url alone, or url plus label (for evaluation).
    valid_names = (set(names) == set(['image_url']) or
                   set(names) == set(['image_url', 'label']))
    if not valid_names or not all_string:
      raise ValueError('Invalid dataset. Expect only "image_url" or "image_url,label" ' +
                       'STRING columns.')
Validate we have a good dataset .
48,888
def get_sources_from_dataset(p, dataset, mode):
  """Get a pcollection of row dicts from a CsvDataSet or BigQueryDataSet.

  Args:
    p: the beam pipeline.
    dataset: a google.datalab.ml CsvDataSet or BigQueryDataSet.
    mode: 'train' or prediction mode; used for schema validation and to make
      beam step labels unique.

  Raises:
    ValueError: if the dataset is invalid or of an unsupported type.
  """
  import apache_beam as beam
  import csv
  from google.datalab.ml import CsvDataSet, BigQueryDataSet

  check_dataset(dataset, mode)
  if type(dataset) is CsvDataSet:
    source_list = []
    for ii, input_path in enumerate(dataset.files):
      source_list.append(
          p | 'Read from Csv %d (%s)' % (ii, mode) >>
          beam.io.ReadFromText(input_path, strip_trailing_newlines=True))
    # The builtin next() works on both Python 2 and 3; the py2-only
    # reader.next() raised AttributeError on Python 3.
    return (source_list |
            'Flatten Sources (%s)' % mode >> beam.Flatten() |
            'Create Dict from Csv (%s)' % mode >>
            beam.Map(lambda line: next(csv.DictReader(
                [line], fieldnames=['image_url', 'label']))))
  elif type(dataset) is BigQueryDataSet:
    # A BigQueryDataSet is either a table or a query, never both.
    bq_source = (beam.io.BigQuerySource(table=dataset.table)
                 if dataset.table is not None
                 else beam.io.BigQuerySource(query=dataset.query))
    return p | 'Read source from BigQuery (%s)' % mode >> beam.io.Read(bq_source)
  else:
    raise ValueError('Invalid DataSet. Expect CsvDataSet or BigQueryDataSet')
get pcollection from dataset .
48,889
def decode_and_resize(image_str_tensor):
  """Decode a jpeg string tensor, resize to 299x299x3, return a uint8 tensor."""
  target_height, target_width, channels = 299, 299, 3
  image = tf.image.decode_jpeg(image_str_tensor, channels=channels)
  # resize_bilinear expects a batch dimension; add it and strip it after.
  image = tf.expand_dims(image, 0)
  image = tf.image.resize_bilinear(image, [target_height, target_width],
                                   align_corners=False)
  image = tf.squeeze(image, squeeze_dims=[0])
  return tf.cast(image, dtype=tf.uint8)
Decodes jpeg string resizes it and returns a uint8 tensor .
48,890
def resize_image(image_str_tensor):
  """Decode a jpeg string, resize it, and re-encode it back to jpeg."""
  resized = decode_and_resize(image_str_tensor)
  return tf.image.encode_jpeg(resized, quality=100)
Decodes jpeg string resizes it and re - encode it to jpeg .
48,891
def load_images(image_files, resize=True):
  """Load image bytes from files and, unless resize is False, resize them."""
  images = []
  for image_file in image_files:
    with file_io.FileIO(image_file, 'r') as ff:
      images.append(ff.read())
  if resize is False:
    return images
  # Resize every image in a single TF session run.
  image_str_tensor = tf.placeholder(tf.string, shape=[None])
  image = tf.map_fn(resize_image, image_str_tensor, back_prop=False)
  feed_dict = collections.defaultdict(list)
  feed_dict[image_str_tensor.name] = images
  with tf.Session() as sess:
    return sess.run(image, feed_dict=feed_dict)
Load images from files and optionally resize it .
48,892
def process_prediction_results(results, show_image):
  """Create a DataFrame of prediction results; display images in IPython if asked."""
  import pandas as pd
  if is_in_IPython() and show_image is True:
    import IPython
    for image_url, image, label_and_score in results:
      # Show the predicted label with its score above each image.
      IPython.display.display_html(
          '<p style="font-size:28px">%s(%.5f)</p>' % label_and_score, raw=True)
      IPython.display.display(IPython.display.Image(data=image))
  rows = [{'image_url': url, 'label': r[0], 'score': r[1]}
          for url, _, r in results]
  return pd.DataFrame(rows)
Create DataFrames out of prediction results and display images in IPython if requested .
48,893
def repackage_to_staging(output_path):
  """Repackage from the local installed location and copy to GCS staging.

  Returns the GCS path of the uploaded tarball.
  """
  import google.datalab.ml as ml
  here = os.path.dirname(__file__)
  package_root = os.path.join(here, '../../../')
  setup_py = os.path.join(here, 'setup.py')
  staging_package_url = os.path.join(output_path, 'staging',
                                     'image_classification.tar.gz')
  ml.package_and_copy(package_root, setup_py, staging_package_url)
  return staging_package_url
Repackage it from local installed location and copy it to GCS .
48,894
def generate_airflow_spec(name, pipeline_spec):
  """Get the Airflow python spec (DAG source text) for the Pipeline object."""
  task_definitions = ''
  up_stream_statements = ''
  parameters = pipeline_spec.get('parameters')
  # Sort tasks so the generated source is deterministic.
  for task_id, task_details in sorted(pipeline_spec['tasks'].items()):
    task_definitions += PipelineGenerator._get_operator_definition(
        task_id, task_details, parameters)
    up_stream_statements += PipelineGenerator._get_dependency_definition(
        task_id, task_details.get('up_stream', []))
  schedule_config = pipeline_spec.get('schedule', {})
  default_args = PipelineGenerator._get_default_args(
      schedule_config, pipeline_spec.get('emails', {}))
  dag_definition = PipelineGenerator._get_dag_definition(
      name, schedule_config.get('interval', '@once'),
      schedule_config.get('catchup', False))
  return (PipelineGenerator._imports + default_args + dag_definition +
          task_definitions + up_stream_statements)
Gets the airflow python spec for the Pipeline object .
48,895
def _get_dependency_definition ( task_id , dependencies ) : set_upstream_statements = '' for dependency in dependencies : set_upstream_statements = set_upstream_statements + '{0}.set_upstream({1})' . format ( task_id , dependency ) + '\n' return set_upstream_statements
Internal helper collects all the dependencies of the task and returns the Airflow equivalent python syntax for specifying them .
48,896
def _get_operator_class_name ( task_detail_type ) : task_type_to_operator_prefix_mapping = { 'pydatalab.bq.execute' : ( 'Execute' , 'google.datalab.contrib.bigquery.operators._bq_execute_operator' ) , 'pydatalab.bq.extract' : ( 'Extract' , 'google.datalab.contrib.bigquery.operators._bq_extract_operator' ) , 'pydatalab.bq.load' : ( 'Load' , 'google.datalab.contrib.bigquery.operators._bq_load_operator' ) , 'Bash' : ( 'Bash' , 'airflow.operators.bash_operator' ) } ( operator_class_prefix , module ) = task_type_to_operator_prefix_mapping . get ( task_detail_type , ( None , __name__ ) ) format_string = '{0}Operator' operator_class_name = format_string . format ( operator_class_prefix ) if operator_class_prefix is None : return format_string . format ( task_detail_type ) , module return operator_class_name , module
Internal helper gets the name of the Airflow operator class . We maintain this in a map so this method really returns the enum name concatenated with the string Operator .
48,897
def _get_operator_param_name_and_values ( operator_class_name , task_details ) : operator_task_details = task_details . copy ( ) if 'type' in operator_task_details . keys ( ) : del operator_task_details [ 'type' ] if 'up_stream' in operator_task_details . keys ( ) : del operator_task_details [ 'up_stream' ] if ( operator_class_name == 'BigQueryOperator' ) : return PipelineGenerator . _get_bq_execute_params ( operator_task_details ) if ( operator_class_name == 'BigQueryToCloudStorageOperator' ) : return PipelineGenerator . _get_bq_extract_params ( operator_task_details ) if ( operator_class_name == 'GoogleCloudStorageToBigQueryOperator' ) : return PipelineGenerator . _get_bq_load_params ( operator_task_details ) return operator_task_details
Internal helper that gets the names of the Python parameters for the Airflow operator class. In some cases we do not expose the Airflow parameter name in its native form, but choose to expose a name that is more standard for Datalab, or one that is more friendly. For example, Airflow's BigQueryOperator uses 'bql' for the query string, but we want %%bq users in Datalab to use 'query'. Hence a few substitutions that are specific to the Airflow operator need to be made.
48,898
def sample(self, n):
  """Samples n rows of the data into a Pandas DataFrame.

  Note that this runs BigQuery queries, so it will incur cost.

  Args:
    n: number of instances to sample.

  Returns:
    A DataFrame holding the sampled rows.

  Raises:
    ValueError: if n exceeds the total number of rows in the source.
  """
  count_result = bq.Query('select count(*) from %s' % self._get_source()).execute().result()
  total = count_result[0].values()[0]
  if n > total:
    raise ValueError('sample larger than population')
  # Express the requested sample size as a percentage of the population.
  sampling = bq.Sampling.random(percent=n * 100.0 / float(total))
  source = self._query if self._query is not None else 'SELECT * FROM `%s`' % self._table
  sampled = bq.Query(source).execute(sampling=sampling).result()
  return sampled.to_dataframe()
Samples data into a Pandas DataFrame . Note that it calls BigQuery so it will incur cost .
48,899
def size(self):
  """The number of instances in the data.

  Computed lazily on first access by scanning every TFRecord file, then
  cached in self._size; if the underlying data source changes the cached
  value may be outdated.
  """
  import tensorflow as tf

  if self._size is None:
    # Files are GZIP-compressed TFRecords; the iterator needs matching
    # read options.
    options = tf.python_io.TFRecordOptions(
        tf.python_io.TFRecordCompressionType.GZIP)
    self._size = 0
    for tfexample_file in self.files:
      record_iter = tf.python_io.tf_record_iterator(tfexample_file, options=options)
      self._size += sum(1 for _ in record_iter)
  return self._size
The number of instances in the data . If the underlying data source changes it may be outdated .