idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
49,000
def snapshot(self, at):
  """Return a new Table which is a snapshot of this table at the specified time.

  Args:
    at: the time of the snapshot (absolute or relative, per Table's decorator rules).
  Returns:
    A new Table decorated with the snapshot time.
  Raises:
    Exception if this table already carries a decorator.
  """
  if self._name_parts.decorator != '':
    raise Exception("Cannot use snapshot() on an already decorated table")
  decorator_value = Table._convert_decorator_time(at)
  return Table("%s@%s" % (self._full_name, str(decorator_value)), context=self._context)
Return a new Table which is a snapshot of this table at the specified time .
49,001
def window(self, begin, end=None):
  """Return a new Table limited to the rows added to this Table during the specified time range.

  Args:
    begin: the start of the window (absolute datetime or relative timedelta).
    end: the end of the window; defaults to "now" in the same kind (absolute/relative) as begin.
  Returns:
    A new Table decorated with the time range.
  Raises:
    Exception if the table is already decorated, the bounds mix absolute and
    relative values, or begin is after end.
  """
  if self._name_parts.decorator != '':
    raise Exception("Cannot use window() on an already decorated table")
  window_start = Table._convert_decorator_time(begin)
  if end is None:
    # Default the end of the window to "now", matching the kind of `begin`.
    end = datetime.timedelta(0) if isinstance(begin, datetime.timedelta) else datetime.datetime.utcnow()
  window_end = Table._convert_decorator_time(end)
  # Both converted values must lie on the same side of zero: both absolute
  # (positive) or both relative (non-positive).
  if (window_start > 0 >= window_end) or (window_end > 0 >= window_start):
    raise Exception("window: Between arguments must both be absolute or relative: %s, %s" %
                    (str(begin), str(end)))
  if window_start > window_end:
    raise Exception("window: Between arguments: begin must be before end: %s, %s" %
                    (str(begin), str(end)))
  return Table("%s@%s-%s" % (self._full_name, str(window_start), str(window_end)),
               context=self._context)
Return a new Table limited to the rows added to this Table during the specified time range .
49,002
def serialize_example(transformed_json_data, features, feature_indices, target_name):
  """Makes an instance of data in libsvm format.

  Args:
    transformed_json_data: dict mapping feature name to a list of transformed values.
    features: feature configuration mapping feature name to its 'transform'.
    feature_indices: iterable of (name, info) pairs where info carries
        'index_start' and 'size' for the feature's libsvm column range.
    target_name: name of the target feature; its first value becomes the label.
  Returns:
    A libsvm-formatted string: the label followed by ' index:value' pairs.
  """
  # Fix: the original also imported `six` and `tensorflow` here but never
  # used either; only feature_transforms is actually needed.
  from trainer import feature_transforms

  line = str(transformed_json_data[target_name][0])
  for name, info in feature_indices:
    transform = features[name]['transform']
    if transform in [feature_transforms.IDENTITY_TRANSFORM,
                     feature_transforms.SCALE_TRANSFORM]:
      line += ' %d:%s' % (info['index_start'], str(transformed_json_data[name][0]))
    elif transform in [feature_transforms.ONE_HOT_TRANSFORM,
                       feature_transforms.MULTI_HOT_TRANSFORM]:
      # One/multi-hot: emit a 1 for each active index within this feature's range.
      for i in range(info['size']):
        if i in transformed_json_data[name]:
          line += ' %d:1' % (info['index_start'] + i)
    elif transform in [feature_transforms.IMAGE_TRANSFORM]:
      # Image embeddings: emit every component of the embedding vector.
      for i in range(info['size']):
        line += ' %d:%s' % (info['index_start'] + i, str(transformed_json_data[name][i]))
  return line
Makes an instance of data in libsvm format .
49,003
def delete(self, delete_contents=False):
  """Issues a request to delete the dataset.

  Args:
    delete_contents: if True, also delete any tables the dataset contains.
  Returns:
    None.
  Raises:
    Exception if the dataset does not exist, or if the API call fails.
  """
  if not self.exists():
    raise Exception('Cannot delete non-existent dataset %s' % self._full_name)
  # Fix: removed the no-op `try: ... except Exception as e: raise e` wrapper;
  # letting the exception propagate is equivalent and keeps the traceback intact.
  self._api.datasets_delete(self._name_parts, delete_contents=delete_contents)
  # Invalidate cached metadata now that the dataset is gone.
  self._info = None
  return None
Issues a request to delete the dataset .
49,004
def create(self, friendly_name=None, description=None):
  """Creates the Dataset with the specified friendly name and description.

  No-op if the dataset already exists.

  Args:
    friendly_name: an optional display name for the dataset.
    description: an optional description for the dataset.
  Returns:
    self, to allow chaining.
  Raises:
    Exception if the creation request fails or returns no selfLink.
  """
  if not self.exists():
    # Fix: removed the no-op `except Exception as e: raise e` wrapper.
    response = self._api.datasets_insert(self._name_parts,
                                         friendly_name=friendly_name,
                                         description=description)
    if 'selfLink' not in response:
      raise Exception("Could not create dataset %s" % self._full_name)
  return self
Creates the Dataset with the specified friendly name and description .
49,005
def update(self, friendly_name=None, description=None):
  """Selectively updates Dataset information.

  Args:
    friendly_name: if not None, the new friendly name.
    description: if not None, the new description.
  """
  self._get_info()
  if self._info:
    if friendly_name:
      self._info['friendlyName'] = friendly_name
    if description:
      self._info['description'] = description
    # Fix: dropped the redundant `except Exception as e: raise e`; the
    # finally clause still invalidates the cached info in every case.
    try:
      self._api.datasets_update(self._name_parts, self._info)
    finally:
      self._info = None
Selectively updates Dataset information .
49,006
def query(self):
  """The Query that defines the view, or None if the view does not exist."""
  if not self.exists():
    return None
  self._table._load_info()
  info = self._table._info
  if 'view' in info and 'query' in info['view']:
    return _query.Query(info['view']['query'])
  return None
The Query that defines the view .
49,007
def run_numerical_categorical_analysis(args, schema_list):
  """Makes the numerical and categorical analysis files.

  Scans every CSV file matching args.input_file_pattern and, per column,
  accumulates min/max/mean for numeric columns and the set of unique labels
  for string columns, writing the results into args.output_dir.

  Raises:
    ValueError if the schema contains a type other than string/integer/float.
  """
  header = [column['name'] for column in schema_list]
  input_files = file_io.get_matching_files(args.input_file_pattern)

  # Validate the schema up front.
  for col_schema in schema_list:
    col_type = col_schema['type'].lower()
    if col_type != 'string' and col_type != 'integer' and col_type != 'float':
      raise ValueError('Schema contains an unsupported type %s.' % col_type)

  def _init_numerical_results():
    return {'min': float('inf'),
            'max': float('-inf'),
            'count': 0,
            'sum': 0.0}

  numerical_results = collections.defaultdict(_init_numerical_results)
  categorical_results = collections.defaultdict(set)

  # Single pass over every input file, updating the per-column accumulators.
  for input_file in input_files:
    with file_io.FileIO(input_file, 'r') as f:
      for line in f:
        parsed_line = dict(zip(header, line.strip().split(',')))
        for col_schema in schema_list:
          col_name = col_schema['name']
          cell = parsed_line[col_name]
          if col_schema['type'].lower() == 'string':
            categorical_results[col_name].update([cell])
          else:
            # Skip missing numeric values entirely.
            if not cell.strip():
              continue
            stats = numerical_results[col_name]
            stats['min'] = min(stats['min'], float(cell))
            stats['max'] = max(stats['max'], float(cell))
            stats['count'] += 1
            stats['sum'] += float(cell)

  # Convert the running sum/count into a mean for each numeric column.
  for col_schema in schema_list:
    if col_schema['type'].lower() != 'string':
      col_name = col_schema['name']
      mean = numerical_results[col_name]['sum'] / numerical_results[col_name]['count']
      del numerical_results[col_name]['sum']
      del numerical_results[col_name]['count']
      numerical_results[col_name]['mean'] = mean

  # Write the numerical analysis as one JSON file, and one labels file per
  # categorical column.
  file_io.write_string_to_file(
      os.path.join(args.output_dir, NUMERICAL_ANALYSIS_FILE),
      json.dumps(numerical_results, indent=2, separators=(',', ': ')))
  for name, unique_labels in six.iteritems(categorical_results):
    labels = '\n'.join(list(unique_labels))
    file_io.write_string_to_file(
        os.path.join(args.output_dir, CATEGORICAL_ANALYSIS_FILE % name), labels)
Makes the numerical and categorical analysis files .
49,008
def run_analysis(args):
  """Builds the analysis files used for training.

  Loads the schema, runs the numerical/categorical analysis over the input
  data, and copies the schema file into the output directory.
  """
  schema = json.loads(file_io.read_file_to_string(args.schema_file))
  run_numerical_categorical_analysis(args, schema)
  # Keep a copy of the schema next to the analysis output.
  file_io.copy(args.schema_file,
               os.path.join(args.output_dir, SCHEMA_FILE),
               overwrite=True)
Builds the analysis files used for training.
49,009
def _repr_html_ ( self ) : parts = [ ] if self . _class : parts . append ( '<div id="hh_%s" class="%s">%s</div>' % ( self . _id , self . _class , self . _markup ) ) else : parts . append ( '<div id="hh_%s">%s</div>' % ( self . _id , self . _markup ) ) if len ( self . _script ) != 0 : parts . append ( '<script>' ) parts . append ( 'require([' ) parts . append ( ',' . join ( [ '"%s"' % d [ 0 ] for d in self . _dependencies ] ) ) parts . append ( '], function(' ) parts . append ( ',' . join ( [ d [ 1 ] for d in self . _dependencies ] ) ) parts . append ( ') {' ) parts . append ( self . _script ) parts . append ( '});' ) parts . append ( '</script>' ) return '' . join ( parts )
Generates the HTML representation .
49,010
def _render_objects(self, items, attributes=None, datatype='object'):
  """Renders an HTML table with the specified list of objects.

  Args:
    items: the iterable of objects (or dicts / chartdata) to render.
    attributes: the attribute/column names to show; inferred for dict and
        chartdata inputs when omitted.
    datatype: one of 'object', 'dict' or 'chartdata'.
  """
  if not items:
    return
  if datatype == 'chartdata':
    if not attributes:
      attributes = [items['cols'][i]['label'] for i in range(0, len(items['cols']))]
    items = items['rows']
    indices = {attributes[i]: i for i in range(0, len(attributes))}

  checkpoint = len(self._segments)
  add = self._segments.append
  add('<table>')
  first = True
  for obj in items:
    if first:
      first = False
      if datatype == 'dict' and not attributes:
        attributes = list(obj.keys())
      # Emit the header row once we know the attribute names.
      if attributes is not None:
        add('<tr>')
        for attr in attributes:
          add('<th>%s</th>' % attr)
        add('</tr>')
    add('<tr>')
    if attributes is None:
      add('<td>%s</td>' % HtmlBuilder._format(obj))
    else:
      for attr in attributes:
        if datatype == 'dict':
          add('<td>%s</td>' % HtmlBuilder._format(obj.get(attr, None), nbsp=True))
        elif datatype == 'chartdata':
          add('<td>%s</td>' % HtmlBuilder._format(obj['c'][indices[attr]]['v'], nbsp=True))
        else:
          add('<td>%s</td>' % HtmlBuilder._format(obj.__getattribute__(attr), nbsp=True))
    add('</tr>')
  add('</table>')
  if first:
    # No rows were emitted; roll back everything added above.
    self._segments = self._segments[:checkpoint]
Renders an HTML table with the specified list of objects .
49,011
def _render_list ( self , items , empty = '<pre>&lt;empty&gt;</pre>' ) : if not items or len ( items ) == 0 : self . _segments . append ( empty ) return self . _segments . append ( '<ul>' ) for o in items : self . _segments . append ( '<li>' ) self . _segments . append ( str ( o ) ) self . _segments . append ( '</li>' ) self . _segments . append ( '</ul>' )
Renders an HTML list with the specified list of strings .
49,012
def sample(self, fields=None, count=5, sampling=None, use_cache=True, dialect=None,
           billing_tier=None):
  """Retrieves a sampling of data from the table.

  Args:
    fields: the fields to include in the sample; None means all.
    count: the number of rows to sample (default 5).
    sampling: an optional sampling strategy.
    use_cache: whether cached query results may be used.
    dialect: the BigQuery SQL dialect to use.
    billing_tier: the billing tier for the query.
  Returns:
    The sampled query results.
  """
  from . import _query
  sampling_query = _query.Query.sampling_query(self._repr_sql_(), context=self._context,
                                               count=count, fields=fields, sampling=sampling)
  return sampling_query.results(use_cache=use_cache, dialect=dialect,
                                billing_tier=billing_tier)
Retrieves a sampling of data from the table .
49,013
def _encode_dict_as_row(record, column_name_map):
  """Encode a dictionary representing a table row in a form suitable for streaming to BQ.

  Timestamps are converted to ISO strings and keys are sanitized to valid
  column-name characters (sanitized names are cached in column_name_map).
  The record is modified in place and also returned.
  """
  for key in list(record.keys()):
    value = record[key]
    # BigQuery expects timestamps as ISO-formatted strings.
    if isinstance(value, (pandas.Timestamp, datetime.datetime)):
      value = record[key] = record[key].isoformat()
    if key not in column_name_map:
      column_name_map[key] = ''.join(c for c in key
                                     if c in Table._VALID_COLUMN_NAME_CHARACTERS)
    clean_key = column_name_map[key]
    if key != clean_key:
      record[clean_key] = value
      del record[key]
  return record
Encode a dictionary representing a table row in a form suitable for streaming to BQ .
49,014
def insert_data(self, data, include_index=False, index_name=None):
  """Insert the contents of a Pandas DataFrame or a list of dictionaries into the table.

  Rows are streamed in batches of up to 500, and the call blocks until the
  streaming buffer reports rows so subsequent reads see the data.

  Args:
    data: a pandas DataFrame or a list of dicts keyed by column name.
    include_index: whether to include the (DataFrame or list) index as a column.
    index_name: name of the index column; defaults to 'Index' for list data.
  Returns:
    self, to allow chaining.
  Raises:
    Exception if the table does not exist, the data schema does not match the
    table schema, or the insert request reports errors.
  """
  max_rows_per_post = 500
  post_interval = 0.05  # pause between batches to stay under rate limits
  if not self.exists():
    raise Exception('Table %s does not exist.' % self._full_name)
  data_schema = _schema.Schema.from_data(data)
  if isinstance(data, list):
    if include_index:
      if not index_name:
        index_name = 'Index'
      data_schema._add_field(index_name, 'INTEGER')

  # Validate that every data field exists in the table with a matching type.
  table_schema = self.schema
  for data_field in data_schema:
    name = data_field.name
    table_field = table_schema[name]
    if table_field is None:
      raise Exception('Table does not contain field %s' % name)
    data_type = data_field.data_type
    table_type = table_field.data_type
    if table_type != data_type:
      raise Exception('Field %s in data has type %s but in table has type %s' %
                      (name, data_type, table_type))

  total_rows = len(data)
  total_pushed = 0
  job_id = uuid.uuid4().hex  # used to build per-row insertIds for dedup
  rows = []
  column_name_map = {}
  is_dataframe = isinstance(data, pandas.DataFrame)
  if is_dataframe:
    # reset_index turns the index into a regular column when requested.
    gen = data.reset_index(drop=not include_index).iterrows()
  else:
    gen = enumerate(data)
  for index, row in gen:
    if is_dataframe:
      row = row.to_dict()
    elif include_index:
      row[index_name] = index
    rows.append({'json': self._encode_dict_as_row(row, column_name_map),
                 'insertId': job_id + str(index)})
    total_pushed += 1
    if (total_pushed == total_rows) or (len(rows) == max_rows_per_post):
      # Fix: removed the no-op `except Exception as e: raise e` wrapper around
      # the insert call; propagation is equivalent.
      response = self._api.tabledata_insert_all(self._name_parts, rows)
      if 'insertErrors' in response:
        raise Exception('insertAll failed: %s' % response['insertErrors'])
      time.sleep(post_interval)
      rows = []

  # Block until the streaming buffer reports rows (or disappears) so the
  # inserted data is visible to subsequent reads.
  while True:
    self._info = self._api.tables_get(self._name_parts)
    if ('streamingBuffer' not in self._info or
        'estimatedRows' not in self._info['streamingBuffer'] or
        int(self._info['streamingBuffer']['estimatedRows']) > 0):
      break
    time.sleep(2)
  return self
Insert the contents of a Pandas DataFrame or a list of dictionaries into the table .
49,015
def range(self, start_row=0, max_rows=None):
  """Get an iterator to iterate through a set of table rows.

  Args:
    start_row: the row at which to start iterating (default 0).
    max_rows: an upper limit on the number of rows returned (default None).
  Returns:
    An iterator over the rows.
  """
  row_fetcher = self._get_row_fetcher(start_row=start_row, max_rows=max_rows)
  return iter(datalab.utils.Iterator(row_fetcher))
Get an iterator to iterate through a set of table rows .
49,016
def to_file_async(self, destination, format='csv', csv_delimiter=',', csv_header=True):
  """Start saving the results to a local file in CSV format.

  NOTE(review): despite the name, this delegates synchronously to to_file()
  and returns None rather than a Job — confirm whether callers expect a Job.
  """
  options = dict(format=format, csv_delimiter=csv_delimiter, csv_header=csv_header)
  self.to_file(destination, **options)
Start saving the results to a local file in CSV format and return a Job for completion .
49,017
def update(self, friendly_name=None, description=None, expiry=None, schema=None):
  """Selectively updates Table information.

  Args:
    friendly_name: if not None, the new friendly name.
    description: if not None, the new description.
    expiry: if not None, the new expiration; a datetime is converted to
        milliseconds since the epoch.
    schema: if not None, the new schema (a Schema object or bq schema list).
  """
  self._load_info()
  if friendly_name is not None:
    self._info['friendlyName'] = friendly_name
  if description is not None:
    self._info['description'] = description
  if expiry is not None:
    if isinstance(expiry, datetime.datetime):
      # BigQuery expects expirationTime in milliseconds since the epoch (UTC).
      expiry = calendar.timegm(expiry.utctimetuple()) * 1000
    self._info['expirationTime'] = expiry
  if schema is not None:
    if isinstance(schema, _schema.Schema):
      schema = schema._bq_schema
    self._info['schema'] = {'fields': schema}
  # Fix: dropped the redundant trailing `except Exception as e: raise e`.
  try:
    self._api.table_update(self._name_parts, self._info)
  except datalab.utils.RequestException:
    # The cached metadata is stale after a failed update; force a refresh.
    self._info = None
Selectively updates Table information .
49,018
def to_query(self, fields=None):
  """Return a Query for this Table.

  Args:
    fields: the fields to select; None (default) selects all ('*'), a list
        is joined into a comma-separated projection, and a string is used as-is.
  Returns:
    A Query object selecting the fields from this table.
  """
  from . import _query
  if fields is None:
    projection = '*'
  elif isinstance(fields, list):
    projection = ','.join(fields)
  else:
    projection = fields
  return _query.Query('SELECT %s FROM %s' % (projection, self._repr_sql_()),
                      context=self._context)
Return a Query for this Table .
49,019
def copy_to(self, new_key, bucket=None):
  """Copies this item to the specified new key.

  Args:
    new_key: the key for the copy.
    bucket: the destination bucket; defaults to this item's bucket.
  Returns:
    An Item for the newly created copy.
  """
  if bucket is None:
    bucket = self._bucket
  # Fix: removed the no-op `except Exception as e: raise e` wrapper.
  new_info = self._api.objects_copy(self._bucket, self._key, bucket, new_key)
  return Item(bucket, new_key, new_info, context=self._context)
Copies this item to the specified new key .
49,020
def exists(self):
  """Checks if the item exists.

  Returns:
    True if metadata could be fetched, False on a request error (e.g. 404).
  """
  # Fix: removed the no-op `except Exception as e: raise e` clause; any
  # non-RequestException propagates unchanged anyway.
  try:
    return self.metadata is not None
  except datalab.utils.RequestException:
    return False
Checks if the item exists .
49,021
def delete(self):
  """Deletes this item from its bucket, if it exists."""
  # Fix: removed the no-op `try: ... except Exception as e: raise e` wrapper.
  if self.exists():
    self._api.objects_delete(self._bucket, self._key)
Deletes this item from its bucket .
49,022
def write_to(self, content, content_type):
  """Writes text content to this item.

  Args:
    content: the content to write.
    content_type: the MIME type of the content.
  """
  # Fix: removed the no-op `try: ... except Exception as e: raise e` wrapper.
  self._api.object_upload(self._bucket, self._key, content, content_type)
Writes text content to this item .
49,023
def contains(self, key):
  """Checks if the specified item exists in this bucket.

  Args:
    key: the key of the item to look up.
  Returns:
    True if the item exists; False if the lookup returned 404.
  Raises:
    Exception for any request error other than a 404.
  """
  try:
    self._api.objects_get(self._bucket, key)
  except datalab.utils.RequestException as e:
    if e.status == 404:
      return False
    # Fix: bare `raise` preserves the original traceback (and the trailing
    # no-op `except Exception as e: raise e` clause was removed).
    raise
  return True
Checks if the specified item exists .
49,024
def request(url, args=None, data=None, headers=None, method=None, credentials=None,
            raw_response=False, stats=None):
  """Issues HTTP requests.

  Args:
    url: the URL to request.
    args: optional query-string arguments.
    data: optional request body; JSON-serialized unless a Content-Type is set.
    headers: optional headers dict (a user-agent is always added).
    method: the HTTP method; defaults to POST when there is data, else GET.
    credentials: optional credentials used to authorize the request.
    raw_response: if True, return the raw content instead of parsed JSON.
    stats: optional dict populated with duration/status/data_size.
  Returns:
    The parsed JSON response (or raw content if raw_response is True).
  Raises:
    RequestException on a non-2xx status; Exception on send/parse failures.
  """
  if headers is None:
    headers = {}
  headers['user-agent'] = 'GoogleCloudDataLab/1.0'
  if args is not None:
    qs = urllib.parse.urlencode(args)
    url = url + '?' + qs
  if data is not None:
    if method is None:
      method = 'POST'
    if data != '':
      # Assume JSON unless the caller supplied an explicit Content-Type.
      if 'Content-Type' not in headers:
        data = json.dumps(data)
        headers['Content-Type'] = 'application/json'
      headers['Content-Length'] = str(len(data))
    else:
      if method == 'POST':
        headers['Content-Length'] = '0'
  if method is None:
    method = 'GET'
  http = Http.http
  if credentials is not None:
    http = copy.copy(http)
    http = google_auth_httplib2.AuthorizedHttp(credentials)
  if stats is not None:
    stats['duration'] = datetime.datetime.utcnow()
  response = None
  try:
    log.debug('request: method[%(method)s], url[%(url)s], body[%(data)s]' % locals())
    response, content = http.request(url, method=method, body=data, headers=headers)
    if 200 <= response.status < 300:
      if raw_response:
        return content
      # Fix: isinstance instead of `type(content) == str`.
      if isinstance(content, str):
        return json.loads(content)
      return json.loads(str(content, encoding='UTF-8'))
    raise RequestException(response.status, content)
  except ValueError:
    raise Exception('Failed to process HTTP response.')
  except httplib2.HttpLib2Error:
    raise Exception('Failed to send HTTP request.')
  finally:
    if stats is not None:
      # Fix: guard against data being None and against the request failing
      # before a response exists — previously the finally block itself raised
      # (len(None) / None.status), masking the original error.
      stats['data_size'] = len(data) if data is not None else 0
      stats['status'] = response.status if response is not None else None
      stats['duration'] = (datetime.datetime.utcnow() - stats['duration']).total_seconds()
Issues HTTP requests .
49,025
def _add_command(parser, subparser_fn, handler, cell_required=False, cell_prohibited=False):
  """Create and initialize a pipeline subcommand handler."""
  sub_parser = subparser_fn(parser)

  def _invoke(args, cell):
    # Route through the dispatcher so cell-presence rules are enforced.
    return _dispatch_handler(args, cell, sub_parser, handler,
                             cell_required=cell_required,
                             cell_prohibited=cell_prohibited)

  sub_parser.set_defaults(func=_invoke)
Create and initialize a pipeline subcommand handler .
49,026
def pipeline(line, cell=None):
  """Implements the pipeline cell magic for ipython notebooks."""
  return google.datalab.utils.commands.handle_magic_line(line, cell, _pipeline_parser)
Implements the pipeline cell magic for ipython notebooks .
49,027
def _dispatch_handler ( args , cell , parser , handler , cell_required = False , cell_prohibited = False ) : if cell_prohibited : if cell and len ( cell . strip ( ) ) : parser . print_help ( ) raise Exception ( 'Additional data is not supported with the %s command.' % parser . prog ) return handler ( args ) if cell_required and not cell : parser . print_help ( ) raise Exception ( 'The %s command requires additional data' % parser . prog ) return handler ( args , cell )
Makes sure cell magics include a cell body and line magics don't, before dispatching to the handler.
49,028
def expand_defaults(schema, features):
  """Add to features any default transformations.

  Modifies `features` in place: each transform defaults its source_column to
  its own name, and every schema column not claimed by a transform receives
  the default numeric or categorical transform.

  Raises:
    ValueError if a source column is missing from the schema or a schema type
    is unsupported.
  """
  schema_names = [col['name'] for col in schema]

  # Default each transform's source column to the transform's own name.
  for name, transform in six.iteritems(features):
    if 'source_column' not in transform:
      transform['source_column'] = name

  # Validate source columns and record which schema columns are in use.
  used_schema_columns = []
  for name, transform in six.iteritems(features):
    if transform['source_column'] not in schema_names:
      raise ValueError('source column %s is not in the schema for transform %s'
                       % (transform['source_column'], name))
    used_schema_columns.append(transform['source_column'])

  # Give every unused schema column a default transform based on its type.
  for col_schema in schema:
    schema_name = col_schema['name']
    schema_type = col_schema['type'].lower()
    if schema_type not in constant.NUMERIC_SCHEMA + [constant.STRING_SCHEMA]:
      raise ValueError(('Only the following schema types are supported: %s'
                        % ' '.join(constant.NUMERIC_SCHEMA + [constant.STRING_SCHEMA])))
    if schema_name not in used_schema_columns:
      if schema_type in constant.NUMERIC_SCHEMA:
        features[schema_name] = {'transform': constant.DEFAULT_NUMERIC_TRANSFORM,
                                 'source_column': schema_name}
      elif schema_type == constant.STRING_SCHEMA:
        features[schema_name] = {'transform': constant.DEFAULT_CATEGORICAL_TRANSFORM,
                                 'source_column': schema_name}
      else:
        raise NotImplementedError('Unknown type %s' % schema_type)
Add to features any default transformations .
49,029
def _sample_cell(args, cell_body):
  """Implements the bigquery sample cell magic for ipython notebooks.

  Determines the source (query, table or view), builds the requested sampling
  strategy, and returns the sampled results (or a profile of them).
  """
  env = datalab.utils.commands.notebook_environment()
  query = None
  table = None
  view = None
  if args['query']:
    query = _get_query_argument(args, cell_body, env)
  elif args['table']:
    table = _get_table(args['table'])
  elif args['view']:
    view = datalab.utils.commands.get_notebook_item(args['view'])
    if not isinstance(view, datalab.bigquery.View):
      raise Exception('%s is not a view' % args['view'])
  else:
    query = datalab.bigquery.Query(cell_body, values=env)

  count = args['count']
  method = args['method']
  if method == 'random':
    sampling = datalab.bigquery.Sampling.random(percent=args['percent'], count=count)
  elif method == 'hashed':
    sampling = datalab.bigquery.Sampling.hashed(field_name=args['field'],
                                                percent=args['percent'],
                                                count=count)
  elif method == 'sorted':
    ascending = args['order'] == 'ascending'
    sampling = datalab.bigquery.Sampling.sorted(args['field'],
                                                ascending=ascending,
                                                count=count)
  else:
    # 'limit' and any unrecognized method both fall back to default sampling.
    sampling = datalab.bigquery.Sampling.default(count=count)

  if query:
    results = query.sample(sampling=sampling, dialect=args['dialect'],
                           billing_tier=args['billing'])
  elif view:
    results = view.sample(sampling=sampling)
  else:
    results = table.sample(sampling=sampling)
  if args['verbose']:
    print(results.sql)
  if args['profile']:
    return datalab.utils.commands.profile_df(results.to_dataframe())
  return results
Implements the bigquery sample cell magic for ipython notebooks .
49,030
def _create_cell(args, cell_body):
  """Implements the BigQuery cell magic used to create datasets and tables.

  Failures are reported with print() rather than raised.
  """
  if args['command'] == 'dataset':
    try:
      datalab.bigquery.Dataset(args['name']).create(friendly_name=args['friendly'],
                                                    description=cell_body)
    except Exception as e:
      print('Failed to create dataset %s: %s' % (args['name'], e))
    return

  # Table creation requires a schema in the cell body.
  if cell_body is None:
    print('Failed to create %s: no schema specified' % args['name'])
    return
  try:
    record = datalab.utils.commands.parse_config(
        cell_body, datalab.utils.commands.notebook_environment(), as_dict=False)
    schema = datalab.bigquery.Schema(record)
    datalab.bigquery.Table(args['name']).create(schema=schema,
                                                overwrite=args['overwrite'])
  except Exception as e:
    print('Failed to create table %s: %s' % (args['name'], e))
Implements the BigQuery cell magic used to create datasets and tables .
49,031
def _delete_cell(args, _):
  """Implements the BigQuery cell magic used to delete datasets and tables.

  Failures are reported with print() rather than raised.
  """
  name = args['name']
  if args['command'] == 'dataset':
    try:
      datalab.bigquery.Dataset(name).delete()
    except Exception as e:
      print('Failed to delete dataset %s: %s' % (name, e))
  else:
    try:
      datalab.bigquery.Table(name).delete()
    except Exception as e:
      print('Failed to delete table %s: %s' % (name, e))
Implements the BigQuery cell magic used to delete datasets and tables .
49,032
def _udf_cell(args, js):
  """Implements the bigquery_udf cell magic for ipython notebooks.

  Parses the jsdoc-style input/output declarations and any @import
  directives from the JavaScript body, then binds a UDF object into the
  notebook environment under the given module name.
  """
  variable_name = args['module']
  if not variable_name:
    raise Exception('Declaration must be of the form %%bigquery udf --module <variable name>')

  # Locate the input row and output emitter jsdoc type declarations.
  spec_pattern = r'\{\{([^}]+)\}\}'
  spec_part_pattern = r'[a-z_][a-z0-9_]*'
  specs = re.findall(spec_pattern, js)
  if len(specs) < 2:
    raise Exception('The JavaScript must declare the input row and output emitter parameters '
                    'using valid jsdoc format comments.\n'
                    'The input row param declaration must be typed as {{field:type, field2:type}} '
                    'and the output emitter param declaration must be typed as '
                    'function({{field:type, field2:type}}.')

  input_spec_parts = re.findall(spec_part_pattern, specs[0], flags=re.IGNORECASE)
  if len(input_spec_parts) % 2 != 0:
    raise Exception('Invalid input row param declaration. The jsdoc type expression must '
                    'define an object with field and type pairs.')
  inputs = [(n, t) for n, t in zip(input_spec_parts[0::2], input_spec_parts[1::2])]

  output_spec_parts = re.findall(spec_part_pattern, specs[1], flags=re.IGNORECASE)
  if len(output_spec_parts) % 2 != 0:
    raise Exception('Invalid output emitter param declaration. The jsdoc type expression must '
                    'define a function accepting an an object with field and type pairs.')
  outputs = [(n, t) for n, t in zip(output_spec_parts[0::2], output_spec_parts[1::2])]

  # Collect any @import gs://... directives.
  import_pattern = r'@import[\s]+(gs://[a-z\d][a-z\d_\.\-]*[a-z\d]/[^\n\r]+)'
  imports = re.findall(import_pattern, js)

  # Split the cell into the UDF function itself and any surrounding support code.
  split_pattern = r'(.*)(/\*.*?@param.*?@param.*?\*/\w*\n\w*function\w*\(.*?^}\n?)(.*)'
  parts = re.match(split_pattern, js, re.MULTILINE | re.DOTALL)
  support_code = ''
  if parts:
    support_code = (parts.group(1) + parts.group(3)).strip()
    if len(support_code):
      js = parts.group(2)

  udf = datalab.bigquery.UDF(inputs, outputs, variable_name, js, support_code, imports)
  datalab.utils.commands.notebook_environment()[variable_name] = udf
Implements the bigquery_udf cell magic for ipython notebooks .
49,033
def _pipeline_cell(args, cell_body):
  """Implements the BigQuery cell magic used to validate, execute or deploy BQ pipelines."""
  if args['action'] == 'deploy':
    raise Exception('Deploying a pipeline is not yet supported')

  # Expose only UDF objects from the notebook environment to the query.
  env = {key: value
         for key, value in datalab.utils.commands.notebook_environment().items()
         if isinstance(value, datalab.bigquery._udf.UDF)}
  query = _get_query_argument(args, cell_body, env)
  if args['verbose']:
    print(query.sql)

  if args['action'] == 'dryrun':
    print(query.sql)
    result = query.execute_dry_run()
    return datalab.bigquery._query_stats.QueryStats(
        total_bytes=result['totalBytesProcessed'], is_cached=result['cacheHit'])
  if args['action'] == 'run':
    return query.execute(args['target'], table_mode=args['mode'],
                         use_cache=not args['nocache'],
                         allow_large_results=args['large'],
                         dialect=args['dialect'],
                         billing_tier=args['billing']).results
Implements the BigQuery cell magic used to validate execute or deploy BQ pipelines .
49,034
def _table_line(args):
  """Implements the BigQuery table magic used to display tables."""
  name = args['table']
  table = _get_table(name)
  if not (table and table.exists()):
    raise Exception('Table %s does not exist; cannot display' % name)
  fields = args['cols'].split(',') if args['cols'] else None
  html = _table_viewer(table, rows_per_page=args['rows'], fields=fields)
  return IPython.core.display.HTML(html)
Implements the BigQuery table magic used to display tables .
49,035
def _get_schema(name):
  """Given a variable or table name, get the Schema if it exists.

  Returns:
    The Schema, or None if the name resolves to neither a Schema nor an
    object carrying one.
  """
  item = datalab.utils.commands.get_notebook_item(name)
  if not item:
    item = _get_table(name)

  if isinstance(item, datalab.bigquery.Schema):
    return item
  if hasattr(item, 'schema') and isinstance(item.schema, datalab.bigquery._schema.Schema):
    return item.schema
  return None
Given a variable or table name get the Schema if it exists .
49,036
def _render_table(data, fields=None):
  """Helper to render a list of dictionaries as an HTML display object."""
  markup = datalab.utils.commands.HtmlBuilder.render_table(data, fields)
  return IPython.core.display.HTML(markup)
Helper to render a list of dictionaries as an HTML display object .
49,037
def _datasets_line(args):
  """Implements the BigQuery datasets magic used to display datasets in a project."""
  pattern = args['filter'] if args['filter'] else '*'
  matching = [str(dataset)
              for dataset in datalab.bigquery.Datasets(args['project'])
              if fnmatch.fnmatch(str(dataset), pattern)]
  return _render_list(matching)
Implements the BigQuery datasets magic used to display datasets in a project .
49,038
def _tables_line(args):
  """Implements the BigQuery tables magic used to display tables in a dataset."""
  pattern = args['filter'] if args['filter'] else '*'
  if args['dataset']:
    # A single dataset, optionally qualified by project.
    if args['project'] is None:
      datasets = [datalab.bigquery.Dataset(args['dataset'])]
    else:
      datasets = [datalab.bigquery.Dataset((args['project'], args['dataset']))]
  else:
    datasets = datalab.bigquery.Datasets(args['project'])

  tables = []
  for dataset in datasets:
    tables.extend(str(table) for table in dataset if fnmatch.fnmatch(str(table), pattern))
  return _render_list(tables)
Implements the BigQuery tables magic used to display tables in a dataset .
49,039
def _extract_line(args):
  """Implements the BigQuery extract magic used to extract table data to GCS."""
  name = args['source']
  source = datalab.utils.commands.get_notebook_item(name)
  if not source:
    source = _get_table(name)

  if not source:
    raise Exception('No source named %s found' % name)
  if isinstance(source, datalab.bigquery.Table) and not source.exists():
    raise Exception('Table %s does not exist' % name)

  job = source.extract(
      args['destination'],
      format='CSV' if args['format'] == 'csv' else 'NEWLINE_DELIMITED_JSON',
      compress=args['compress'],
      csv_delimiter=args['delimiter'],
      csv_header=args['header'])
  if job.failed:
    raise Exception('Extract failed: %s' % str(job.fatal_error))
  elif job.errors:
    raise Exception('Extract completed with errors: %s' % str(job.errors))
Implements the BigQuery extract magic used to extract table data to GCS .
49,040
def bigquery(line, cell=None):
  """Implements the bigquery cell magic for ipython notebooks."""
  # Only expose the notebook environment when the line uses $variable substitution.
  namespace = datalab.utils.commands.notebook_environment() if '$' in line else {}
  return datalab.utils.commands.handle_magic_line(line, cell, _bigquery_parser,
                                                  namespace=namespace)
Implements the bigquery cell magic for ipython notebooks .
49,041
def table(name=None, mode='create', use_cache=True, priority='interactive',
          allow_large_results=False):
  """Construct a query output object where the result is a table.

  Args:
    name: the result table name.
    mode: the table creation mode ('create' by default).
    use_cache: whether cached query results may be used.
    priority: the query priority.
    allow_large_results: whether large result sets are permitted.
  Returns:
    A configured QueryOutput.
  """
  out = QueryOutput()
  out._output_type = 'table'
  out._table_name = name
  out._table_mode = mode
  out._use_cache = use_cache
  out._priority = priority
  out._allow_large_results = allow_large_results
  return out
Construct a query output object where the result is a table
49,042
def file(path, format='csv', csv_delimiter=',', csv_header=True, compress=False,
         use_cache=True):
  """Construct a query output object where the result is either a local file or a GCS path.

  Args:
    path: the destination path (local file or GCS URI).
    format: the output format ('csv' by default).
    csv_delimiter: the CSV field delimiter.
    csv_header: whether to write a CSV header row.
    compress: whether to compress the output.
    use_cache: whether cached query results may be used.
  Returns:
    A configured QueryOutput.
  """
  output = QueryOutput()
  output._output_type = 'file'
  output._file_path = path
  output._file_format = format
  output._csv_delimiter = csv_delimiter
  output._csv_header = csv_header
  output._compress_file = compress
  # Fix: use_cache was accepted but silently dropped; store it like the
  # sibling factories table() and dataframe() do.
  output._use_cache = use_cache
  return output
Construct a query output object where the result is either a local file or a GCS path
49,043
def dataframe(start_row=0, max_rows=None, use_cache=True):
  """Construct a query output object where the result is a dataframe.

  Args:
    start_row: the first result row to include.
    max_rows: an optional cap on the number of rows.
    use_cache: whether cached query results may be used.
  Returns:
    A configured QueryOutput.
  """
  out = QueryOutput()
  out._output_type = 'dataframe'
  out._dataframe_start_row = start_row
  out._dataframe_max_rows = max_rows
  out._use_cache = use_cache
  return out
Construct a query output object where the result is a dataframe
49,044
def list():
  """List running TensorBoard instances as a DataFrame with pid, logdir and port.

  NOTE: this public API name shadows the builtin list() within this module.
  """
  arg_parser = argparse.ArgumentParser()
  arg_parser.add_argument('--logdir')
  arg_parser.add_argument('--port')

  instances = []
  for proc in psutil.process_iter():
    if proc.name() != 'tensorboard' or proc.status() == psutil.STATUS_ZOMBIE:
      continue
    # Drop the interpreter/script prefix and parse the tensorboard flags.
    parsed = arg_parser.parse_args(proc.cmdline()[2:])
    instances.append({'pid': proc.pid, 'logdir': parsed.logdir, 'port': parsed.port})
  return pd.DataFrame(instances)
List running TensorBoard instances .
49,045
def start(logdir):
  """Start a TensorBoard instance serving the given log directory.

  Returns:
    The pid of the TensorBoard process.
  Raises:
    Exception if the server does not come up within ~10 seconds.
  """
  if logdir.startswith('gs://'):
    # Check the user can read the GCS path before launching.
    datalab.storage._api.Api.verify_permitted_to_read(logdir)

  port = datalab.utils.pick_unused_port()
  proc = subprocess.Popen(['tensorboard', '--logdir=' + logdir, '--port=' + str(port)])

  # Poll for up to ~10 seconds for the HTTP server to come up.
  for _ in range(10):
    if datalab.utils.is_http_running_on(port):
      basepath = os.environ.get('DATALAB_ENDPOINT_URL', '')
      url = '%s/_proxy/%d/' % (basepath.rstrip('/'), port)
      html = '<p>TensorBoard was started successfully with pid %d. ' % proc.pid
      html += 'Click <a href="%s" target="_blank">here</a> to access it.</p>' % url
      IPython.display.display_html(html, raw=True)
      return proc.pid
    time.sleep(1)
  raise Exception('Cannot start TensorBoard.')
Start a TensorBoard instance .
49,046
def stop(pid):
  """Shut down a specific process by pid, best-effort."""
  if not psutil.pid_exists(pid):
    return
  try:
    psutil.Process(pid).kill()
  except Exception:
    # Best-effort: the process may have already exited or be inaccessible.
    pass
Shut down a specific process .
49,047
def build_graph(self):
  """Build an inception-v3 wrapper graph for a single JPEG input.

  Returns:
    (input_jpeg, embedding): a string placeholder for JPEG bytes and the
    'PreLogits' embedding tensor computed from it.
  """
  import tensorflow as tf
  input_jpeg = tf.placeholder(tf.string, shape=None)
  decoded = tf.image.decode_jpeg(input_jpeg, channels=self.CHANNELS)
  batched = tf.expand_dims(decoded, 0)
  # Convert to floats in [0, 1], resize, then rescale to [-1, 1] as the
  # inception-v3 network expects.
  floats = tf.image.convert_image_dtype(batched, dtype=tf.float32)
  resized = tf.image.resize_bilinear(floats, [self.HEIGHT, self.WIDTH],
                                     align_corners=False)
  centered = tf.subtract(resized, 0.5)
  inception_input = tf.multiply(centered, 2.0)
  with tf.contrib.slim.arg_scope(_inceptionlib.inception_v3_arg_scope()):
    _, end_points = _inceptionlib.inception_v3(inception_input, is_training=False)
  return input_jpeg, end_points['PreLogits']
Forms the core by building a wrapper around the inception graph .
49,048
def restore_from_checkpoint(self, checkpoint_path):
  """Restore inception variables (excluding the classification head) from a checkpoint."""
  import tensorflow as tf
  excluded = ['InceptionV3/AuxLogits', 'InceptionV3/Logits', 'global_step']
  restorable = tf.contrib.slim.get_variables_to_restore(exclude=excluded)
  tf.train.Saver(restorable).restore(self.tf_session, checkpoint_path)
To restore inception model variables from the checkpoint file .
49,049
def calculate_embedding(self, batch_image_bytes):
  """Run the session to compute embeddings for a batch of JPEG byte strings."""
  feed = {self.input_jpeg: batch_image_bytes}
  return self.tf_session.run(self.embedding, feed_dict=feed)
Get the embeddings for a given JPEG image .
49,050
def add_final_training_ops(self, embeddings, all_labels_count, bottleneck_tensor_size,
                           hidden_layer_size=BOTTLENECK_TENSOR_SIZE // 4,
                           dropout_keep_prob=None):
  """Add a fully-connected hidden layer plus a softmax classification head.

  Args:
    embeddings: input embedding tensor.
    all_labels_count: number of output classes.
    bottleneck_tensor_size: width of the embedding input.
    hidden_layer_size: width of the hidden layer. Bug fix: the default now
      uses integer division; under Python 3 the previous `/` produced a
      float, which is not a valid layer width.
    dropout_keep_prob: if set, apply dropout with this keep probability
      (only passed during training).

  Returns:
    (softmax, logits) tensors.
  """
  with tf.name_scope('input'):
    bottleneck_input = tf.placeholder_with_default(
        embeddings, shape=[None, bottleneck_tensor_size], name='ReshapeSqueezed')
    # Stop gradients: the inception embedding weights are frozen.
    bottleneck_with_no_gradient = tf.stop_gradient(bottleneck_input)
    with tf.name_scope('Wx_plus_b'):
      hidden = layers.fully_connected(bottleneck_with_no_gradient, hidden_layer_size)
      if dropout_keep_prob:
        hidden = tf.nn.dropout(hidden, dropout_keep_prob)
      logits = layers.fully_connected(hidden, all_labels_count, activation_fn=None)
      softmax = tf.nn.softmax(logits, name='softmax')
  return softmax, logits
Adds a new softmax and fully - connected layer for training .
49,051
def build_inception_graph(self):
  """Build the batched inception-v3 graph: JPEG strings in, PreLogits embeddings out."""
  image_str_tensor = tf.placeholder(tf.string, shape=[None])
  # Decode/resize each image independently; back_prop is disabled because
  # decoding is not differentiable.
  images = tf.map_fn(_util.decode_and_resize, image_str_tensor,
                     back_prop=False, dtype=tf.uint8)
  images = tf.image.convert_image_dtype(images, dtype=tf.float32)
  # Rescale from [0, 1] to [-1, 1] as inception expects.
  images = tf.subtract(images, 0.5)
  images = tf.multiply(images, 2.0)
  with slim.arg_scope(_inceptionlib.inception_v3_arg_scope()):
    _, end_points = _inceptionlib.inception_v3(images, is_training=False)
  embeddings = tf.squeeze(end_points['PreLogits'], [1, 2], name='SpatialSqueeze')
  return image_str_tensor, embeddings
Builds an inception graph and add the necessary input & output tensors .
49,052
def build_graph(self, data_paths, batch_size, graph_mod):
  """Builds a generic graph for training, eval, or prediction.

  Args:
    data_paths: input example files (ignored for PREDICT, which uses a
      placeholder instead).
    batch_size: batch size used when reading examples.
    graph_mod: a GraphMod value selecting which inputs/ops are built.

  Returns:
    A GraphReferences holding handles to the graph's key tensors and ops.
  """
  tensors = GraphReferences()
  is_training = graph_mod == GraphMod.TRAIN
  if data_paths:
    # Shuffle only while training; eval reads the data twice (num_epochs=2).
    _, tensors.examples = _util.read_examples(
        data_paths, batch_size, shuffle=is_training,
        num_epochs=None if is_training else 2)
  else:
    tensors.examples = tf.placeholder(tf.string, name='input', shape=(None,))
  if graph_mod == GraphMod.PREDICT:
    # Prediction computes embeddings from raw JPEG bytes via inception.
    inception_input, inception_embeddings = self.build_inception_graph()
    embeddings = inception_embeddings
    tensors.input_jpeg = inception_input
  else:
    # Training/eval read precomputed embeddings from serialized TF examples.
    with tf.name_scope('inputs'):
      feature_map = {
          'image_uri': tf.FixedLenFeature(shape=[], dtype=tf.string,
                                          default_value=['']),
          # Default label index is len(self.labels), i.e. the "unknown" class.
          'label': tf.FixedLenFeature(shape=[1], dtype=tf.int64,
                                      default_value=[len(self.labels)]),
          'embedding': tf.FixedLenFeature(shape=[BOTTLENECK_TENSOR_SIZE],
                                          dtype=tf.float32)
      }
      parsed = tf.parse_example(tensors.examples, features=feature_map)
      labels = tf.squeeze(parsed['label'])
      uris = tf.squeeze(parsed['image_uri'])
      embeddings = parsed['embedding']
  # One extra class reserved for "unknown".
  all_labels_count = len(self.labels) + 1
  with tf.name_scope('final_ops'):
    softmax, logits = self.add_final_training_ops(
        embeddings, all_labels_count, BOTTLENECK_TENSOR_SIZE,
        dropout_keep_prob=self.dropout if is_training else None)
  prediction = tf.argmax(softmax, 1)
  tensors.predictions = [prediction, softmax, embeddings]
  if graph_mod == GraphMod.PREDICT:
    return tensors
  with tf.name_scope('evaluate'):
    loss_value = loss(logits, labels)
  if is_training:
    tensors.train, tensors.global_step = training(loss_value)
  else:
    tensors.global_step = tf.Variable(0, name='global_step', trainable=False)
    tensors.uris = uris
  loss_updates, loss_op = _util.loss(loss_value)
  accuracy_updates, accuracy_op = _util.accuracy(logits, labels)
  if not is_training:
    # NOTE(review): summaries are only added in the non-training branch here;
    # presumably training summaries come from elsewhere — confirm.
    tf.summary.scalar('accuracy', accuracy_op)
    tf.summary.scalar('loss', loss_op)
  tensors.metric_updates = loss_updates + accuracy_updates
  tensors.metric_values = [loss_op, accuracy_op]
  return tensors
Builds generic graph for training or eval .
49,053
def restore_from_checkpoint(self, session, inception_checkpoint_file, trained_checkpoint_file):
  """Restore model variables: inception weights from one checkpoint, the trained head from another.

  Args:
    session: the TF session to restore into.
    inception_checkpoint_file: checkpoint holding the pretrained inception weights.
    trained_checkpoint_file: checkpoint holding the trained final layers.
  """
  inception_exclude_scopes = ['InceptionV3/AuxLogits', 'InceptionV3/Logits',
                              'global_step', 'final_ops']
  reader = tf.train.NewCheckpointReader(inception_checkpoint_file)
  var_to_shape_map = reader.get_variable_to_shape_map()
  all_vars = tf.contrib.slim.get_variables_to_restore(exclude=inception_exclude_scopes)
  # Only restore variables actually present in the inception checkpoint.
  inception_vars = {var.op.name: var for var in all_vars
                    if var.op.name in var_to_shape_map}
  inception_saver = tf.train.Saver(inception_vars)
  inception_saver.restore(session, inception_checkpoint_file)
  # Bug fix: list(...) is required under Python 3, where dict.keys() returns
  # a view that cannot be concatenated to a list with `+`.
  trained_vars = tf.contrib.slim.get_variables_to_restore(
      exclude=inception_exclude_scopes + list(inception_vars.keys()))
  trained_saver = tf.train.Saver(trained_vars)
  trained_saver.restore(session, trained_checkpoint_file)
To restore model variables from the checkpoint file .
49,054
def build_prediction_graph(self):
  """Build the prediction graph and its named input/output endpoints.

  Returns:
    (inputs, outputs): dicts of tensors for serving signatures.
  """
  tensors = self.build_graph(None, 1, GraphMod.PREDICT)
  keys_placeholder = tf.placeholder(tf.string, shape=[None])
  inputs = {'key': keys_placeholder, 'image_bytes': tensors.input_jpeg}
  # Pass the keys straight through so outputs can be joined with inputs.
  keys = tf.identity(keys_placeholder)
  labels = self.labels + ['UNKNOWN']
  label_lookup = tf.contrib.lookup.index_to_string_table_from_tensor(
      mapping=tf.constant(labels))
  predicted_label = label_lookup.lookup(tensors.predictions[0])
  # Replicate the label list once per instance in the batch.
  labels_row = tf.expand_dims(tf.constant(labels), 0)
  num_instance = tf.shape(keys)
  labels_tensors_n = tf.tile(labels_row,
                             tf.concat(axis=0, values=[num_instance, [1]]))
  outputs = {
      'key': keys,
      'prediction': predicted_label,
      'labels': labels_tensors_n,
      'scores': tensors.predictions[1],
  }
  return inputs, outputs
Builds prediction graph and registers appropriate endpoints .
49,055
def export(self, last_checkpoint, output_dir):
  """Build a prediction graph and export it as a SavedModel to output_dir."""
  logging.info('Exporting prediction graph to %s', output_dir)
  with tf.Session(graph=tf.Graph()) as sess:
    inputs, outputs = self.build_prediction_graph()
    signature_def_map = {
        'serving_default': signature_def_utils.predict_signature_def(inputs, outputs)
    }
    sess.run(tf.global_variables_initializer())
    self.restore_from_checkpoint(sess, self.inception_checkpoint_file,
                                 last_checkpoint)
    # Local variables and lookup tables must be initialized at serving time.
    init_op_serving = control_flow_ops.group(
        variables.local_variables_initializer(), tf.tables_initializer())
    builder = saved_model_builder.SavedModelBuilder(output_dir)
    builder.add_meta_graph_and_variables(sess, [tag_constants.SERVING],
                                         signature_def_map=signature_def_map,
                                         legacy_init_op=init_op_serving)
    builder.save(False)
Builds a prediction graph and exports the model .
49,056
def format_metric_values(self, metric_values):
  """Render [loss, accuracy] metric values as a human-readable string.

  Falls back to 'N/A' for any value that is missing or unformattable.
  """
  loss_part = accuracy_part = 'N/A'
  try:
    loss_part = 'loss: %.3f' % metric_values[0]
    accuracy_part = 'accuracy: %.3f' % metric_values[1]
  except (TypeError, IndexError):
    # Keep whatever was formatted so far; the rest stays 'N/A'.
    pass
  return '%s, %s' % (loss_part, accuracy_part)
Formats metric values - used for logging purpose .
49,057
def package_and_copy(package_root_dir, setup_py, output_tar_path):
  """Build an sdist from package_root_dir using setup_py and copy it to GCS.

  Args:
    package_root_dir: directory containing the package sources.
    setup_py: path to the setup.py to use (copied into the package root).
    output_tar_path: GCS destination for the built tarball.

  Raises:
    ValueError: if output_tar_path is not a GCS path or setup_py is missing.
  """
  if not output_tar_path.startswith('gs://'):
    raise ValueError('output_tar_path needs to be a GCS path.')
  if not os.path.isfile(setup_py):
    raise ValueError('Supplied file "%s" does not exist.' % setup_py)
  dest_setup_py = os.path.join(package_root_dir, 'setup.py')
  if dest_setup_py != setup_py:
    if os.path.isfile(dest_setup_py):
      # Preserve any existing setup.py so it can be restored afterwards.
      os.rename(dest_setup_py, dest_setup_py + '._bak_')
    shutil.copyfile(setup_py, dest_setup_py)
  tempdir = tempfile.mkdtemp()
  previous_cwd = os.getcwd()
  os.chdir(package_root_dir)
  try:
    subprocess.check_call(
        ['python', dest_setup_py, 'sdist', '--format=gztar', '-d', tempdir])
    source = os.path.join(tempdir, '*.tar.gz')
    subprocess.check_call(['gsutil', 'cp', source, output_tar_path])
  finally:
    # Always restore the working directory and the original setup.py.
    os.chdir(previous_cwd)
    if dest_setup_py != setup_py:
      os.remove(dest_setup_py)
      if os.path.isfile(dest_setup_py + '._bak_'):
        os.rename(dest_setup_py + '._bak_', dest_setup_py)
    shutil.rmtree(tempdir)
Repackage an CloudML package and copy it to a staging dir .
49,058
def read_file_to_string(path):
  """Read a (possibly GCS) file and return its contents as a portable string.

  Bug fix: the file handle was previously never closed; use a context
  manager so it is released deterministically.
  """
  with tf.gfile.Open(path, 'r') as f:
    raw = f.read()
  return dlutils.python_portable_string(raw)
Read a file into a string .
49,059
def _date ( val , offset = None ) : if val is None : return val if val == '' or val == 'now' : when = datetime . datetime . utcnow ( ) elif val == 'today' : dt = datetime . datetime . utcnow ( ) when = datetime . datetime ( dt . year , dt . month , dt . day ) elif val == 'yesterday' : dt = datetime . datetime . utcnow ( ) - datetime . timedelta ( 1 ) when = datetime . datetime ( dt . year , dt . month , dt . day ) else : when = datetime . datetime . strptime ( val , "%Y%m%d" ) if offset is not None : for part in offset . split ( ',' ) : unit = part [ - 1 ] quantity = int ( part [ : - 1 ] ) if unit == 'y' : when = datetime . datetime ( year = when . year + quantity , month = when . month , day = when . day , hour = when . hour , minute = when . minute ) elif unit == 'm' : new_year = when . year new_month = when . month + quantity if new_month < 1 : new_month = - new_month new_year += 1 + ( new_month // 12 ) new_month = 12 - new_month % 12 elif new_month > 12 : new_year += ( new_month - 1 ) // 12 new_month = 1 + ( new_month - 1 ) % 12 when = datetime . datetime ( year = new_year , month = new_month , day = when . day , hour = when . hour , minute = when . minute ) elif unit == 'd' : when += datetime . timedelta ( days = quantity ) elif unit == 'h' : when += datetime . timedelta ( hours = quantity ) elif unit == 'M' : when += datetime . timedelta ( minutes = quantity ) return when
A special pseudo - type for pipeline arguments .
49,060
def _make_string_formatter(f, offset=None):
  """Return a closure that strftime-formats a date argument (with optional offset)."""
  fmt = f
  delta = offset
  def _format(v):
    # Resolve the date (applying the offset) and render it with `fmt`.
    return time.strftime(fmt, (_date(v, delta)).timetuple())
  return _format
A closure - izer for string arguments that include a format and possibly an offset .
49,061
def _make_table_formatter(f, offset=None):
  """Return a closure that resolves a table argument with a format and optional offset."""
  fmt = f
  delta = offset
  def _format(v):
    return _resolve_table(v, fmt, delta)
  return _format
A closure - izer for table arguments that include a format and possibly an offset .
49,062
def _arguments(code, module):
  """Define pipeline arguments.

  Executes the cell's Python `code` and builds an argument parser with one
  option per top-level variable the code defined, using the variable's
  value as the option's default.
  """
  arg_parser = CommandParser.create('')
  try:
    # Helper functions made available to the user's code.
    builtins = {'source': _table, 'datestring': _datestring}
    env = {}
    env.update(builtins)
    # Execute the cell code so its variable assignments land in env.
    # NOTE(review): exec of notebook-authored code is by design here, but it
    # does run arbitrary code.
    exec(code, env)
    for key in env:
      # Skip the helpers themselves and private names.
      if key in builtins or key[0] == '_':
        continue
      val = env[key]
      key = '--%s' % key
      if isinstance(val, bool):
        if val:
          arg_parser.add_argument(key, default=val, action='store_true')
        else:
          arg_parser.add_argument(key, default=val, action='store_false')
      elif isinstance(val, basestring) or isinstance(val, int) \
          or isinstance(val, float) or isinstance(val, int):
        # NOTE(review): `basestring` is Python-2 only (NameError under
        # Python 3, swallowed by the broad except below), and
        # `isinstance(val, int)` appears twice — likely a py2 `long`
        # leftover. TODO confirm the targeted Python version.
        arg_parser.add_argument(key, default=val)
      elif isinstance(val, list):
        arg_parser.add_argument(key, default=val, nargs='+')
      elif isinstance(val, tuple):
        arg_parser.add_argument(key, default=list(val), nargs='+')
      elif isinstance(val, dict) and 'type' in val:
        # Pseudo-typed values produced by the source/datestring helpers.
        if val['type'] == 'datestring':
          arg_parser.add_argument(key, default='',
                                  type=_make_string_formatter(val['format'],
                                                              offset=val['offset']))
        elif val['type'] == 'table':
          if val['format'] is not None:
            arg_parser.add_argument(key, default='',
                                    type=_make_table_formatter(val['format'],
                                                               offset=val['offset']))
          else:
            arg_parser.add_argument(key, default=val['name'], type=_make_table)
        else:
          raise Exception('Cannot generate argument for %s of type %s' % (key, type(val)))
      else:
        raise Exception('Cannot generate argument for %s of type %s' % (key, type(val)))
  except Exception as e:
    print("%%sql arguments: %s from code '%s'" % (str(e), str(code)))
  return arg_parser
Define pipeline arguments .
49,063
def _split_cell(cell, module):
  """Split a hybrid %%sql cell into leading Python code and named queries.

  Each DEFINE QUERY <name> (or bare SELECT/standard-SQL statement) becomes a
  SqlStatement stored on `module`; anything before the first query is treated
  as Python code defining arguments.

  Returns:
    The last SqlStatement defined, or None if the cell had no queries.
  """
  lines = cell.split('\n')
  code = None
  last_def = -1
  name = None
  define_wild_re = re.compile('^DEFINE\s+.*$', re.IGNORECASE)
  define_re = re.compile('^DEFINE\s+QUERY\s+([A-Z]\w*)\s*?(.*)$', re.IGNORECASE)
  select_re = re.compile('^SELECT\s*.*$', re.IGNORECASE)
  standard_sql_re = re.compile('^(CREATE|WITH|INSERT|DELETE|UPDATE)\s*.*$',
                               re.IGNORECASE)
  for i, line in enumerate(lines):
    define_match = define_re.match(line)
    select_match = select_re.match(line)
    standard_sql_match = standard_sql_re.match(line)
    if i:
      # A SELECT/standard statement only starts a new query if it is not a
      # continuation of earlier content (e.g. a subquery after '(').
      prior_content = ''.join(lines[:i]).strip()
      if select_match:
        select_match = len(prior_content) == 0 or \
            (prior_content[-1] != '(' and not standard_sql_re.match(prior_content))
      if standard_sql_match:
        standard_sql_match = len(prior_content) == 0 or \
            not standard_sql_re.match(prior_content)
    if define_match or select_match or standard_sql_match:
      if code is None:
        # First query seen: everything before it is the Python code section.
        code = ('\n'.join(lines[:i])).strip()
        if len(code):
          code += '\n'
      elif last_def >= 0:
        # Close out the previous query and store it on the module.
        query = '\n'.join([line for line in lines[last_def:i] if len(line)]).strip()
        if select_match and name != datalab.data._utils._SQL_MODULE_MAIN \
            and len(query) == 0:
          # DEFINE QUERY <name> directly followed by SELECT: not a new query.
          continue
        statement = datalab.data.SqlStatement(query, module)
        module.__dict__[name] = statement
        module.__dict__[datalab.data._utils._SQL_MODULE_LAST] = statement
      if define_match:
        name = define_match.group(1)
        # Strip the DEFINE QUERY prefix, keeping any trailing SQL text.
        lines[i] = define_match.group(2)
      else:
        name = datalab.data._utils._SQL_MODULE_MAIN
      last_def = i
    else:
      define_wild_match = define_wild_re.match(line)
      if define_wild_match:
        raise Exception('Expected "DEFINE QUERY <name>"')
  if last_def >= 0:
    # Save the final (tail) query.
    query = '\n'.join([line for line in lines[last_def:] if len(line)]).strip()
    statement = datalab.data.SqlStatement(query, module)
    module.__dict__[name] = statement
    module.__dict__[datalab.data._utils._SQL_MODULE_LAST] = statement
  if code is None:
    code = ''
  module.__dict__[datalab.data._utils._SQL_MODULE_ARGPARSE] = _arguments(code, module)
  return module.__dict__.get(datalab.data._utils._SQL_MODULE_LAST, None)
Split a hybrid %%sql cell into the Python code and the queries .
49,064
def sql_cell(args, cell):
  """Implements the %%sql cell magic: run the cell's SQL or define it as a module."""
  name = args['module'] if args['module'] else '_sql_cell'
  module = imp.new_module(name)
  query = _split_cell(cell, module)
  ipy = IPython.get_ipython()
  if not args['module']:
    # No module requested: execute immediately and return the results.
    if query:
      return datalab.bigquery.Query(query, values=ipy.user_ns) \
          .execute(dialect=args['dialect'], billing_tier=args['billing']).results
    return
  # Register the module and import it into the notebook namespace.
  sys.modules[name] = module
  exec('import %s' % name, ipy.user_ns)
Implements the SQL cell magic for ipython notebooks .
49,065
def get_reader_input_fn(train_config, preprocess_output_dir, model_type,
                        data_paths, batch_size, shuffle, num_epochs=None):
  """Return an input_fn that reads, parses, and preprocesses training examples."""
  def _input_fn():
    _, examples = util.read_examples(input_files=data_paths,
                                     batch_size=batch_size,
                                     shuffle=shuffle,
                                     num_epochs=num_epochs)
    features = util.parse_example_tensor(examples=examples,
                                         train_config=train_config,
                                         keep_target=True)
    # Separate the target column from the remaining features.
    target = features.pop(train_config['target_column'])
    features, target = util.preprocess_input(
        features=features, target=target, train_config=train_config,
        preprocess_output_dir=preprocess_output_dir, model_type=model_type)
    return features, target
  return _input_fn
Builds input layer for training .
49,066
def main(argv=None):
  """Entry point: parse arguments and run the training experiment."""
  argv = sys.argv if argv is None else argv
  args = parse_arguments(argv)
  tf.logging.set_verbosity(tf.logging.INFO)
  learn_runner.run(experiment_fn=get_experiment_fn(args), output_dir=args.job_dir)
Run a Tensorflow model on the Iris dataset .
49,067
def sd(line, cell=None):
  """Implements the %sd cell magic for Stackdriver operations."""
  description = ('Execute various Stackdriver related operations. Use "%sd '
                 '<stackdriver_product> -h" for help on a specific Stackdriver product.')
  parser = google.datalab.utils.commands.CommandParser(prog='%sd',
                                                       description=description)
  _create_monitoring_subparser(parser)
  return google.datalab.utils.commands.handle_magic_line(line, cell, parser)
Implements the stackdriver cell magic for ipython notebooks .
49,068
def make_prediction_output_tensors(args, features, input_ops, model_fn_ops, keep_target):
  """Makes the final prediction output layer.

  Builds a dict of named output tensors: pass-through keys, the target label
  (if kept), and either classification labels/scores or a regression value.
  """
  target_name = feature_transforms.get_target_name(features)
  key_names = get_key_names(features)
  outputs = {}
  # Key columns are passed straight through to the output.
  outputs.update({key_name: tf.squeeze(input_ops.features[key_name])
                  for key_name in key_names})
  if is_classification_model(args.model):
    # Classification: map class indices back to string labels.
    class_names = read_vocab(args, target_name)
    table = tf.contrib.lookup.index_to_string_table_from_tensor(
        mapping=class_names, default_value='UNKNOWN')
    if keep_target:
      input_target_label = table.lookup(input_ops.features[target_name])
      outputs[PG_TARGET] = tf.squeeze(input_target_label)
    probabilities = model_fn_ops.predictions['probabilities']
    if args.top_n == 0:
      # top_n == 0 means: emit the argmax label plus one score per class.
      predicted_index = tf.argmax(probabilities, axis=1)
      predicted = table.lookup(predicted_index)
      outputs.update({PG_CLASSIFICATION_FIRST_LABEL: predicted})
      probabilities_list = tf.unstack(probabilities, axis=1)
      for class_name, p in zip(class_names, probabilities_list):
        outputs[class_name] = p
    else:
      # Emit the top-n labels and scores, with zero-padded rank suffixes.
      top_n = args.top_n
      (top_k_values, top_k_indices) = tf.nn.top_k(probabilities, k=top_n)
      top_k_labels = table.lookup(tf.to_int64(top_k_indices))
      num_digits = int(math.ceil(math.log(top_n, 10)))
      if num_digits == 0:
        num_digits = 1
      for i in range(0, top_n):
        padded_i = str(i + 1).zfill(num_digits)
        if i == 0:
          label_alias = PG_CLASSIFICATION_FIRST_LABEL
        else:
          label_alias = PG_CLASSIFICATION_LABEL_TEMPLATE % padded_i
        label_tensor_name = (tf.squeeze(
            tf.slice(top_k_labels, [0, i], [tf.shape(top_k_labels)[0], 1])))
        if i == 0:
          score_alias = PG_CLASSIFICATION_FIRST_SCORE
        else:
          score_alias = PG_CLASSIFICATION_SCORE_TEMPLATE % padded_i
        score_tensor_name = (tf.squeeze(
            tf.slice(top_k_values, [0, i], [tf.shape(top_k_values)[0], 1])))
        outputs.update({label_alias: label_tensor_name,
                        score_alias: score_tensor_name})
  else:
    # Regression: a single predicted value.
    if keep_target:
      outputs[PG_TARGET] = tf.squeeze(input_ops.features[target_name])
    scores = model_fn_ops.predictions['scores']
    outputs[PG_REGRESSION_PREDICTED_TARGET] = tf.squeeze(scores)
  return outputs
Makes the final prediction output layer .
49,069
def read_vocab(args, column_name):
  """Load the vocabulary for a column from the analysis dir; [] if absent."""
  vocab_path = os.path.join(args.analysis,
                            feature_transforms.VOCAB_ANALYSIS_FILE % column_name)
  if not file_io.file_exists(vocab_path):
    return []
  words, _ = feature_transforms.read_vocab_file(vocab_path)
  return words
Reads a vocab file if it exists .
49,070
def get_item(env, name, default=None):
  """Look up a dotted name (e.g. 'a.b.c') through nested dicts/modules.

  Returns `default` as soon as any step of the path is missing.
  """
  current = env
  for part in name.split('.'):
    if isinstance(current, dict) and part in current:
      current = current[part]
    elif isinstance(current, types.ModuleType) and part in current.__dict__:
      current = current.__dict__[part]
    else:
      return default
  return current
Get an item from a dictionary handling nested lookups with dotted notation .
49,071
def predict(model_dir, images):
  """Run local prediction and return a (label, score) pair per image."""
  results = _tf_predict(model_dir, images)
  # Pair each predicted label with its own score from the per-label scores.
  return [(predicted, label_scores[list(labels).index(predicted)])
          for predicted, labels, label_scores in results]
Local instant prediction .
49,072
def configure_pipeline(p, dataset, model_dir, output_csv, output_bq_table):
  """Configures a Dataflow pipeline for batch image prediction.

  Args:
    p: the beam pipeline to attach transforms to.
    dataset: input dataset; a 2-column schema means a target column exists.
    model_dir: directory of the trained model used for prediction.
    output_csv: if not None, CSV output path (a .schema.json is also written).
    output_bq_table: if not None, BigQuery table to write results to.
  """
  data = _util.get_sources_from_dataset(p, dataset, 'predict')
  # With a target column present, also report the target's probability.
  if len(dataset.schema) == 2:
    output_schema = [
        {'name': 'image_url', 'type': 'STRING'},
        {'name': 'target', 'type': 'STRING'},
        {'name': 'predicted', 'type': 'STRING'},
        {'name': 'target_prob', 'type': 'FLOAT'},
        {'name': 'predicted_prob', 'type': 'FLOAT'},
    ]
  else:
    output_schema = [
        {'name': 'image_url', 'type': 'STRING'},
        {'name': 'predicted', 'type': 'STRING'},
        {'name': 'predicted_prob', 'type': 'FLOAT'},
    ]
  # Images are sent to the model in batches of 20.
  results = (data
             | 'Load Images' >> beam.ParDo(LoadImagesDoFn())
             | 'Batch Inputs' >> beam.ParDo(EmitAsBatchDoFn(20))
             | 'Batch Predict' >> beam.ParDo(PredictBatchDoFn(model_dir))
             | 'Unbatch' >> beam.ParDo(UnbatchDoFn())
             | 'Process Results' >> beam.ParDo(ProcessResultsDoFn()))
  if output_csv is not None:
    schema_file = output_csv + '.schema.json'
    results_save = (results
                    | 'Prepare For Output' >> beam.ParDo(MakeCsvLineDoFn())
                    | 'Write Csv Results' >> beam.io.textio.WriteToText(
                        output_csv, shard_name_template=''))
    # Sample a single element just to trigger writing the schema file once.
    (results_save
     | 'Sample One' >> beam.transforms.combiners.Sample.FixedSizeGlobally(1)
     | 'Serialize Schema' >> beam.Map(lambda path: json.dumps(output_schema))
     | 'Write Schema' >> beam.io.textio.WriteToText(schema_file,
                                                    shard_name_template=''))
  if output_bq_table is not None:
    bq_schema_string = ','.join(x['name'] + ':' + x['type'] for x in output_schema)
    sink = beam.io.BigQuerySink(
        output_bq_table, schema=bq_schema_string,
        write_disposition=beam.io.BigQueryDisposition.WRITE_TRUNCATE)
    results | 'Write BQ Results' >> beam.io.Write(sink)
Configures a dataflow pipeline for batch prediction .
49,073
def sampling_query(sql, context, fields=None, count=5, sampling=None,
                   udfs=None, data_sources=None):
  """Build a Query that samples over the results of the given SQL."""
  sampled_sql = _sampling.Sampling.sampling_query(sql, fields, count, sampling)
  return Query(sampled_sql, context=context, udfs=udfs, data_sources=data_sources)
Returns a sampling Query for the SQL object .
49,074
def results(self, use_cache=True, dialect=None, billing_tier=None):
  """Return the query's results table, executing the query first if necessary.

  May block while the query runs.
  """
  needs_execute = (not use_cache) or (self._results is None)
  if needs_execute:
    self.execute(use_cache=use_cache, dialect=dialect, billing_tier=billing_tier)
  return self._results.results
Retrieves table of results for the query . May block if the query must be executed first .
49,075
def extract(self, storage_uris, format='csv', csv_delimiter=',', csv_header=True,
            compress=False, use_cache=True, dialect=None, billing_tier=None):
  """Export the query results to GCS, executing the query if needed."""
  result_table = self.results(use_cache=use_cache, dialect=dialect,
                              billing_tier=billing_tier)
  return result_table.extract(storage_uris, format=format,
                              csv_delimiter=csv_delimiter,
                              csv_header=csv_header, compress=compress)
Exports the query results to GCS .
49,076
def to_dataframe(self, start_row=0, max_rows=None, use_cache=True,
                 dialect=None, billing_tier=None):
  """Export the query results to a Pandas dataframe, executing if needed."""
  result_table = self.results(use_cache=use_cache, dialect=dialect,
                              billing_tier=billing_tier)
  return result_table.to_dataframe(start_row=start_row, max_rows=max_rows)
Exports the query results to a Pandas dataframe .
49,077
def sample(self, count=5, fields=None, sampling=None, use_cache=True,
           dialect=None, billing_tier=None):
  """Retrieve a sampling of rows for this query's results."""
  sampler = Query.sampling_query(self._sql, self._context, count=count,
                                 fields=fields, sampling=sampling,
                                 udfs=self._udfs,
                                 data_sources=self._data_sources)
  return sampler.results(use_cache=use_cache, dialect=dialect,
                         billing_tier=billing_tier)
Retrieves a sampling of rows for the query .
49,078
def execute(self, table_name=None, table_mode='create', use_cache=True,
            priority='interactive', allow_large_results=False,
            dialect=None, billing_tier=None):
  """Start the query, block until it completes, and return the results."""
  job = self.execute_async(table_name=table_name, table_mode=table_mode,
                           use_cache=use_cache, priority=priority,
                           allow_large_results=allow_large_results,
                           dialect=dialect, billing_tier=billing_tier)
  # Cache the results so subsequent accessors can reuse them.
  self._results = job.wait()
  return self._results
Initiate the query blocking until complete and then return the results .
49,079
def to_view(self, view_name):
  """Create a BigQuery View from this query's SQL."""
  # Imported locally to avoid a circular import at module load time.
  from . import _view
  return _view.View(view_name, self._context).create(self._sql)
Create a View from this Query .
49,080
def format_help(self):
  """Override argparse's help text to append documentation for cell-only args."""
  if not self._cell_args:
    return super(CommandParser, self).format_help()
  # Clear the epilog so it can be re-appended after the cell-args section.
  # (Note: self.epilog remains None afterwards, matching prior behavior.)
  saved_epilog = self.epilog
  self.epilog = None
  help_text = super(CommandParser, self).format_help()
  cell_args_help = '\nCell args:\n\n'
  for cell_arg, v in six.iteritems(self._cell_args):
    required = 'Required' if v['required'] else 'Optional'
    cell_args_help += '%s: %s. %s.\n\n' % (cell_arg, required, v['help'])
  help_text += cell_args_help
  if saved_epilog:
    help_text += saved_epilog + '\n\n'
  return help_text
Override help doc to add cell args .
49,081
def _get_subparsers(self):
  """Recursively collect all subparsers reachable from this parser.

  Returns:
    A flat list: direct subparsers first, then their descendants.
  """
  direct = []
  for action in self._actions:
    if isinstance(action, argparse._SubParsersAction):
      for _, subparser in action.choices.items():
        direct.append(subparser)
  # Bug fix: previously `ret = subparsers` aliased the list being iterated,
  # so recursively-found descendants were appended mid-iteration and then
  # re-expanded, yielding duplicate entries for deep parser trees. Copy the
  # direct children first, then extend with each child's descendants.
  ret = list(direct)
  for sp in direct:
    ret += sp._get_subparsers()
  return ret
Recursively get subparsers .
49,082
def _get_subparser_line_args(self, subparser_prog):
  """Return the long-option names of the subparser whose prog matches, or None."""
  for subparser in self._get_subparsers():
    if subparser.prog != subparser_prog:
      continue
    names = []
    for action in subparser._actions:
      if action.option_strings:
        for opt in action.option_strings:
          # Only long options ('--foo') are considered; strip the dashes.
          if opt.startswith('--'):
            names.append(opt[2:])
    return names
  return None
Get line args of a specified subparser by its prog .
49,083
def _get_subparser_cell_args(self, subparser_prog):
  """Return the cell args of the subparser whose prog matches, or None."""
  for subparser in self._get_subparsers():
    if subparser.prog == subparser_prog:
      return subparser._cell_args
  return None
Get cell args of a specified subparser by its prog .
49,084
def add_cell_argument(self, name, help, required=False):
  """Register an argument that may only be supplied via the cell body.

  Raises:
    ValueError: if `name` was already registered as a line argument.
  """
  if any(action.dest == name for action in self._actions):
    raise ValueError('Arg "%s" was added by add_argument already.' % name)
  self._cell_args[name] = {'required': required, 'help': help}
Add a cell only argument .
49,085
def parse(self, line, cell, namespace=None):
  """Parse a magic line and cell into a dict of arguments.

  Line args may also be supplied via the cell config; cell-only args are
  validated and merged in. Variables are expanded from `namespace` (the
  IPython user namespace by default).

  Returns:
    (args, cell): the parsed argument dict and the (possibly trimmed) cell.
  """
  if namespace is None:
    ipy = IPython.get_ipython()
    namespace = ipy.user_ns
  args = CommandParser.create_args(line, namespace)
  # Find which subparser the line addresses by matching its prog words.
  sub_parsers_progs = [x.prog for x in self._get_subparsers()]
  matched_progs = []
  for prog in sub_parsers_progs:
    # Drop the root prog name; match the remaining words against the args.
    match = prog.split()[1:]
    for i in range(len(args)):
      if args[i:i + len(match)] == match:
        matched_progs.append(prog)
        break
  matched_prog = None
  if matched_progs:
    # The most specific (longest) matching prog wins.
    matched_prog = max(matched_progs, key=lambda x: len(x.split()))
  line_args = self._get_subparser_line_args(matched_prog)
  if line_args:
    # Line args may alternatively be specified in the cell config.
    cell_config = None
    try:
      cell_config, cell = google.datalab.utils.commands.parse_config_for_selected_keys(
          cell, line_args)
    except:
      # NOTE(review): broad bare except silently ignores malformed config
      # here — confirm this is the intended best-effort behavior.
      pass
    if cell_config:
      google.datalab.utils.commands.replace_vars(cell_config, namespace)
      for arg_name in cell_config:
        arg_value = cell_config[arg_name]
        if arg_value is None:
          continue
        if '--' + arg_name in args:
          raise ValueError('config item "%s" is specified in both cell and line.'
                           % arg_name)
        # Fold cell-provided values back into the line for argparse to see.
        if isinstance(arg_value, bool):
          if arg_value:
            line += ' --%s' % arg_name
        else:
          line += ' --%s %s' % (arg_name, str(cell_config[arg_name]))
      args = CommandParser.create_args(line, namespace)
  args = vars(self.parse_args(args))
  # Handle cell-only args for the matched subparser.
  cell_config = None
  cell_args = self._get_subparser_cell_args(matched_prog)
  if cell_args:
    try:
      cell_config, _ = google.datalab.utils.commands.parse_config_for_selected_keys(
          cell, cell_args)
    except:
      pass
    if cell_config:
      google.datalab.utils.commands.replace_vars(cell_config, namespace)
    for arg in cell_args:
      if (cell_args[arg]['required'] and
          (cell_config is None or cell_config.get(arg, None) is None)):
        raise ValueError('Cell config "%s" is required.' % arg)
    if cell_config:
      args.update(cell_config)
  return args, cell
Parses a line and cell into a dictionary of arguments expanding variables from a namespace .
49,086
def _glob_events_files(self, paths, recursive):
  """Find all *.tfevents.* files under the given path globs.

  Args:
    paths: glob patterns whose matching directories are searched.
    recursive: whether to walk matched directories recursively.
  """
  event_files = []
  for path in paths:
    matched_dirs = filter(lambda x: tf.gfile.IsDirectory(x), tf.gfile.Glob(path))
    for directory in matched_dirs:
      if recursive:
        dir_listings = [(root, filenames)
                        for root, _, filenames in tf.gfile.Walk(directory)]
      else:
        dir_listings = [(directory, tf.gfile.ListDirectory(directory))]
      for root, filenames in dir_listings:
        names = fnmatch.filter(filenames, '*.tfevents.*')
        candidates = [os.path.join(root, n) for n in names]
        # Exclude anything that is itself a directory.
        event_files += [c for c in candidates if not tf.gfile.IsDirectory(c)]
  return event_files
Find all tf events files under a list of paths recursively .
49,087
def list_events(self):
  """Map each scalar-summary tag to the set of directories containing it."""
  event_dir_dict = collections.defaultdict(set)
  for event_file in self._glob_events_files(self._paths, recursive=True):
    dir_name = os.path.dirname(event_file)
    try:
      for record in tf_record.tf_record_iterator(event_file):
        event = event_pb2.Event.FromString(record)
        if event.summary is None or event.summary.value is None:
          continue
        for value in event.summary.value:
          if value.simple_value is None or value.tag is None:
            continue
          event_dir_dict[value.tag].add(dir_name)
    except tf.errors.DataLossError:
      # Skip files that are still being written or are corrupt.
      continue
  return dict(event_dir_dict)
List all scalar events in the directory .
49,088
def from_storage(source, source_format='csv', csv_options=None,
                 ignore_unknown_values=False, max_bad_records=0,
                 compressed=False, schema=None):
  """Create an external (federated) table backed by GCS objects.

  Args:
    source: a single GCS path or a list of GCS paths.
    source_format: 'csv' or 'json'.
    csv_options: optional CSVOptions; only valid for 'csv' sources. A default
        CSVOptions is used when omitted for CSV.
    ignore_unknown_values: whether extra values in rows should be ignored.
    max_bad_records: the maximum number of bad records tolerated.
    compressed: whether the source data is compressed.
    schema: optional Schema for the table.

  Returns:
    A configured FederatedTable.

  Raises:
    Exception: if csv_options is supplied for a JSON source, or the
        source_format is not recognized.
  """
  result = FederatedTable()
  if source_format == 'csv':
    result._bq_source_format = 'CSV'
    if csv_options is None:
      csv_options = _csv_options.CSVOptions()  # default CSV parsing options
  elif source_format == 'json':
    if csv_options:
      # Fixed grammar in the user-facing message ('not support' -> 'not supported').
      raise Exception('CSV options are not supported for JSON tables')
    result._bq_source_format = 'NEWLINE_DELIMITED_JSON'
  else:
    raise Exception("Invalid source format %s" % source_format)
  result._source = source if isinstance(source, list) else [source]
  result._source_format = source_format
  result._csv_options = csv_options
  result._ignore_unknown_values = ignore_unknown_values
  result._max_bad_records = max_bad_records
  result._compressed = compressed
  result._schema = schema
  return result
Create an external table for a GCS object .
49,089
def get_query_parameters(args, cell_body, date_time=None):
  """Extract query parameters from the cell body, if provided.

  Also validates the cell body schema using jsonschema to catch errors before
  sending the HTTP request. This validation isn't complete (it does not
  validate recursive schemas) but it filters out most simple mistakes.

  Args:
    args: parsed magic arguments; must contain a 'query'.
    cell_body: the cell contents holding the parameter config.
    date_time: reference time used to resolve time-based parameters; defaults
        to the current time at call time.

  Returns:
    The query parameters resolved by bigquery.Query.get_query_parameters.

  Raises:
    Exception: if the cell is not a query cell.
  """
  # BUG FIX: the original signature used 'date_time=datetime.datetime.now()',
  # which Python evaluates ONCE at import time, so every subsequent call saw
  # a stale timestamp. Use a None sentinel and resolve per call instead.
  if date_time is None:
    date_time = datetime.datetime.now()
  env = google.datalab.utils.commands.notebook_environment()
  config = google.datalab.utils.commands.parse_config(cell_body, env=env, as_dict=False)
  sql = args['query']
  if sql is None:
    raise Exception('Cannot extract query parameters in non-query cell')
  if config:
    jsonschema.validate(config, BigQuerySchema.QUERY_PARAMS_SCHEMA)
  config = config or {}
  config_parameters = config.get('parameters', [])
  return bigquery.Query.get_query_parameters(config_parameters, date_time=date_time)
Extract query parameters from the cell body, if provided. Also validates the cell body schema using jsonschema to catch errors before sending the HTTP request. This validation isn't complete, however; it does not validate recursive schemas, but it acts as a good filter against most simple schemas.
49,090
def _udf_cell(args, cell_body):
  """Implements the BigQuery UDF cell magic for IPython notebooks.

  Parses '// @param', '// @returns' and '// @import' annotation comments out
  of the UDF body, builds a UDF object, and binds it to the named notebook
  variable.
  """
  variable_name = args['name']
  if not variable_name:
    raise Exception('Declaration must be of the form %%bq udf --name <variable name>')

  # Annotation comments recognized inside the UDF source.
  param_pattern = r'^\s*\/\/\s*@param\s+([<>\w]+)\s+([<>\w,\s]+)\s*$'
  returns_pattern = r'^\s*\/\/\s*@returns\s+([<>\w,\s]+)\s*$'
  import_pattern = r'^\s*\/\/\s*@import\s+(\S+)\s*$'

  params = re.findall(param_pattern, cell_body, re.MULTILINE)
  return_types = re.findall(returns_pattern, cell_body, re.MULTILINE)
  imports = re.findall(import_pattern, cell_body, re.MULTILINE)

  # Exactly one return type annotation is required.
  if not return_types:
    raise Exception('UDF return type must be defined using // @returns <type>')
  if len(return_types) > 1:
    raise Exception('Found more than one return type definition')

  udf = bigquery.UDF(variable_name, cell_body, return_types[0], params,
                     args['language'], imports)
  google.datalab.utils.commands.notebook_environment()[variable_name] = udf
Implements the Bigquery udf cell magic for ipython notebooks .
49,091
def _datasource_cell(args, cell_body):
  """Implements the BigQuery datasource cell magic for ipython notebooks.

  Parses a schema definition from the cell body, validates it, builds an
  ExternalDataSource and binds it to the named notebook variable.
  """
  env = google.datalab.utils.commands.notebook_environment()
  record = google.datalab.utils.commands.parse_config(cell_body, env, as_dict=False)
  # Validate the user-supplied schema before constructing the data source.
  jsonschema.validate(record, BigQuerySchema.TABLE_SCHEMA_SCHEMA)
  datasource = bigquery.ExternalDataSource(
      source=args['paths'],
      source_format=(args['format'] or 'CSV').lower(),
      compressed=args['compressed'] or False,
      schema=bigquery.Schema(record['schema']))
  env[args['name']] = datasource
Implements the BigQuery datasource cell magic for ipython notebooks .
49,092
def _query_cell(args, cell_body):
  """Implements the BigQuery cell magic used to build SQL (Query) objects.

  If no --name was supplied, the query is executed immediately and its result
  returned; otherwise the Query object is bound to that notebook variable.
  """
  name = args['name']
  query = bigquery.Query(cell_body,
                         env=IPython.get_ipython().user_ns,
                         udfs=args['udfs'],
                         data_sources=args['datasources'],
                         subqueries=args['subqueries'])
  if name is None:
    # Anonymous query: run it now and hand back the results.
    return query.execute().result()
  google.datalab.utils.commands.notebook_environment()[name] = query
Implements the BigQuery cell magic used to build SQL (Query) objects.
49,093
def _get_table(name):
  """Given a variable or table name, get a Table if it exists.

  Checks the notebook namespace first, then a cache of previously-resolved
  existing tables, and finally BigQuery itself. Returns None if no such
  table exists.
  """
  item = google.datalab.utils.commands.get_notebook_item(name)
  if isinstance(item, bigquery.Table):
    return item
  # Not a notebook variable; consult the cache of known-existing tables.
  if name in _existing_table_cache:
    return _existing_table_cache[name]
  table = bigquery.Table(name)
  if not table.exists():
    return None
  _existing_table_cache[name] = table
  return table
Given a variable or table name get a Table if it exists .
49,094
def _render_list(data):
  """Render a list of objects as an HTML list object."""
  markup = google.datalab.utils.commands.HtmlBuilder.render_list(data)
  return IPython.core.display.HTML(markup)
Helper to render a list of objects as an HTML list object .
49,095
def _dataset_line(args):
  """Implements the BigQuery dataset magic subcommand used to operate on datasets.

  Supports 'list' (optionally filtered and cross-project), 'create' and
  'delete'. Create/delete failures are reported via print rather than raised.
  """
  command = args['command']
  if command == 'list':
    pattern = args['filter'] or '*'
    context = google.datalab.Context.default()
    if args['project']:
      # Target the requested project with the default credentials.
      context = google.datalab.Context(args['project'], context.credentials)
    matching = [str(dataset) for dataset in bigquery.Datasets(context)
                if fnmatch.fnmatch(str(dataset), pattern)]
    return _render_list(matching)
  elif command == 'create':
    try:
      bigquery.Dataset(args['name']).create(friendly_name=args['friendly'])
    except Exception as e:
      print('Failed to create dataset %s: %s' % (args['name'], e))
  elif command == 'delete':
    try:
      bigquery.Dataset(args['name']).delete()
    except Exception as e:
      print('Failed to delete dataset %s: %s' % (args['name'], e))
Implements the BigQuery dataset magic subcommand used to operate on datasets
49,096
def _table_cell(args, cell_body):
  """Implements the BigQuery table magic subcommand used to operate on tables.

  Supports the commands:
    list: list tables in a dataset (or all datasets), filtered by a glob.
    create: create a table from a schema given in the cell body.
    describe: render the schema of an existing table as HTML.
    delete: delete a table (failures reported via print).
    view: return the Table object for display.
  """
  if args['command'] == 'list':
    filter_ = args['filter'] if args['filter'] else '*'
    if args['dataset']:
      # Restrict listing to a single dataset (optionally in another project).
      if args['project'] is None:
        datasets = [bigquery.Dataset(args['dataset'])]
      else:
        context = google.datalab.Context(
            args['project'], google.datalab.Context.default().credentials)
        datasets = [bigquery.Dataset(args['dataset'], context)]
    else:
      # No dataset given: enumerate all datasets in the (possibly overridden)
      # project.
      default_context = google.datalab.Context.default()
      context = google.datalab.Context(default_context.project_id,
                                       default_context.credentials)
      if args['project']:
        context.set_project_id(args['project'])
      datasets = bigquery.Datasets(context)
    tables = []
    for dataset in datasets:
      tables.extend([table.full_name
                     for table in dataset
                     if fnmatch.fnmatch(table.full_name, filter_)])
    return _render_list(tables)
  elif args['command'] == 'create':
    if cell_body is None:
      print('Failed to create %s: no schema specified' % args['name'])
    else:
      try:
        record = google.datalab.utils.commands.parse_config(
            cell_body, google.datalab.utils.commands.notebook_environment(),
            as_dict=False)
        # Validate the user-provided schema before issuing the create call.
        jsonschema.validate(record, BigQuerySchema.TABLE_SCHEMA_SCHEMA)
        schema = bigquery.Schema(record['schema'])
        bigquery.Table(args['name']).create(schema=schema,
                                            overwrite=args['overwrite'])
      except Exception as e:
        print('Failed to create table %s: %s' % (args['name'], e))
  elif args['command'] == 'describe':
    name = args['name']
    table = _get_table(name)
    if not table:
      raise Exception('Could not find table %s' % name)
    html = _repr_html_table_schema(table.schema)
    return IPython.core.display.HTML(html)
  elif args['command'] == 'delete':
    try:
      bigquery.Table(args['name']).delete()
    except Exception as e:
      print('Failed to delete table %s: %s' % (args['name'], e))
  elif args['command'] == 'view':
    name = args['name']
    table = _get_table(name)
    if not table:
      raise Exception('Could not find table %s' % name)
    return table
Implements the BigQuery table magic subcommand used to operate on tables
49,097
def _extract_cell(args, cell_body):
  """Implements the BigQuery extract magic used to extract query or table data to GCS.

  The source may be a table (--table), a query (--query) or a view (--view);
  cell-body 'parameters' are resolved into table/path names when extracting a
  table. Raises if the extract job fails or completes with errors; otherwise
  returns the job result.
  """
  env = google.datalab.utils.commands.notebook_environment()
  config = google.datalab.utils.commands.parse_config(cell_body, env, False) or {}
  parameters = config.get('parameters')
  if args['table']:
    # Table extract: resolve parameter placeholders in the table name and path.
    table = google.datalab.bigquery.Query.resolve_parameters(args['table'], parameters)
    source = _get_table(table)
    if not source:
      raise Exception('Could not find table %s' % table)
    # The delimiter option only applies to CSV output.
    csv_delimiter = args['delimiter'] if args['format'] == 'csv' else None
    path = google.datalab.bigquery.Query.resolve_parameters(args['path'], parameters)
    job = source.extract(path, format=args['format'], csv_delimiter=csv_delimiter,
                         csv_header=args['header'], compress=args['compress'])
  elif args['query'] or args['view']:
    source_name = args['view'] or args['query']
    source = google.datalab.utils.commands.get_notebook_item(source_name)
    if not source:
      raise Exception('Could not find ' +
                      ('view ' + args['view'] if args['view']
                       else 'query ' + args['query']))
    # A view is wrapped in a Query; a query is used directly.
    query = source if args['query'] else bigquery.Query.from_view(source)
    # Query parameters only apply when extracting a query.
    query_params = get_query_parameters(args, cell_body) if args['query'] else None
    output_options = QueryOutput.file(path=args['path'], format=args['format'],
                                      csv_delimiter=args['delimiter'],
                                      csv_header=args['header'],
                                      compress=args['compress'],
                                      use_cache=not args['nocache'])
    context = google.datalab.utils._utils._construct_context_for_args(args)
    job = query.execute(output_options, context=context, query_params=query_params)
  else:
    raise Exception('A query, table, or view is needed to extract')
  if job.failed:
    raise Exception('Extract failed: %s' % str(job.fatal_error))
  elif job.errors:
    raise Exception('Extract completed with errors: %s' % str(job.errors))
  return job.result()
Implements the BigQuery extract magic used to extract query or table data to GCS .
49,098
def bq(line, cell=None):
  """Implements the bq cell magic for ipython notebooks.

  Delegates to the shared magic-line handler with the BigQuery parser.
  """
  parser = _bigquery_parser
  return google.datalab.utils.commands.handle_magic_line(line, cell, parser)
Implements the bq cell magic for ipython notebooks .
49,099
def _table_viewer(table, rows_per_page=25, fields=None):
  """Return a table viewer.

  Builds HTML for a (possibly paged) table viewer over the given BigQuery
  table, including metadata about row count, job timing and bytes processed.

  Args:
    table: the Table to render; must exist and be listable.
    rows_per_page: rows per page of the viewer.
    fields: optional list of field names to show; defaults to all fields.

  Raises:
    Exception: if the table does not exist.
  """
  if not table.exists():
    raise Exception('Table %s does not exist' % table.full_name)
  if not table.is_listable():
    return "Done"
  # NOTE(review): the HTML template literal appears to have been lost in
  # extraction — a bare 'u' is a NameError at runtime. Restore the original
  # u"""...""" template string here; TODO confirm against upstream source.
  _HTML_TEMPLATE = u
  if fields is None:
    fields = google.datalab.utils.commands.get_field_list(fields, table.schema)
  div_id = google.datalab.utils.commands.Html.next_id()
  # Row count is -1 when unknown (e.g. for query results still streaming).
  meta_count = ('rows: %d' % table.length) if table.length >= 0 else ''
  meta_name = table.full_name if table.job is None else ('job: %s' % table.job.id)
  if table.job:
    if table.job.cache_hit:
      meta_cost = 'cached'
    else:
      bytes = bigquery._query_stats.QueryStats._size_formatter(table.job.bytes_processed)
      meta_cost = '%s processed' % bytes
    meta_time = 'time: %.1fs' % table.job.total_time
  else:
    meta_cost = ''
    meta_time = ''
  data, total_count = google.datalab.utils.commands.get_data(
      table, fields, first_row=0, count=rows_per_page)
  if total_count < 0:
    # The count was unknown; if we fetched less than a full page we know the
    # fetched count is the total.
    fetched_count = len(data['rows'])
    if fetched_count < rows_per_page:
      total_count = fetched_count
  # Use a static table if everything fits on one page, else a paged viewer.
  chart = 'table' if 0 <= total_count <= rows_per_page else 'paged_table'
  meta_entries = [meta_count, meta_time, meta_cost, meta_name]
  meta_data = '(%s)' % (', '.join([entry for entry in meta_entries if len(entry)]))
  return _HTML_TEMPLATE.format(
      div_id=div_id,
      static_table=google.datalab.utils.commands.HtmlBuilder.render_chart_data(data),
      meta_data=meta_data,
      chart_style=chart,
      source_index=google.datalab.utils.commands.get_data_source_index(table.full_name),
      fields=','.join(fields),
      total_rows=total_count,
      rows_per_page=rows_per_page,
      data=json.dumps(data, cls=google.datalab.utils.JSONEncoder))
Return a table viewer .