idx int64 0 63k | question stringlengths 61 4.03k | target stringlengths 6 1.23k |
|---|---|---|
48,700 | def is_in_data_type_range ( self , raise_exception = True ) : return self . _header . data_type . is_in_range ( self . _values , self . _header . unit , raise_exception ) | Check if collection values are in physically possible ranges for the data_type . |
48,701 | def get_highest_values ( self , count ) : count = int ( count ) assert count <= len ( self . _values ) , 'count must be smaller than or equal to values length. {} > {}.' . format ( count , len ( self . _values ) ) assert count > 0 , 'count must be greater than 0. Got {}.' . format ( count ) highest_values = sorted ( self . _values , reverse = True ) [ 0 : count ] highest_values_index = sorted ( list ( xrange ( len ( self . _values ) ) ) , key = lambda k : self . _values [ k ] , reverse = True ) [ 0 : count ] return highest_values , highest_values_index | Get a list of the x highest values of the Data Collection and their indices . |
48,702 | def get_lowest_values ( self , count ) : count = int ( count ) assert count <= len ( self . _values ) , 'count must be <= to Data Collection len. {} > {}.' . format ( count , len ( self . _values ) ) assert count > 0 , 'count must be greater than 0. Got {}.' . format ( count ) lowest_values = sorted ( self . _values ) [ 0 : count ] lowest_values_index = sorted ( list ( xrange ( len ( self . _values ) ) ) , key = lambda k : self . _values [ k ] ) [ 0 : count ] return lowest_values , lowest_values_index | Get a list of the x lowest values of the Data Collection and their indices . |
48,703 | def get_percentile ( self , percentile ) : assert 0 <= percentile <= 100 , 'percentile must be between 0 and 100. Got {}' . format ( percentile ) return self . _percentile ( self . _values , percentile ) | Get a value representing the input percentile of the Data Collection . |
48,704 | def get_aligned_collection ( self , value = 0 , data_type = None , unit = None , mutable = None ) : header = self . _check_aligned_header ( data_type , unit ) values = self . _check_aligned_value ( value ) if mutable is None : collection = self . __class__ ( header , values , self . datetimes ) else : if self . _enumeration is None : self . _get_mutable_enumeration ( ) if mutable is False : col_obj = self . _enumeration [ 'immutable' ] [ self . _collection_type ] else : col_obj = self . _enumeration [ 'mutable' ] [ self . _collection_type ] collection = col_obj ( header , values , self . datetimes ) collection . _validated_a_period = self . _validated_a_period return collection | Return a Collection aligned with this one composed of one repeated value . |
48,705 | def duplicate ( self ) : collection = self . __class__ ( self . header . duplicate ( ) , self . values , self . datetimes ) collection . _validated_a_period = self . _validated_a_period return collection | Return a copy of the current Data Collection . |
48,706 | def to_json ( self ) : return { 'header' : self . header . to_json ( ) , 'values' : self . _values , 'datetimes' : self . datetimes , 'validated_a_period' : self . _validated_a_period } | Convert Data Collection to a dictionary . |
48,707 | def filter_collections_by_statement ( data_collections , statement ) : pattern = BaseCollection . pattern_from_collections_and_statement ( data_collections , statement ) collections = [ coll . filter_by_pattern ( pattern ) for coll in data_collections ] return collections | Generate a filtered data collections according to a conditional statement . |
48,708 | def pattern_from_collections_and_statement ( data_collections , statement ) : BaseCollection . are_collections_aligned ( data_collections ) correct_var = BaseCollection . _check_conditional_statement ( statement , len ( data_collections ) ) num_statement_clean = BaseCollection . _replace_operators ( statement ) pattern = [ ] for i in xrange ( len ( data_collections [ 0 ] ) ) : num_statement = num_statement_clean for j , coll in enumerate ( data_collections ) : var = correct_var [ j ] num_statement = num_statement . replace ( var , str ( coll [ i ] ) ) num_statement = BaseCollection . _restore_operators ( num_statement ) pattern . append ( eval ( num_statement , { } ) ) return pattern | Generate a list of booleans from data collections and a conditional statement . |
48,709 | def are_collections_aligned ( data_collections , raise_exception = True ) : if len ( data_collections ) > 1 : first_coll = data_collections [ 0 ] for coll in data_collections [ 1 : ] : if not first_coll . is_collection_aligned ( coll ) : if raise_exception is True : error_msg = '{} Data Collection is not aligned with ' '{} Data Collection.' . format ( first_coll . header . data_type , coll . header . data_type ) raise ValueError ( error_msg ) return False return True | Test if a series of Data Collections are aligned with one another . |
48,710 | def compute_function_aligned ( funct , data_collections , data_type , unit ) : data_colls = [ ] for i , func_input in enumerate ( data_collections ) : if isinstance ( func_input , BaseCollection ) : data_colls . append ( func_input ) else : try : data_collections [ i ] = float ( func_input ) except ValueError : raise TypeError ( 'Expected a number or a Data Colleciton. ' 'Got {}' . format ( type ( func_input ) ) ) if len ( data_colls ) == 0 : return funct ( * data_collections ) else : BaseCollection . are_collections_aligned ( data_colls ) val_len = len ( data_colls [ 0 ] . values ) for i , col in enumerate ( data_collections ) : data_collections [ i ] = [ col ] * val_len if isinstance ( col , float ) else col result = data_colls [ 0 ] . get_aligned_collection ( data_type = data_type , unit = unit ) for i in xrange ( val_len ) : result [ i ] = funct ( * [ col [ i ] for col in data_collections ] ) return result | Compute a function with a list of aligned data collections or individual values . |
48,711 | def _check_conditional_statement ( statement , num_collections ) : correct_var = list ( ascii_lowercase ) [ : num_collections ] st_statement = BaseCollection . _remove_operators ( statement ) parsed_st = [ s for s in st_statement if s . isalpha ( ) ] for var in parsed_st : if var not in correct_var : raise ValueError ( 'Invalid conditional statement: {}\n ' 'Statement should be a valid Python statement' ' and the variables should be named as follows: {}' . format ( statement , ', ' . join ( correct_var ) ) ) return correct_var | Method to check conditional statements to be sure that they are valid . |
48,712 | def _filter_by_statement ( self , statement ) : self . __class__ . _check_conditional_statement ( statement , 1 ) _filt_values , _filt_datetimes = [ ] , [ ] for i , a in enumerate ( self . _values ) : if eval ( statement , { 'a' : a } ) : _filt_values . append ( a ) _filt_datetimes . append ( self . datetimes [ i ] ) return _filt_values , _filt_datetimes | Filter the data collection based on a conditional statement . |
48,713 | def _filter_by_pattern ( self , pattern ) : try : _len = len ( pattern ) except TypeError : raise TypeError ( "pattern is not a list of Booleans. Got {}" . format ( type ( pattern ) ) ) _filt_values = [ d for i , d in enumerate ( self . _values ) if pattern [ i % _len ] ] _filt_datetimes = [ d for i , d in enumerate ( self . datetimes ) if pattern [ i % _len ] ] return _filt_values , _filt_datetimes | Filter the Data Collection based on a list of booleans . |
48,714 | def _check_aligned_header ( self , data_type , unit ) : if data_type is not None : assert isinstance ( data_type , DataTypeBase ) , 'data_type must be a Ladybug DataType. Got {}' . format ( type ( data_type ) ) if unit is None : unit = data_type . units [ 0 ] else : data_type = self . header . data_type unit = unit or self . header . unit return Header ( data_type , unit , self . header . analysis_period , self . header . metadata ) | Check the header inputs whenever get_aligned_collection is called . |
48,715 | def _check_aligned_value ( self , value ) : if isinstance ( value , Iterable ) and not isinstance ( value , ( str , dict , bytes , bytearray ) ) : assert len ( value ) == len ( self . _values ) , "Length of value ({}) must match " "the length of this collection's values ({})" . format ( len ( value ) , len ( self . _values ) ) values = value else : values = [ value ] * len ( self . _values ) return values | Check the value input whenever get_aligned_collection is called . |
48,716 | def from_json ( cls , data ) : if 'month' not in data : data [ 'month' ] = 1 if 'day' not in data : data [ 'day' ] = 1 if 'hour' not in data : data [ 'hour' ] = 0 if 'minute' not in data : data [ 'minute' ] = 0 if 'year' not in data : data [ 'year' ] = 2017 leap_year = True if int ( data [ 'year' ] ) == 2016 else False return cls ( data [ 'month' ] , data [ 'day' ] , data [ 'hour' ] , data [ 'minute' ] , leap_year ) | Create datetime from a dictionary . |
48,717 | def from_hoy ( cls , hoy , leap_year = False ) : return cls . from_moy ( round ( hoy * 60 ) , leap_year ) | Create Ladybug Datetime from an hour of the year . |
48,718 | def from_moy ( cls , moy , leap_year = False ) : if not leap_year : num_of_minutes_until_month = ( 0 , 44640 , 84960 , 129600 , 172800 , 217440 , 260640 , 305280 , 349920 , 393120 , 437760 , 480960 , 525600 ) else : num_of_minutes_until_month = ( 0 , 44640 , 84960 + 1440 , 129600 + 1440 , 172800 + 1440 , 217440 + 1440 , 260640 + 1440 , 305280 + 1440 , 349920 + 1440 , 393120 + 1440 , 437760 + 1440 , 480960 + 1440 , 525600 + 1440 ) for monthCount in range ( 12 ) : if int ( moy ) < num_of_minutes_until_month [ monthCount + 1 ] : month = monthCount + 1 break try : day = int ( ( moy - num_of_minutes_until_month [ month - 1 ] ) / ( 60 * 24 ) ) + 1 except UnboundLocalError : raise ValueError ( "moy must be positive and smaller than 525600. Invalid input %d" % ( moy ) ) else : hour = int ( ( moy / 60 ) % 24 ) minute = int ( moy % 60 ) return cls ( month , day , hour , minute , leap_year ) | Create Ladybug Datetime from a minute of the year . |
48,719 | def from_date_time_string ( cls , datetime_string , leap_year = False ) : dt = datetime . strptime ( datetime_string , '%d %b %H:%M' ) return cls ( dt . month , dt . day , dt . hour , dt . minute , leap_year ) | Create Ladybug DateTime from a DateTime string . |
48,720 | def _calculate_hour_and_minute ( float_hour ) : hour , minute = int ( float_hour ) , int ( round ( ( float_hour - int ( float_hour ) ) * 60 ) ) if minute == 60 : return hour + 1 , 0 else : return hour , minute | Calculate hour and minutes as integers from a float hour . |
48,721 | def add_minute ( self , minute ) : _moy = self . moy + int ( minute ) return self . __class__ . from_moy ( _moy ) | Create a new DateTime after the minutes are added . |
48,722 | def to_json ( self ) : return { 'year' : self . year , 'month' : self . month , 'day' : self . day , 'hour' : self . hour , 'minute' : self . minute } | Get date time as a dictionary . |
48,723 | def fullConn ( self , preCellsTags , postCellsTags , connParam ) : from . . import sim if sim . cfg . verbose : print ( 'Generating set of all-to-all connections (rule: %s) ...' % ( connParam [ 'label' ] ) ) paramsStrFunc = [ param for param in [ p + 'Func' for p in self . connStringFuncParams ] if param in connParam ] for paramStrFunc in paramsStrFunc : connParam [ paramStrFunc [ : - 4 ] + 'List' ] = { ( preGid , postGid ) : connParam [ paramStrFunc ] ( ** { k : v if isinstance ( v , Number ) else v ( preCellTags , postCellTags ) for k , v in connParam [ paramStrFunc + 'Vars' ] . items ( ) } ) for preGid , preCellTags in preCellsTags . items ( ) for postGid , postCellTags in postCellsTags . items ( ) } for postCellGid in postCellsTags : if postCellGid in self . gid2lid : for preCellGid , preCellTags in preCellsTags . items ( ) : self . _addCellConn ( connParam , preCellGid , postCellGid ) | Generates connections between all pre and post - syn cells |
48,724 | def fromListConn ( self , preCellsTags , postCellsTags , connParam ) : from . . import sim if sim . cfg . verbose : print ( 'Generating set of connections from list (rule: %s) ...' % ( connParam [ 'label' ] ) ) orderedPreGids = sorted ( preCellsTags ) orderedPostGids = sorted ( postCellsTags ) paramsStrFunc = [ param for param in [ p + 'Func' for p in self . connStringFuncParams ] if param in connParam ] for paramStrFunc in paramsStrFunc : connParam [ paramStrFunc [ : - 4 ] + 'List' ] = { ( orderedPreGids [ preId ] , orderedPostGids [ postId ] ) : connParam [ paramStrFunc ] ( ** { k : v if isinstance ( v , Number ) else v ( preCellsTags [ orderedPreGids [ preId ] ] , postCellsTags [ orderedPostGids [ postId ] ] ) for k , v in connParam [ paramStrFunc + 'Vars' ] . items ( ) } ) for preId , postId in connParam [ 'connList' ] } if 'weight' in connParam and isinstance ( connParam [ 'weight' ] , list ) : connParam [ 'weightFromList' ] = list ( connParam [ 'weight' ] ) if 'delay' in connParam and isinstance ( connParam [ 'delay' ] , list ) : connParam [ 'delayFromList' ] = list ( connParam [ 'delay' ] ) if 'loc' in connParam and isinstance ( connParam [ 'loc' ] , list ) : connParam [ 'locFromList' ] = list ( connParam [ 'loc' ] ) for iconn , ( relativePreId , relativePostId ) in enumerate ( connParam [ 'connList' ] ) : preCellGid = orderedPreGids [ relativePreId ] postCellGid = orderedPostGids [ relativePostId ] if postCellGid in self . gid2lid : if 'weightFromList' in connParam : connParam [ 'weight' ] = connParam [ 'weightFromList' ] [ iconn ] if 'delayFromList' in connParam : connParam [ 'delay' ] = connParam [ 'delayFromList' ] [ iconn ] if 'locFromList' in connParam : connParam [ 'loc' ] = connParam [ 'locFromList' ] [ iconn ] if preCellGid != postCellGid : self . _addCellConn ( connParam , preCellGid , postCellGid ) | Generates connections between all pre and post - syn cells based list of relative cell ids |
48,725 | def setImembPtr ( self ) : jseg = 0 for sec in list ( self . secs . values ( ) ) : hSec = sec [ 'hObj' ] for iseg , seg in enumerate ( hSec ) : self . imembPtr . pset ( jseg , seg . _ref_i_membrane_ ) jseg += 1 | Set PtrVector to point to the i_membrane_ |
48,726 | def saveWeights ( sim ) : with open ( sim . weightsfilename , 'w' ) as fid : for weightdata in sim . allWeights : fid . write ( '%0.0f' % weightdata [ 0 ] ) for i in range ( 1 , len ( weightdata ) ) : fid . write ( '\t%0.8f' % weightdata [ i ] ) fid . write ( '\n' ) print ( ( 'Saved weights as %s' % sim . weightsfilename ) ) | Save the weights for each plastic synapse |
48,727 | def validateFunction ( strFunc , netParamsVars ) : from math import exp , log , sqrt , sin , cos , tan , asin , acos , atan , sinh , cosh , tanh , pi , e rand = h . Random ( ) stringFuncRandMethods = [ 'binomial' , 'discunif' , 'erlang' , 'geometric' , 'hypergeo' , 'lognormal' , 'negexp' , 'normal' , 'poisson' , 'uniform' , 'weibull' ] for randmeth in stringFuncRandMethods : strFunc = strFunc . replace ( randmeth , 'rand.' + randmeth ) variables = { "pre_x" : 1 , "pre_y" : 1 , "pre_z" : 1 , "post_x" : 1 , "post_y" : 1 , "post_z" : 1 , "dist_x" : 1 , "dist_y" : 1 , "dist_z" : 1 , "pre_xnorm" : 1 , "pre_ynorm" : 1 , "pre_znorm" : 1 , "post_xnorm" : 1 , "post_ynorm" : 1 , "post_znorm" : 1 , "dist_xnorm" : 1 , "dist_ynorm" : 1 , "dist_znorm" : 1 , "dist_3D" : 1 , "dist_3D_border" : 1 , "dist_2D" : 1 , "dist_norm3D" : 1 , "dist_norm2D" : 1 , "rand" : rand , "exp" : exp , "log" : log , "sqrt" : sqrt , "sin" : sin , "cos" : cos , "tan" : tan , "asin" : asin , "acos" : acos , "atan" : atan , "sinh" : sinh , "cosh" : cosh , "tanh" : tanh , "pi" : pi , "e" : e } for k , v in netParamsVars . items ( ) : if isinstance ( v , Number ) : variables [ k ] = v try : eval ( strFunc , variables ) return True except : return False | returns True if strFunc can be evaluated |
48,728 | def bandpass ( data , freqmin , freqmax , df , corners = 4 , zerophase = True ) : fe = 0.5 * df low = freqmin / fe high = freqmax / fe if high - 1.0 > - 1e-6 : msg = ( "Selected high corner frequency ({}) of bandpass is at or " "above Nyquist ({}). Applying a high-pass instead." ) . format ( freqmax , fe ) warnings . warn ( msg ) return highpass ( data , freq = freqmin , df = df , corners = corners , zerophase = zerophase ) if low > 1 : msg = "Selected low corner frequency is above Nyquist." raise ValueError ( msg ) z , p , k = iirfilter ( corners , [ low , high ] , btype = 'band' , ftype = 'butter' , output = 'zpk' ) sos = zpk2sos ( z , p , k ) if zerophase : firstpass = sosfilt ( sos , data ) return sosfilt ( sos , firstpass [ : : - 1 ] ) [ : : - 1 ] else : return sosfilt ( sos , data ) | Butterworth - Bandpass Filter . |
48,729 | def bandstop ( data , freqmin , freqmax , df , corners = 4 , zerophase = False ) : fe = 0.5 * df low = freqmin / fe high = freqmax / fe if high > 1 : high = 1.0 msg = "Selected high corner frequency is above Nyquist. " + "Setting Nyquist as high corner." warnings . warn ( msg ) if low > 1 : msg = "Selected low corner frequency is above Nyquist." raise ValueError ( msg ) z , p , k = iirfilter ( corners , [ low , high ] , btype = 'bandstop' , ftype = 'butter' , output = 'zpk' ) sos = zpk2sos ( z , p , k ) if zerophase : firstpass = sosfilt ( sos , data ) return sosfilt ( sos , firstpass [ : : - 1 ] ) [ : : - 1 ] else : return sosfilt ( sos , data ) | Butterworth - Bandstop Filter . |
48,730 | def lowpass ( data , freq , df , corners = 4 , zerophase = False ) : fe = 0.5 * df f = freq / fe if f > 1 : f = 1.0 msg = "Selected corner frequency is above Nyquist. " + "Setting Nyquist as high corner." warnings . warn ( msg ) z , p , k = iirfilter ( corners , f , btype = 'lowpass' , ftype = 'butter' , output = 'zpk' ) sos = zpk2sos ( z , p , k ) if zerophase : firstpass = sosfilt ( sos , data ) return sosfilt ( sos , firstpass [ : : - 1 ] ) [ : : - 1 ] else : return sosfilt ( sos , data ) | Butterworth - Lowpass Filter . |
48,731 | def integer_decimation ( data , decimation_factor ) : if not isinstance ( decimation_factor , int ) : msg = "Decimation_factor must be an integer!" raise TypeError ( msg ) data = np . array ( data [ : : decimation_factor ] ) return data | Downsampling by applying a simple integer decimation . |
48,732 | def _distributeCells ( numCellsPop ) : from . . import sim hostCells = { } for i in range ( sim . nhosts ) : hostCells [ i ] = [ ] for i in range ( numCellsPop ) : hostCells [ sim . nextHost ] . append ( i ) sim . nextHost += 1 if sim . nextHost >= sim . nhosts : sim . nextHost = 0 if sim . cfg . verbose : print ( ( "Distributed population of %i cells on %s hosts: %s, next: %s" % ( numCellsPop , sim . nhosts , hostCells , sim . nextHost ) ) ) return hostCells | distribute cells across compute nodes using round - robin |
48,733 | def getCSD ( lfps , sampr , minf = 0.05 , maxf = 300 , norm = True , vaknin = False , spacing = 1.0 ) : datband = getbandpass ( lfps , sampr , minf , maxf ) if datband . shape [ 0 ] > datband . shape [ 1 ] : ax = 1 else : ax = 0 if vaknin : datband = Vaknin ( datband ) if norm : removemean ( datband , ax = ax ) CSD = - numpy . diff ( datband , n = 2 , axis = ax ) / spacing ** 2 return CSD | get current source density approximation using set of local field potentials with equidistant spacing first performs a lowpass filter lfps is a list or numpy array of LFPs arranged spatially by column spacing is in microns |
48,734 | def createSynapses ( self ) : synsoma = h . ExpSyn ( self . soma ( 0.5 ) ) synsoma . tau = 2 synsoma . e = 0 syndend = h . ExpSyn ( self . dend ( 0.5 ) ) syndend . tau = 2 syndend . e = 0 self . synlist . append ( synsoma ) self . synlist . append ( syndend ) | Add an exponentially decaying synapse |
48,735 | def createNetcon ( self , thresh = 10 ) : nc = h . NetCon ( self . soma ( 0.5 ) . _ref_v , None , sec = self . soma ) nc . threshold = thresh return nc | created netcon to record spikes |
48,736 | def createSections ( self ) : self . soma = h . Section ( name = 'soma' , cell = self ) self . dend = h . Section ( name = 'dend' , cell = self ) | Create the sections of the cell . |
48,737 | def defineGeometry ( self ) : self . soma . L = 18.8 self . soma . diam = 18.8 self . soma . Ra = 123.0 self . dend . L = 200.0 self . dend . diam = 1.0 self . dend . Ra = 100.0 | Set the 3D geometry of the cell . |
48,738 | def defineBiophysics ( self ) : self . soma . insert ( 'hh' ) self . soma . gnabar_hh = 0.12 self . soma . gkbar_hh = 0.036 self . soma . gl_hh = 0.003 self . soma . el_hh = - 70 self . dend . insert ( 'pas' ) self . dend . g_pas = 0.001 self . dend . e_pas = - 65 self . dend . nseg = 1000 | Assign the membrane properties across the cell . |
48,739 | def shapeplot ( h , ax , sections = None , order = 'pre' , cvals = None , clim = None , cmap = cm . YlOrBr_r , legend = True , ** kwargs ) : if sections is None : if order == 'pre' : sections = allsec_preorder ( h ) else : sections = list ( h . allsec ( ) ) if cvals is not None and clim is None : clim = [ np . nanmin ( cvals ) , np . nanmax ( cvals ) ] lines = [ ] i = 0 allDiams = [ ] for sec in sections : allDiams . append ( get_section_diams ( h , sec ) ) for isec , sec in enumerate ( sections ) : xyz = get_section_path ( h , sec ) seg_paths = interpolate_jagged ( xyz , sec . nseg ) diams = allDiams [ isec ] linewidths = diams for ( j , path ) in enumerate ( seg_paths ) : line , = plt . plot ( path [ : , 0 ] , path [ : , 1 ] , path [ : , 2 ] , '-k' , ** kwargs ) try : line . set_linewidth ( linewidths [ j ] ) except : pass if cvals is not None : if isinstance ( cvals [ i ] , numbers . Number ) : try : col = cmap ( int ( ( cvals [ i ] - clim [ 0 ] ) * 255 / ( clim [ 1 ] - clim [ 0 ] ) ) ) except : col = cmap ( 0 ) else : col = cvals [ i ] line . set_color ( col ) lines . append ( line ) i += 1 return lines | Plots a 3D shapeplot |
48,740 | def shapeplot_animate ( v , lines , nframes = None , tscale = 'linear' , clim = [ - 80 , 50 ] , cmap = cm . YlOrBr_r ) : if nframes is None : nframes = v . shape [ 0 ] if tscale == 'linear' : def animate ( i ) : i_t = int ( ( i / nframes ) * v . shape [ 0 ] ) for i_seg in range ( v . shape [ 1 ] ) : lines [ i_seg ] . set_color ( cmap ( int ( ( v [ i_t , i_seg ] - clim [ 0 ] ) * 255 / ( clim [ 1 ] - clim [ 0 ] ) ) ) ) return [ ] elif tscale == 'log' : def animate ( i ) : i_t = int ( np . round ( ( v . shape [ 0 ] ** ( 1.0 / ( nframes - 1 ) ) ) ** i - 1 ) ) for i_seg in range ( v . shape [ 1 ] ) : lines [ i_seg ] . set_color ( cmap ( int ( ( v [ i_t , i_seg ] - clim [ 0 ] ) * 255 / ( clim [ 1 ] - clim [ 0 ] ) ) ) ) return [ ] else : raise ValueError ( "Unrecognized option '%s' for tscale" % tscale ) return animate | Returns animate function which updates color of shapeplot |
48,741 | def mark_locations ( h , section , locs , markspec = 'or' , ** kwargs ) : xyz = get_section_path ( h , section ) ( r , theta , phi ) = sequential_spherical ( xyz ) rcum = np . append ( 0 , np . cumsum ( r ) ) if type ( locs ) is float or type ( locs ) is np . float64 : locs = np . array ( [ locs ] ) if type ( locs ) is list : locs = np . array ( locs ) lengths = locs * rcum [ - 1 ] xyz_marks = [ ] for targ_length in lengths : xyz_marks . append ( find_coord ( targ_length , xyz , rcum , theta , phi ) ) xyz_marks = np . array ( xyz_marks ) line , = plt . plot ( xyz_marks [ : , 0 ] , xyz_marks [ : , 1 ] , xyz_marks [ : , 2 ] , markspec , ** kwargs ) return line | Marks one or more locations along a section . Could be used to mark the location of a recording or electrical stimulation . |
48,742 | def root_sections ( h ) : roots = [ ] for section in h . allsec ( ) : sref = h . SectionRef ( sec = section ) if sref . has_parent ( ) < 0.9 : roots . append ( section ) return roots | Returns a list of all sections that have no parent . |
48,743 | def leaf_sections ( h ) : leaves = [ ] for section in h . allsec ( ) : sref = h . SectionRef ( sec = section ) if sref . nchild ( ) < 0.9 : leaves . append ( section ) return leaves | Returns a list of all sections that have no children . |
48,744 | def root_indices ( sec_list ) : roots = [ ] for i , section in enumerate ( sec_list ) : sref = h . SectionRef ( sec = section ) if sref . has_parent ( ) < 0.9 : roots . append ( i ) return roots | Returns the index of all sections without a parent . |
48,745 | def branch_order ( h , section , path = [ ] ) : path . append ( section ) sref = h . SectionRef ( sec = section ) if sref . has_parent ( ) < 0.9 : return 0 else : nchild = len ( list ( h . SectionRef ( sec = sref . parent ) . child ) ) if nchild <= 1.1 : return branch_order ( h , sref . parent , path ) else : return 1 + branch_order ( h , sref . parent , path ) | Returns the branch order of a section |
48,746 | def createCells ( self ) : if 'cellsList' in self . tags : cells = self . createCellsList ( ) elif 'numCells' in self . tags : cells = self . createCellsFixedNum ( ) elif 'density' in self . tags : cells = self . createCellsDensity ( ) elif 'gridSpacing' in self . tags : cells = self . createCellsGrid ( ) else : self . tags [ 'numCells' ] = 1 print ( 'Warninig: number or density of cells not specified for population %s; defaulting to numCells = 1' % ( self . tags [ 'pop' ] ) ) cells = self . createCellsFixedNum ( ) return cells | Function to instantiate Cell objects based on the characteristics of this population |
48,747 | def createCellsList ( self ) : from . . import sim cells = [ ] self . tags [ 'numCells' ] = len ( self . tags [ 'cellsList' ] ) for i in self . _distributeCells ( len ( self . tags [ 'cellsList' ] ) ) [ sim . rank ] : gid = sim . net . lastGid + i self . cellGids . append ( gid ) cellTags = { k : v for ( k , v ) in self . tags . items ( ) if k in sim . net . params . popTagsCopiedToCells } cellTags [ 'pop' ] = self . tags [ 'pop' ] cellTags . update ( self . tags [ 'cellsList' ] [ i ] ) for coord in [ 'x' , 'y' , 'z' ] : if coord in cellTags : cellTags [ coord + 'norm' ] = cellTags [ coord ] / getattr ( sim . net . params , 'size' + coord . upper ( ) ) elif coord + 'norm' in cellTags : cellTags [ coord ] = cellTags [ coord + 'norm' ] * getattr ( sim . net . params , 'size' + coord . upper ( ) ) else : cellTags [ coord + 'norm' ] = cellTags [ coord ] = 0 if 'cellModel' in self . tags . keys ( ) and self . tags [ 'cellModel' ] == 'Vecstim' : cellTags [ 'params' ] [ 'spkTimes' ] = self . tags [ 'cellsList' ] [ i ] [ 'spkTimes' ] cells . append ( self . cellModelClass ( gid , cellTags ) ) if sim . cfg . verbose : print ( ( 'Cell %d/%d (gid=%d) of pop %d, on node %d, ' % ( i , self . tags [ 'numCells' ] - 1 , gid , i , sim . rank ) ) ) sim . net . lastGid = sim . net . lastGid + len ( self . tags [ 'cellsList' ] ) return cells | Create population cells based on list of individual cells |
48,748 | def create ( netParams = None , simConfig = None , output = False ) : from . . import sim import __main__ as top if not netParams : netParams = top . netParams if not simConfig : simConfig = top . simConfig sim . initialize ( netParams , simConfig ) pops = sim . net . createPops ( ) cells = sim . net . createCells ( ) conns = sim . net . connectCells ( ) stims = sim . net . addStims ( ) rxd = sim . net . addRxD ( ) simData = sim . setupRecording ( ) if output : return ( pops , cells , conns , rxd , stims , simData ) | Sequence of commands to create network |
48,749 | def intervalSimulate ( interval ) : from . . import sim sim . runSimWithIntervalFunc ( interval , sim . intervalSave ) sim . fileGather ( ) | Sequence of commands to simulate network |
48,750 | def load ( filename , simConfig = None , output = False , instantiate = True , createNEURONObj = True ) : from . . import sim sim . initialize ( ) sim . cfg . createNEURONObj = createNEURONObj sim . loadAll ( filename , instantiate = instantiate , createNEURONObj = createNEURONObj ) if simConfig : sim . setSimCfg ( simConfig ) if len ( sim . net . cells ) == 0 and instantiate : pops = sim . net . createPops ( ) cells = sim . net . createCells ( ) conns = sim . net . connectCells ( ) stims = sim . net . addStims ( ) rxd = sim . net . addRxD ( ) simData = sim . setupRecording ( ) if output : try : return ( pops , cells , conns , stims , rxd , simData ) except : pass | Sequence of commands load simulate and analyse network |
48,751 | def createExportNeuroML2 ( netParams = None , simConfig = None , reference = None , connections = True , stimulations = True , output = False , format = 'xml' ) : from . . import sim import __main__ as top if not netParams : netParams = top . netParams if not simConfig : simConfig = top . simConfig sim . initialize ( netParams , simConfig ) pops = sim . net . createPops ( ) cells = sim . net . createCells ( ) conns = sim . net . connectCells ( ) stims = sim . net . addStims ( ) rxd = sim . net . addRxD ( ) simData = sim . setupRecording ( ) sim . exportNeuroML2 ( reference , connections , stimulations , format ) if output : return ( pops , cells , conns , stims , rxd , simData ) | Sequence of commands to create and export network to NeuroML2 |
48,752 | def exception ( function ) : @ functools . wraps ( function ) def wrapper ( * args , ** kwargs ) : try : return function ( * args , ** kwargs ) except Exception as e : err = "There was an exception in %s():" % ( function . __name__ ) print ( ( "%s \n %s \n%s" % ( err , e , sys . exc_info ( ) ) ) ) return - 1 return wrapper | A decorator that wraps the passed in function and prints exception should one occur |
48,753 | def getSpktSpkid ( cellGids = [ ] , timeRange = None , allCells = False ) : from . . import sim import pandas as pd try : from pandas import _lib as pandaslib except : from pandas import lib as pandaslib df = pd . DataFrame ( pandaslib . to_object_array ( [ sim . allSimData [ 'spkt' ] , sim . allSimData [ 'spkid' ] ] ) . transpose ( ) , columns = [ 'spkt' , 'spkid' ] ) if timeRange : min , max = [ int ( df [ 'spkt' ] . searchsorted ( timeRange [ i ] ) ) for i in range ( 2 ) ] else : min , max = 0 , len ( df ) if len ( cellGids ) == 0 or allCells : sel = df [ min : max ] else : sel = df [ min : max ] . query ( 'spkid in @cellGids' ) return sel , sel [ 'spkt' ] . tolist ( ) , sel [ 'spkid' ] . tolist ( ) | return spike ids and times ; with allCells = True just need to identify slice of time so can omit cellGids |
48,754 | def calcTransferResistance ( self , gid , seg_coords ) : sigma = 0.3 r05 = ( seg_coords [ 'p0' ] + seg_coords [ 'p1' ] ) / 2 dl = seg_coords [ 'p1' ] - seg_coords [ 'p0' ] nseg = r05 . shape [ 1 ] tr = np . zeros ( ( self . nsites , nseg ) ) for j in range ( self . nsites ) : rel = np . expand_dims ( self . pos [ : , j ] , axis = 1 ) rel_05 = rel - r05 r2 = np . einsum ( 'ij,ij->j' , rel_05 , rel_05 ) rlldl = np . einsum ( 'ij,ij->j' , rel_05 , dl ) dlmag = np . linalg . norm ( dl , axis = 0 ) rll = abs ( rlldl / dlmag ) rT2 = r2 - rll ** 2 up = rll + dlmag / 2 low = rll - dlmag / 2 num = up + np . sqrt ( up ** 2 + rT2 ) den = low + np . sqrt ( low ** 2 + rT2 ) tr [ j , : ] = np . log ( num / den ) / dlmag tr *= 1 / ( 4 * math . pi * sigma ) self . transferResistances [ gid ] = tr | Precompute mapping from segment to electrode locations |
48,755 | def importConnFromExcel ( fileName , sheetName ) : import openpyxl as xl colPreTags = 0 colPostTags = 1 colConnFunc = 2 colSyn = 3 colProb = 5 colWeight = 6 colAnnot = 8 outFileName = fileName [ : - 5 ] + '_' + sheetName + '.py' connText = wb = xl . load_workbook ( fileName ) sheet = wb . get_sheet_by_name ( sheetName ) numRows = sheet . get_highest_row ( ) with open ( outFileName , 'w' ) as f : f . write ( connText ) for row in range ( 1 , numRows + 1 ) : if sheet . cell ( row = row , column = colProb ) . value : print ( 'Creating conn rule for row ' + str ( row ) ) pre = sheet . cell ( row = row , column = colPreTags ) . value post = sheet . cell ( row = row , column = colPostTags ) . value func = sheet . cell ( row = row , column = colConnFunc ) . value syn = sheet . cell ( row = row , column = colSyn ) . value prob = sheet . cell ( row = row , column = colProb ) . value weight = sheet . cell ( row = row , column = colWeight ) . value line = "netParams['connParams'].append({'preConds': {" for i , cond in enumerate ( pre . split ( ';' ) ) : if i > 0 : line = line + ", " cond2 = cond . split ( '=' ) line = line + "'" + cond2 [ 0 ] . replace ( ' ' , '' ) + "': " + cond2 [ 1 ] . replace ( ' ' , '' ) line = line + "}" line = line + ",\n'postConds': {" for i , cond in enumerate ( post . split ( ';' ) ) : if i > 0 : line = line + ", " cond2 = cond . split ( '=' ) line = line + "'" + cond2 [ 0 ] . replace ( ' ' , '' ) + "': " + cond2 [ 1 ] . replace ( ' ' , '' ) line = line + "}" line = line + ",\n'connFunc': '" + func + "'" line = line + ",\n'synMech': '" + syn + "'" line = line + ",\n'probability': " + str ( prob ) line = line + ",\n'weight': " + str ( weight ) line = line + "})" line = line + '\n\n' f . write ( line ) | Import connectivity rules from Excel sheet |
48,756 | def safe_dump ( data , stream = None , ** kwds ) : return yaml . dump ( data , stream = stream , Dumper = ODYD , ** kwds ) | implementation of safe dumper using Ordered Dict Yaml Dumper |
48,757 | def dump ( data , ** kwds ) : if _usedefaultyamlloader : return yaml . safe_dump ( data , ** kwds ) else : return odyldo . safe_dump ( data , ** kwds ) | dump the data as YAML |
48,758 | def bibtex ( self ) : warnings . warn ( "bibtex should be queried with ads.ExportQuery(); You will " "hit API ratelimits very quickly otherwise." , UserWarning ) return ExportQuery ( bibcodes = self . bibcode , format = "bibtex" ) . execute ( ) | Return a BiBTeX entry for the current article . |
48,759 | def get_pdf ( article , debug = False ) : print ( 'Retrieving {0}' . format ( article ) ) identifier = [ _ for _ in article . identifier if 'arXiv' in _ ] if identifier : url = 'http://arXiv.org/pdf/{0}.{1}' . format ( identifier [ 0 ] [ 9 : 13 ] , '' . join ( _ for _ in identifier [ 0 ] [ 14 : ] if _ . isdigit ( ) ) ) else : params = { 'bibcode' : article . bibcode , 'link_type' : 'ARTICLE' , 'db_key' : 'AST' } url = requests . get ( 'http://adsabs.harvard.edu/cgi-bin/nph-data_query' , params = params ) . url q = requests . get ( url ) if not q . ok : print ( 'Error retrieving {0}: {1} for {2}' . format ( article , q . status_code , url ) ) if debug : q . raise_for_status ( ) else : return None if q . content . endswith ( '</html>' ) : print ( 'Error retrieving {0}: 200 (access denied?) for {1}' . format ( article , url ) ) return None return q . content | Download an article PDF from arXiv . |
48,760 | def summarise_pdfs ( pdfs ) : print ( 'Summarising {0} articles ({1} had errors)' . format ( len ( pdfs ) , pdfs . count ( None ) ) ) pdfs = [ _ for _ in pdfs if _ is not None ] summary = PdfFileWriter ( ) for pdf in pdfs : summary . addPage ( PdfFileReader ( StringIO ( pdf ) ) . getPage ( 0 ) ) return summary | Collate the first page from each of the PDFs provided into a single PDF . |
48,761 | def execute ( self ) : self . response = MetricsResponse . load_http_response ( self . session . post ( self . HTTP_ENDPOINT , data = self . json_payload ) ) return self . response . metrics | Execute the http request to the metrics service |
48,762 | def get_info ( cls ) : return '\n' . join ( [ str ( cls . _instances [ key ] ) for key in cls . _instances ] ) | Print all of the instantiated Singletons |
48,763 | def load_http_response ( cls , http_response ) : if not http_response . ok : raise APIResponseError ( http_response . text ) c = cls ( http_response ) c . response = http_response RateLimits . getRateLimits ( cls . __name__ ) . set ( c . response . headers ) return c | This method should return an instantiated class and set its response to the requests . Response object . |
48,764 | def token ( self ) : if self . _token is None : for v in map ( os . environ . get , TOKEN_ENVIRON_VARS ) : if v is not None : self . _token = v return self . _token for f in TOKEN_FILES : try : with open ( f ) as fp : self . _token = fp . read ( ) . strip ( ) return self . _token except IOError : pass if ads . config . token is not None : self . _token = ads . config . token return self . _token warnings . warn ( "No token found" , RuntimeWarning ) return self . _token | set the instance attribute token following the logic below , stopping whenever a token is found . Raises NoTokenFound if no token is found - environment variables TOKEN_ENVIRON_VARS - file containing plaintext as the contents in TOKEN_FILES - ads . config . token
48,765 | def session ( self ) : if self . _session is None : self . _session = requests . session ( ) self . _session . headers . update ( { "Authorization" : "Bearer {}" . format ( self . token ) , "User-Agent" : "ads-api-client/{}" . format ( __version__ ) , "Content-Type" : "application/json" , } ) return self . _session | http session interface transparent proxy to requests . session |
48,766 | def from_csv ( input_csv_pattern , headers = None , schema_file = None ) : if headers is not None : names = headers elif schema_file is not None : with _util . open_local_or_gcs ( schema_file , mode = 'r' ) as f : schema = json . load ( f ) names = [ x [ 'name' ] for x in schema ] else : raise ValueError ( 'Either headers or schema_file is needed' ) metrics = Metrics ( input_csv_pattern = input_csv_pattern , headers = names ) return metrics | Create a Metrics instance from csv file pattern . |
48,767 | def from_bigquery ( sql ) : if isinstance ( sql , bq . Query ) : sql = sql . _expanded_sql ( ) parts = sql . split ( '.' ) if len ( parts ) == 1 or len ( parts ) > 3 or any ( ' ' in x for x in parts ) : sql = '(' + sql + ')' else : sql = '`' + sql + '`' metrics = Metrics ( bigquery = sql ) return metrics | Create a Metrics instance from a bigquery query or table . |
48,768 | def _get_data_from_csv_files ( self ) : all_df = [ ] for file_name in self . _input_csv_files : with _util . open_local_or_gcs ( file_name , mode = 'r' ) as f : all_df . append ( pd . read_csv ( f , names = self . _headers ) ) df = pd . concat ( all_df , ignore_index = True ) return df | Get data from input csv files . |
48,769 | def _get_data_from_bigquery ( self , queries ) : all_df = [ ] for query in queries : all_df . append ( query . execute ( ) . result ( ) . to_dataframe ( ) ) df = pd . concat ( all_df , ignore_index = True ) return df | Get data from bigquery table or query . |
48,770 | def _expanded_sql ( self ) : if not self . _sql : self . _sql = UDF . _build_udf ( self . _name , self . _code , self . _return_type , self . _params , self . _language , self . _imports ) return self . _sql | Get the expanded BigQuery SQL string of this UDF |
48,771 | def _build_udf ( name , code , return_type , params , language , imports ) : params = ',' . join ( [ '%s %s' % named_param for named_param in params ] ) imports = ',' . join ( [ 'library="%s"' % i for i in imports ] ) if language . lower ( ) == 'sql' : udf = 'CREATE TEMPORARY FUNCTION {name} ({params})\n' + 'RETURNS {return_type}\n' + 'AS (\n' + '{code}\n' + ');' else : udf = 'CREATE TEMPORARY FUNCTION {name} ({params})\n' + 'RETURNS {return_type}\n' + 'LANGUAGE {language}\n' + 'AS \n' + 'OPTIONS (\n' + '{imports}\n' + ');' return udf . format ( name = name , params = params , return_type = return_type , language = language , code = code , imports = imports ) | Creates the UDF part of a BigQuery query using its pieces |
48,772 | def created_on ( self ) : s = self . _info . get ( 'timeCreated' , None ) return dateutil . parser . parse ( s ) if s else None | The created timestamp of the bucket as a datetime . datetime . |
48,773 | def metadata ( self ) : if self . _info is None : try : self . _info = self . _api . buckets_get ( self . _name ) except Exception as e : raise e return BucketMetadata ( self . _info ) if self . _info else None | Retrieves metadata about the bucket . |
48,774 | def object ( self , key ) : return _object . Object ( self . _name , key , context = self . _context ) | Retrieves a Storage Object for the specified key in this bucket . |
48,775 | def objects ( self , prefix = None , delimiter = None ) : return _object . Objects ( self . _name , prefix , delimiter , context = self . _context ) | Get an iterator for the objects within this bucket . |
48,776 | def delete ( self ) : if self . exists ( ) : try : self . _api . buckets_delete ( self . _name ) except Exception as e : raise e | Deletes the bucket . |
48,777 | def contains ( self , name ) : try : self . _api . buckets_get ( name ) except google . datalab . utils . RequestException as e : if e . status == 404 : return False raise e except Exception as e : raise e return True | Checks if the specified bucket exists . |
48,778 | def item ( self , key ) : return _item . Item ( self . _name , key , context = self . _context ) | Retrieves an Item object for the specified key in this bucket . |
48,779 | def items ( self , prefix = None , delimiter = None ) : return _item . Items ( self . _name , prefix , delimiter , context = self . _context ) | Get an iterator for the items within this bucket . |
48,780 | def create ( self , project_id = None ) : if not self . exists ( ) : if project_id is None : project_id = self . _api . project_id try : self . _info = self . _api . buckets_insert ( self . _name , project_id = project_id ) except Exception as e : raise e return self | Creates the bucket . |
48,781 | def create ( self , name ) : return Bucket ( name , context = self . _context ) . create ( self . _project_id ) | Creates a new bucket . |
48,782 | def train ( train_dataset , eval_dataset , analysis_dir , output_dir , features , layer_sizes , max_steps = 5000 , num_epochs = None , train_batch_size = 100 , eval_batch_size = 16 , min_eval_frequency = 100 , learning_rate = 0.01 , epsilon = 0.0005 , job_name = None , cloud = None , ) : job = train_async ( train_dataset = train_dataset , eval_dataset = eval_dataset , analysis_dir = analysis_dir , output_dir = output_dir , features = features , layer_sizes = layer_sizes , max_steps = max_steps , num_epochs = num_epochs , train_batch_size = train_batch_size , eval_batch_size = eval_batch_size , min_eval_frequency = min_eval_frequency , learning_rate = learning_rate , epsilon = epsilon , job_name = job_name , cloud = cloud , ) job . wait ( ) print ( 'Training: ' + str ( job . state ) ) | Blocking version of train_async . See documentation for train_async . |
48,783 | def list ( self , pattern = '*' ) : if self . _descriptors is None : self . _descriptors = self . _client . list_resource_descriptors ( filter_string = self . _filter_string ) return [ resource for resource in self . _descriptors if fnmatch . fnmatch ( resource . type , pattern ) ] | Returns a list of resource descriptors that match the filters . |
48,784 | def _gcs_list_buckets ( project , pattern ) : data = [ { 'Bucket' : 'gs://' + bucket . name , 'Created' : bucket . metadata . created_on } for bucket in google . datalab . storage . Buckets ( _make_context ( project ) ) if fnmatch . fnmatch ( bucket . name , pattern ) ] return google . datalab . utils . commands . render_dictionary ( data , [ 'Bucket' , 'Created' ] ) | List all Google Cloud Storage buckets that match a pattern . |
48,785 | def _gcs_list_keys ( bucket , pattern ) : data = [ { 'Name' : obj . metadata . name , 'Type' : obj . metadata . content_type , 'Size' : obj . metadata . size , 'Updated' : obj . metadata . updated_on } for obj in _gcs_get_keys ( bucket , pattern ) ] return google . datalab . utils . commands . render_dictionary ( data , [ 'Name' , 'Type' , 'Size' , 'Updated' ] ) | List all Google Cloud Storage keys in a specified bucket that match a pattern . |
48,786 | def prepare_image_transforms ( element , image_columns ) : import base64 import cStringIO from PIL import Image from tensorflow . python . lib . io import file_io as tf_file_io from apache_beam . metrics import Metrics img_error_count = Metrics . counter ( 'main' , 'ImgErrorCount' ) img_missing_count = Metrics . counter ( 'main' , 'ImgMissingCount' ) for name in image_columns : uri = element [ name ] if not uri : img_missing_count . inc ( ) continue try : with tf_file_io . FileIO ( uri , 'r' ) as f : img = Image . open ( f ) . convert ( 'RGB' ) except Exception as e : logging . exception ( 'Error processing image %s: %s' , uri , str ( e ) ) img_error_count . inc ( ) return output = cStringIO . StringIO ( ) img . save ( output , 'jpeg' ) element [ name ] = base64 . urlsafe_b64encode ( output . getvalue ( ) ) return element | Replace an images url with its jpeg bytes . |
48,787 | def decode_csv ( csv_string , column_names ) : import csv r = next ( csv . reader ( [ csv_string ] ) ) if len ( r ) != len ( column_names ) : raise ValueError ( 'csv line %s does not have %d columns' % ( csv_string , len ( column_names ) ) ) return { k : v for k , v in zip ( column_names , r ) } | Parse a csv line into a dict . |
48,788 | def encode_csv ( data_dict , column_names ) : import csv import six values = [ str ( data_dict [ x ] ) for x in column_names ] str_buff = six . StringIO ( ) writer = csv . writer ( str_buff , lineterminator = '' ) writer . writerow ( values ) return str_buff . getvalue ( ) | Builds a csv string . |
48,789 | def serialize_example ( transformed_json_data , info_dict ) : import six import tensorflow as tf def _make_int64_list ( x ) : return tf . train . Feature ( int64_list = tf . train . Int64List ( value = x ) ) def _make_bytes_list ( x ) : return tf . train . Feature ( bytes_list = tf . train . BytesList ( value = x ) ) def _make_float_list ( x ) : return tf . train . Feature ( float_list = tf . train . FloatList ( value = x ) ) if sorted ( six . iterkeys ( transformed_json_data ) ) != sorted ( six . iterkeys ( info_dict ) ) : raise ValueError ( 'Keys do not match %s, %s' % ( list ( six . iterkeys ( transformed_json_data ) ) , list ( six . iterkeys ( info_dict ) ) ) ) ex_dict = { } for name , info in six . iteritems ( info_dict ) : if info [ 'dtype' ] == tf . int64 : ex_dict [ name ] = _make_int64_list ( transformed_json_data [ name ] ) elif info [ 'dtype' ] == tf . float32 : ex_dict [ name ] = _make_float_list ( transformed_json_data [ name ] ) elif info [ 'dtype' ] == tf . string : ex_dict [ name ] = _make_bytes_list ( transformed_json_data [ name ] ) else : raise ValueError ( 'Unsupported data type %s' % info [ 'dtype' ] ) ex = tf . train . Example ( features = tf . train . Features ( feature = ex_dict ) ) return ex . SerializeToString ( ) | Makes a serialized tf . example . |
48,790 | def preprocess ( pipeline , args ) : from tensorflow . python . lib . io import file_io from trainer import feature_transforms schema = json . loads ( file_io . read_file_to_string ( os . path . join ( args . analysis , feature_transforms . SCHEMA_FILE ) ) . decode ( ) ) features = json . loads ( file_io . read_file_to_string ( os . path . join ( args . analysis , feature_transforms . FEATURES_FILE ) ) . decode ( ) ) stats = json . loads ( file_io . read_file_to_string ( os . path . join ( args . analysis , feature_transforms . STATS_FILE ) ) . decode ( ) ) column_names = [ col [ 'name' ] for col in schema ] if args . csv : all_files = [ ] for i , file_pattern in enumerate ( args . csv ) : all_files . append ( pipeline | ( 'ReadCSVFile%d' % i ) >> beam . io . ReadFromText ( file_pattern ) ) raw_data = ( all_files | 'MergeCSVFiles' >> beam . Flatten ( ) | 'ParseCSVData' >> beam . Map ( decode_csv , column_names ) ) else : columns = ', ' . join ( column_names ) query = 'SELECT {columns} FROM `{table}`' . format ( columns = columns , table = args . bigquery ) raw_data = ( pipeline | 'ReadBiqQueryData' >> beam . io . Read ( beam . io . BigQuerySource ( query = query , use_standard_sql = True ) ) ) image_columns = image_transform_columns ( features ) clean_csv_data = ( raw_data | 'PreprocessTransferredLearningTransformations' >> beam . Map ( prepare_image_transforms , image_columns ) | 'BuildCSVString' >> beam . Map ( encode_csv , column_names ) ) if args . shuffle : clean_csv_data = clean_csv_data | 'ShuffleData' >> shuffle ( ) transform_dofn = TransformFeaturesDoFn ( args . analysis , features , schema , stats ) ( transformed_data , errors ) = ( clean_csv_data | 'Batch Input' >> beam . ParDo ( EmitAsBatchDoFn ( args . batch_size ) ) | 'Run TF Graph on Batches' >> beam . ParDo ( transform_dofn ) . with_outputs ( 'errors' , main = 'main' ) ) _ = ( transformed_data | 'SerializeExamples' >> beam . Map ( serialize_example , feature_transforms . 
get_transformed_feature_info ( features , schema ) ) | 'WriteExamples' >> beam . io . WriteToTFRecord ( os . path . join ( args . output , args . prefix ) , file_name_suffix = '.tfrecord.gz' ) ) _ = ( errors | 'WriteErrors' >> beam . io . WriteToText ( os . path . join ( args . output , 'errors_' + args . prefix ) , file_name_suffix = '.txt' ) ) | Transform csv data into transformed tf . example files .
48,791 | def main ( argv = None ) : args = parse_arguments ( sys . argv if argv is None else argv ) temp_dir = os . path . join ( args . output , 'tmp' ) if args . cloud : pipeline_name = 'DataflowRunner' else : pipeline_name = 'DirectRunner' os . environ [ 'TF_CPP_MIN_LOG_LEVEL' ] = '3' options = { 'job_name' : args . job_name , 'temp_location' : temp_dir , 'project' : args . project_id , 'setup_file' : os . path . abspath ( os . path . join ( os . path . dirname ( __file__ ) , 'setup.py' ) ) , } if args . num_workers : options [ 'num_workers' ] = args . num_workers if args . worker_machine_type : options [ 'worker_machine_type' ] = args . worker_machine_type pipeline_options = beam . pipeline . PipelineOptions ( flags = [ ] , ** options ) p = beam . Pipeline ( pipeline_name , options = pipeline_options ) preprocess ( pipeline = p , args = args ) pipeline_result = p . run ( ) if not args . async : pipeline_result . wait_until_finish ( ) if args . async and args . cloud : print ( 'View job at https://console.developers.google.com/dataflow/job/%s?project=%s' % ( pipeline_result . job_id ( ) , args . project_id ) ) | Run Preprocessing as a Dataflow . |
48,792 | def start_bundle ( self , element = None ) : import tensorflow as tf from trainer import feature_transforms g = tf . Graph ( ) session = tf . Session ( graph = g ) with g . as_default ( ) : transformed_features , _ , placeholders = ( feature_transforms . build_csv_serving_tensors_for_transform_step ( analysis_path = self . _analysis_output_dir , features = self . _features , schema = self . _schema , stats = self . _stats , keep_target = True ) ) session . run ( tf . tables_initializer ( ) ) self . _session = session self . _transformed_features = transformed_features self . _input_placeholder_tensor = placeholders [ 'csv_example' ] | Build the transformation graph once .
48,793 | def process ( self , element ) : import apache_beam as beam import six import tensorflow as tf tf . logging . set_verbosity ( tf . logging . ERROR ) try : clean_element = [ ] for line in element : clean_element . append ( line . rstrip ( ) ) batch_result = self . _session . run ( fetches = self . _transformed_features , feed_dict = { self . _input_placeholder_tensor : clean_element } ) for i in range ( len ( clean_element ) ) : transformed_features = { } for name , value in six . iteritems ( batch_result ) : if isinstance ( value , tf . SparseTensorValue ) : batch_i_indices = value . indices [ : , 0 ] == i batch_i_values = value . values [ batch_i_indices ] transformed_features [ name ] = batch_i_values . tolist ( ) else : transformed_features [ name ] = value [ i ] . tolist ( ) yield transformed_features except Exception as e : yield beam . pvalue . TaggedOutput ( 'errors' , ( str ( e ) , element ) ) | Run the transformation graph on batched input data |
48,794 | def parse_row ( schema , data ) : def parse_value ( data_type , value ) : if value is not None : if value == 'null' : value = None elif data_type == 'INTEGER' : value = int ( value ) elif data_type == 'FLOAT' : value = float ( value ) elif data_type == 'TIMESTAMP' : value = datetime . datetime . utcfromtimestamp ( float ( value ) ) elif data_type == 'BOOLEAN' : value = value == 'true' elif ( type ( value ) != str ) : value = str ( value ) return value row = { } if data is None : return row for i , ( field , schema_field ) in enumerate ( zip ( data [ 'f' ] , schema ) ) : val = field [ 'v' ] name = schema_field [ 'name' ] data_type = schema_field [ 'type' ] repeated = True if 'mode' in schema_field and schema_field [ 'mode' ] == 'REPEATED' else False if repeated and val is None : row [ name ] = [ ] elif data_type == 'RECORD' : sub_schema = schema_field [ 'fields' ] if repeated : row [ name ] = [ Parser . parse_row ( sub_schema , v [ 'v' ] ) for v in val ] else : row [ name ] = Parser . parse_row ( sub_schema , val ) elif repeated : row [ name ] = [ parse_value ( data_type , v [ 'v' ] ) for v in val ] else : row [ name ] = parse_value ( data_type , val ) return row | Parses a row from query results into an equivalent object . |
48,795 | def _tf_predict ( model_dir , input_csvlines ) : with tf . Graph ( ) . as_default ( ) , tf . Session ( ) as sess : input_alias_map , output_alias_map = _tf_load_model ( sess , model_dir ) csv_tensor_name = list ( input_alias_map . values ( ) ) [ 0 ] results = sess . run ( fetches = output_alias_map , feed_dict = { csv_tensor_name : input_csvlines } ) if len ( input_csvlines ) == 1 : for k , v in six . iteritems ( results ) : if not isinstance ( v , ( list , np . ndarray ) ) : results [ k ] = [ v ] for k , v in six . iteritems ( results ) : if any ( isinstance ( x , bytes ) for x in v ) : results [ k ] = [ x . decode ( 'utf-8' ) for x in v ] return results | Prediction with a tf savedmodel . |
48,796 | def _download_images ( data , img_cols ) : images = collections . defaultdict ( list ) for d in data : for img_col in img_cols : if d . get ( img_col , None ) : if isinstance ( d [ img_col ] , Image . Image ) : images [ img_col ] . append ( d [ img_col ] ) else : with file_io . FileIO ( d [ img_col ] , 'rb' ) as fi : im = Image . open ( fi ) images [ img_col ] . append ( im ) else : images [ img_col ] . append ( '' ) return images | Download images given image columns . |
48,797 | def _get_predicton_csv_lines ( data , headers , images ) : if images : data = copy . deepcopy ( data ) for img_col in images : for d , im in zip ( data , images [ img_col ] ) : if im == '' : continue im = im . copy ( ) im . thumbnail ( ( 299 , 299 ) , Image . ANTIALIAS ) buf = BytesIO ( ) im . save ( buf , "JPEG" ) content = base64 . urlsafe_b64encode ( buf . getvalue ( ) ) . decode ( 'ascii' ) d [ img_col ] = content csv_lines = [ ] for d in data : buf = six . StringIO ( ) writer = csv . DictWriter ( buf , fieldnames = headers , lineterminator = '' ) writer . writerow ( d ) csv_lines . append ( buf . getvalue ( ) ) return csv_lines | Create CSV lines from list - of - dict data . |
48,798 | def _get_display_data_with_images ( data , images ) : if not images : return data display_data = copy . deepcopy ( data ) for img_col in images : for d , im in zip ( display_data , images [ img_col ] ) : if im == '' : d [ img_col + '_image' ] = '' else : im = im . copy ( ) im . thumbnail ( ( 128 , 128 ) , Image . ANTIALIAS ) buf = BytesIO ( ) im . save ( buf , "PNG" ) content = base64 . b64encode ( buf . getvalue ( ) ) . decode ( 'ascii' ) d [ img_col + '_image' ] = content return display_data | Create display data by converting image urls to base64 strings . |
48,799 | def get_model_schema_and_features ( model_dir ) : schema_file = os . path . join ( model_dir , 'assets.extra' , 'schema.json' ) schema = json . loads ( file_io . read_file_to_string ( schema_file ) ) features_file = os . path . join ( model_dir , 'assets.extra' , 'features.json' ) features_config = json . loads ( file_io . read_file_to_string ( features_file ) ) return schema , features_config | Get a local model s schema and features config . |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.