idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
51,600
def _get ( self , key , section = None , default = _onion_dict_guard ) : if section is not None : section_dict = self . __sections . get ( section , { } ) if key in section_dict : return section_dict [ key ] for d in self . __dictionaries : if key in d : return d [ key ] if default is _onion_dict_guard : raise KeyError ( key ) else : return default
Try to get the key from each dict in turn . If you specify the optional section it looks there first .
51,601
def _parse_snapshots ( self ) : try : snap = self . _list_snapshots ( ) except OSError as err : logging . error ( "unable to list local snapshots!" ) return { } vols = { } for line in snap . splitlines ( ) : if len ( line ) == 0 : continue name , used , refer , mountpoint , written = line . split ( '\t' ) vol_name , snap_name = name . split ( '@' , 1 ) snapshots = vols . setdefault ( vol_name , OrderedDict ( ) ) snapshots [ snap_name ] = { 'name' : name , 'used' : used , 'refer' : refer , 'mountpoint' : mountpoint , 'written' : written , } return vols
Returns all snapshots grouped by filesystem a dict of OrderedDict s The order of snapshots matters when determining parents for incremental send so it s preserved . Data is indexed by filesystem then for each filesystem we have an OrderedDict of snapshots .
51,602
def _compress ( self , cmd ) : compressor = COMPRESSORS . get ( self . compressor ) if compressor is None : return cmd compress_cmd = compressor [ 'compress' ] return "{} | {}" . format ( compress_cmd , cmd )
Adds the appropriate command to compress the zfs stream
51,603
def _decompress ( self , cmd , s3_snap ) : compressor = COMPRESSORS . get ( s3_snap . compressor ) if compressor is None : return cmd decompress_cmd = compressor [ 'decompress' ] return "{} | {}" . format ( decompress_cmd , cmd )
Adds the appropriate command to decompress the zfs stream This is determined from the metadata of the s3_snap .
51,604
def backup_full ( self , snap_name = None , dry_run = False ) : z_snap = self . _snapshot_to_backup ( snap_name ) estimated_size = self . _parse_estimated_size ( self . _cmd . shell ( "zfs send -nvP '{}'" . format ( z_snap . name ) , capture = True ) ) self . _cmd . pipe ( "zfs send '{}'" . format ( z_snap . name ) , self . _compress ( self . _pput_cmd ( estimated = estimated_size , s3_prefix = self . s3_manager . s3_prefix , snap_name = z_snap . name ) ) , dry_run = dry_run , estimated_size = estimated_size , ) return [ { 'snap_name' : z_snap . name , 'size' : estimated_size } ]
Do a full backup of a snapshot . By default latest local snapshot
51,605
def backup_incremental ( self , snap_name = None , dry_run = False ) : z_snap = self . _snapshot_to_backup ( snap_name ) to_upload = [ ] current = z_snap uploaded_meta = [ ] while True : s3_snap = self . s3_manager . get ( current . name ) if s3_snap is not None : if not s3_snap . is_healthy : raise IntegrityError ( "Broken snapshot detected {}, reason: '{}'" . format ( s3_snap . name , s3_snap . reason_broken ) ) break to_upload . append ( current ) if current . parent is None : break current = current . parent for z_snap in reversed ( to_upload ) : estimated_size = self . _parse_estimated_size ( self . _cmd . shell ( "zfs send -nvP -i '{}' '{}'" . format ( z_snap . parent . name , z_snap . name ) , capture = True ) ) self . _cmd . pipe ( "zfs send -i '{}' '{}'" . format ( z_snap . parent . name , z_snap . name ) , self . _compress ( self . _pput_cmd ( estimated = estimated_size , parent = z_snap . parent . name , s3_prefix = self . s3_manager . s3_prefix , snap_name = z_snap . name ) ) , dry_run = dry_run , estimated_size = estimated_size , ) uploaded_meta . append ( { 'snap_name' : z_snap . name , 'size' : estimated_size } ) return uploaded_meta
Uploads named snapshot or latest along with any other snapshots required for an incremental backup .
51,606
def extrude ( self , uem , reference , collar = 0.0 , skip_overlap = False ) : if collar == 0. and not skip_overlap : return uem collars , overlap_regions = [ ] , [ ] if collar > 0. : for segment in reference . itersegments ( ) : t = segment . start collars . append ( Segment ( t - .5 * collar , t + .5 * collar ) ) t = segment . end collars . append ( Segment ( t - .5 * collar , t + .5 * collar ) ) if skip_overlap : for ( segment1 , track1 ) , ( segment2 , track2 ) in reference . co_iter ( reference ) : if segment1 == segment2 and track1 == track2 : continue overlap_regions . append ( segment1 & segment2 ) segments = collars + overlap_regions return Timeline ( segments = segments ) . support ( ) . gaps ( support = uem )
Extrude reference boundary collars from uem
51,607
def common_timeline ( self , reference , hypothesis ) : timeline = reference . get_timeline ( copy = True ) timeline . update ( hypothesis . get_timeline ( copy = False ) ) return timeline . segmentation ( )
Return timeline common to both reference and hypothesis
51,608
def project ( self , annotation , timeline ) : projection = annotation . empty ( ) timeline_ = annotation . get_timeline ( copy = False ) for segment_ , segment in timeline_ . co_iter ( timeline ) : for track_ in annotation . get_tracks ( segment_ ) : track = projection . new_track ( segment , candidate = track_ ) projection [ segment , track ] = annotation [ segment_ , track_ ] return projection
Project annotation onto timeline segments
51,609
def uemify ( self , reference , hypothesis , uem = None , collar = 0. , skip_overlap = False , returns_uem = False , returns_timeline = False ) : if uem is None : r_extent = reference . get_timeline ( ) . extent ( ) h_extent = hypothesis . get_timeline ( ) . extent ( ) extent = r_extent | h_extent uem = Timeline ( segments = [ extent ] if extent else [ ] , uri = reference . uri ) warnings . warn ( "'uem' was approximated by the union of 'reference' " "and 'hypothesis' extents." ) uem = self . extrude ( uem , reference , collar = collar , skip_overlap = skip_overlap ) reference = reference . crop ( uem , mode = 'intersection' ) hypothesis = hypothesis . crop ( uem , mode = 'intersection' ) if returns_timeline : timeline = self . common_timeline ( reference , hypothesis ) reference = self . project ( reference , timeline ) hypothesis = self . project ( hypothesis , timeline ) result = ( reference , hypothesis ) if returns_uem : result += ( uem , ) if returns_timeline : result += ( timeline , ) return result
Crop reference and hypothesis to uem support
51,610
def get_hypothesis ( hypotheses , current_file ) : uri = current_file [ 'uri' ] if uri in hypotheses : return hypotheses [ uri ] tmp_uri = [ u for u in hypotheses if u in uri ] if len ( tmp_uri ) == 0 : msg = f'Could not find hypothesis for file "{uri}"; assuming empty file.' warnings . warn ( msg ) return Annotation ( uri = uri , modality = 'speaker' ) if len ( tmp_uri ) == 1 : hypothesis = hypotheses [ tmp_uri [ 0 ] ] hypothesis . uri = uri return hypothesis msg = f'Found too many hypotheses matching file "{uri}" ({uris}).' raise ValueError ( msg . format ( uri = uri , uris = tmp_uri ) )
Get hypothesis for given file
51,611
def reindex ( report ) : index = list ( report . index ) i = index . index ( 'TOTAL' ) return report . reindex ( index [ : i ] + index [ i + 1 : ] + [ 'TOTAL' ] )
Reindex report so that TOTAL is the last row
51,612
def precision_recall_curve ( y_true , scores , distances = False ) : if distances : scores = - scores precision , recall , thresholds = sklearn . metrics . precision_recall_curve ( y_true , scores , pos_label = True ) if distances : thresholds = - thresholds auc = sklearn . metrics . auc ( precision , recall , reorder = True ) return precision , recall , thresholds , auc
Precision - recall curve
51,613
def difference ( self , reference , hypothesis , uem = None , uemified = False ) : R , H , common_timeline = self . uemify ( reference , hypothesis , uem = uem , collar = self . collar , skip_overlap = self . skip_overlap , returns_timeline = True ) errors = Annotation ( uri = reference . uri , modality = reference . modality ) for segment in common_timeline : rlabels = R . get_labels ( segment , unique = False ) hlabels = H . get_labels ( segment , unique = False ) _ , details = self . matcher ( rlabels , hlabels ) for r , h in details [ MATCH_CORRECT ] : track = errors . new_track ( segment , prefix = MATCH_CORRECT ) errors [ segment , track ] = ( MATCH_CORRECT , r , h ) for r , h in details [ MATCH_CONFUSION ] : track = errors . new_track ( segment , prefix = MATCH_CONFUSION ) errors [ segment , track ] = ( MATCH_CONFUSION , r , h ) for r in details [ MATCH_MISSED_DETECTION ] : track = errors . new_track ( segment , prefix = MATCH_MISSED_DETECTION ) errors [ segment , track ] = ( MATCH_MISSED_DETECTION , r , None ) for h in details [ MATCH_FALSE_ALARM ] : track = errors . new_track ( segment , prefix = MATCH_FALSE_ALARM ) errors [ segment , track ] = ( MATCH_FALSE_ALARM , None , h ) if uemified : return reference , hypothesis , errors else : return errors
Get error analysis as Annotation
51,614
def reset ( self ) : if self . parallel : from pyannote . metrics import manager_ self . accumulated_ = manager_ . dict ( ) self . results_ = manager_ . list ( ) self . uris_ = manager_ . dict ( ) else : self . accumulated_ = dict ( ) self . results_ = list ( ) self . uris_ = dict ( ) for value in self . components_ : self . accumulated_ [ value ] = 0.
Reset accumulated components and metric values
51,615
def confidence_interval ( self , alpha = 0.9 ) : m , _ , _ = scipy . stats . bayes_mvs ( [ r [ self . metric_name_ ] for _ , r in self . results_ ] , alpha = alpha ) return m
Compute confidence interval on accumulated metric values
51,616
def compute_metric ( self , components ) : numerator = components [ PRECISION_RELEVANT_RETRIEVED ] denominator = components [ PRECISION_RETRIEVED ] if denominator == 0. : if numerator == 0 : return 1. else : raise ValueError ( '' ) else : return numerator / denominator
Compute precision from components
51,617
def compute_metric ( self , components ) : numerator = components [ RECALL_RELEVANT_RETRIEVED ] denominator = components [ RECALL_RELEVANT ] if denominator == 0. : if numerator == 0 : return 1. else : raise ValueError ( '' ) else : return numerator / denominator
Compute recall from components
51,618
def optimal_mapping ( self , reference , hypothesis , uem = None ) : if uem : reference , hypothesis = self . uemify ( reference , hypothesis , uem = uem ) mapping = self . mapper_ ( hypothesis , reference ) return mapping
Optimal label mapping
51,619
def greedy_mapping ( self , reference , hypothesis , uem = None ) : if uem : reference , hypothesis = self . uemify ( reference , hypothesis , uem = uem ) return self . mapper_ ( hypothesis , reference )
Greedy label mapping
51,620
def default_absorbers ( Tatm , ozone_file = 'apeozone_cam3_5_54.nc' , verbose = True , ) : absorber_vmr = { } absorber_vmr [ 'CO2' ] = 348. / 1E6 absorber_vmr [ 'CH4' ] = 1650. / 1E9 absorber_vmr [ 'N2O' ] = 306. / 1E9 absorber_vmr [ 'O2' ] = 0.21 absorber_vmr [ 'CFC11' ] = 0. absorber_vmr [ 'CFC12' ] = 0. absorber_vmr [ 'CFC22' ] = 0. absorber_vmr [ 'CCL4' ] = 0. xTatm = Tatm . to_xarray ( ) O3 = 0. * xTatm if ozone_file is not None : ozonefilepath = os . path . join ( os . path . dirname ( __file__ ) , 'data' , 'ozone' , ozone_file ) remotepath_http = 'http://thredds.atmos.albany.edu:8080/thredds/fileServer/CLIMLAB/ozone/' + ozone_file remotepath_opendap = 'http://thredds.atmos.albany.edu:8080/thredds/dodsC/CLIMLAB/ozone/' + ozone_file ozonedata , path = load_data_source ( local_path = ozonefilepath , remote_source_list = [ remotepath_http , remotepath_opendap ] , open_method = xr . open_dataset , remote_kwargs = { 'engine' : 'pydap' } , verbose = verbose , ) ozone_zon = ozonedata . OZONE . mean ( dim = ( 'time' , 'lon' ) ) . transpose ( 'lat' , 'lev' ) if ( 'lat' in xTatm . dims ) : O3source = ozone_zon else : weight = np . cos ( np . deg2rad ( ozonedata . lat ) ) ozone_global = ( ozone_zon * weight ) . mean ( dim = 'lat' ) / weight . mean ( dim = 'lat' ) O3source = ozone_global try : O3 = O3source . interp_like ( xTatm ) assert not np . any ( np . isnan ( O3 ) ) except : warnings . warn ( 'Some grid points are beyond the bounds of the ozone file. Ozone values will be extrapolated.' ) try : O3 = O3source . interp_like ( xTatm , kwargs = { 'fill_value' : None } ) assert not np . any ( np . isnan ( O3 ) ) except : warnings . warn ( 'Interpolation of ozone data failed. Setting O3 to zero instead.' ) O3 = 0. * xTatm absorber_vmr [ 'O3' ] = O3 . values return absorber_vmr
Initialize a dictionary of well - mixed radiatively active gases All values are volumetric mixing ratios .
51,621
def init_interface ( field ) : interface_shape = np . array ( field . shape ) interface_shape [ - 1 ] += 1 interfaces = np . tile ( False , len ( interface_shape ) ) interfaces [ - 1 ] = True interface_zero = Field ( np . zeros ( interface_shape ) , domain = field . domain , interfaces = interfaces ) return interface_zero
Return a Field object defined at the vertical interfaces of the input Field object .
51,622
def convective_adjustment_direct ( p , T , c , lapserate = 6.5 ) : alpha = const . Rd / const . g * lapserate / 1.E3 L = p . size pextended = np . insert ( p , 0 , const . ps ) Pi = np . cumprod ( ( p / pextended [ : - 1 ] ) ** alpha ) beta = 1. / Pi theta = T * beta q = Pi * c n_k = np . zeros ( L , dtype = np . int8 ) theta_k = np . zeros_like ( p ) s_k = np . zeros_like ( p ) t_k = np . zeros_like ( p ) thetaadj = Akmaev_adjustment_multidim ( theta , q , beta , n_k , theta_k , s_k , t_k ) T = thetaadj * Pi return T
Convective Adjustment to a specified lapse rate .
51,623
def Akmaev_adjustment ( theta , q , beta , n_k , theta_k , s_k , t_k ) : L = q . size k = 1 n_k [ k - 1 ] = 1 theta_k [ k - 1 ] = theta [ k - 1 ] l = 2 while True : n = 1 thistheta = theta [ l - 1 ] while True : if theta_k [ k - 1 ] <= thistheta : k += 1 break else : if n <= 1 : s = q [ l - 1 ] t = s * thistheta if n_k [ k - 1 ] <= 1 : s_k [ k - 1 ] = q [ l - n - 1 ] t_k [ k - 1 ] = s_k [ k - 1 ] * theta_k [ k - 1 ] n += n_k [ k - 1 ] s += s_k [ k - 1 ] t += t_k [ k - 1 ] s_k [ k - 1 ] = s t_k [ k - 1 ] = t thistheta = t / s if k == 1 : break k -= 1 if l == L : break l += 1 n_k [ k - 1 ] = n theta_k [ k - 1 ] = thistheta while True : while True : if n == 1 : break while True : theta [ l - 1 ] = thistheta if n == 1 : break l -= 1 n -= 1 if k == 1 : break k -= 1 l -= 1 n = n_k [ k - 1 ] thistheta = theta_k [ k - 1 ] return theta
Single column only .
51,624
def do_diagnostics ( self ) : self . OLR = self . subprocess [ 'LW' ] . flux_to_space self . LW_down_sfc = self . subprocess [ 'LW' ] . flux_to_sfc self . LW_up_sfc = self . subprocess [ 'LW' ] . flux_from_sfc self . LW_absorbed_sfc = self . LW_down_sfc - self . LW_up_sfc self . LW_absorbed_atm = self . subprocess [ 'LW' ] . absorbed self . LW_emission = self . subprocess [ 'LW' ] . emission self . ASR = ( self . subprocess [ 'SW' ] . flux_from_space - self . subprocess [ 'SW' ] . flux_to_space ) self . SW_absorbed_atm = self . subprocess [ 'SW' ] . absorbed self . SW_down_sfc = self . subprocess [ 'SW' ] . flux_to_sfc self . SW_up_sfc = self . subprocess [ 'SW' ] . flux_from_sfc self . SW_absorbed_sfc = self . SW_down_sfc - self . SW_up_sfc self . SW_up_TOA = self . subprocess [ 'SW' ] . flux_to_space self . SW_down_TOA = self . subprocess [ 'SW' ] . flux_from_space self . planetary_albedo = ( self . subprocess [ 'SW' ] . flux_to_space / self . subprocess [ 'SW' ] . flux_from_space )
Set all the diagnostics from long and shortwave radiation .
51,625
def clausius_clapeyron ( T ) : Tcel = T - tempCtoK es = 6.112 * exp ( 17.67 * Tcel / ( Tcel + 243.5 ) ) return es
Compute saturation vapor pressure as function of temperature T .
51,626
def qsat ( T , p ) : es = clausius_clapeyron ( T ) q = eps * es / ( p - ( 1 - eps ) * es ) return q
Compute saturation specific humidity as function of temperature and pressure .
51,627
def pseudoadiabat ( T , p ) : esoverp = clausius_clapeyron ( T ) / p Tcel = T - tempCtoK L = ( 2.501 - 0.00237 * Tcel ) * 1.E6 ratio = L / T / Rv dTdp = ( T / p * kappa * ( 1 + esoverp * ratio ) / ( 1 + kappa * ( cpv / Rv + ( ratio - 1 ) * ratio ) * esoverp ) ) return dTdp
Compute the local slope of the pseudoadiabat at given temperature and pressure
51,628
def _solve_implicit_banded ( current , banded_matrix ) : J = banded_matrix . shape [ 0 ] diag = np . zeros ( ( 3 , J ) ) diag [ 1 , : ] = np . diag ( banded_matrix , k = 0 ) diag [ 0 , 1 : ] = np . diag ( banded_matrix , k = 1 ) diag [ 2 , : - 1 ] = np . diag ( banded_matrix , k = - 1 ) return solve_banded ( ( 1 , 1 ) , diag , current )
Uses a banded solver for matrix inversion of a tridiagonal matrix .
51,629
def _guess_diffusion_axis ( process_or_domain ) : axes = get_axes ( process_or_domain ) diff_ax = { } for axname , ax in axes . items ( ) : if ax . num_points > 1 : diff_ax . update ( { axname : ax } ) if len ( list ( diff_ax . keys ( ) ) ) == 1 : return list ( diff_ax . keys ( ) ) [ 0 ] else : raise ValueError ( 'More than one possible diffusion axis.' )
Scans given process domain or dictionary of domains for a diffusion axis and returns appropriate name .
51,630
def _implicit_solver ( self ) : newstate = { } for varname , value in self . state . items ( ) : if self . use_banded_solver : newvar = _solve_implicit_banded ( value , self . _diffTriDiag ) else : newvar = np . linalg . solve ( self . _diffTriDiag , value ) newstate [ varname ] = newvar return newstate
Invertes and solves the matrix problem for diffusion matrix and temperature T .
51,631
def _compute_fixed ( self ) : try : lon , lat = np . meshgrid ( self . lon , self . lat ) except : lat = self . lat phi = np . deg2rad ( lat ) try : albedo = self . a0 + self . a2 * P2 ( np . sin ( phi ) ) except : albedo = np . zeros_like ( phi ) dom = next ( iter ( self . domains . values ( ) ) ) self . albedo = Field ( albedo , domain = dom )
Recompute any fixed quantities after a change in parameters
51,632
def find_icelines ( self ) : Tf = self . param [ 'Tf' ] Ts = self . state [ 'Ts' ] lat_bounds = self . domains [ 'Ts' ] . axes [ 'lat' ] . bounds self . noice = np . where ( Ts >= Tf , True , False ) self . ice = np . where ( Ts < Tf , True , False ) self . ice_area = global_mean ( self . ice * np . ones_like ( self . Ts ) ) if self . ice . all ( ) : self . icelat = np . array ( [ - 0. , 0. ] ) elif self . noice . all ( ) : self . icelat = np . array ( [ - 90. , 90. ] ) else : boundary_indices = np . where ( np . diff ( self . ice . squeeze ( ) ) ) [ 0 ] + 1 if boundary_indices . size == 1 : if self . ice [ 0 ] == True : boundary_indices = np . append ( boundary_indices , self . ice . size ) elif self . ice [ - 1 ] == True : boundary_indices = np . insert ( boundary_indices , 0 , 0 ) if boundary_indices . size == 1 : if self . ice [ 0 ] == True : boundary_indices = np . append ( boundary_indices , self . ice . size ) elif self . ice [ - 1 ] == True : boundary_indices = np . insert ( boundary_indices , 0 , 0 ) self . icelat = lat_bounds [ boundary_indices ]
Finds iceline according to the surface temperature .
51,633
def _get_current_albedo ( self ) : ice = self . subprocess [ 'iceline' ] . ice cold_albedo = self . subprocess [ 'cold_albedo' ] . albedo warm_albedo = self . subprocess [ 'warm_albedo' ] . albedo albedo = Field ( np . where ( ice , cold_albedo , warm_albedo ) , domain = self . domains [ 'Ts' ] ) return albedo
Simple step - function albedo based on ice line at temperature Tf .
51,634
def process_like ( proc ) : newproc = copy . deepcopy ( proc ) newproc . creation_date = time . strftime ( "%a, %d %b %Y %H:%M:%S %z" , time . localtime ( ) ) return newproc
Make an exact clone of a process including state and all subprocesses .
51,635
def get_axes ( process_or_domain ) : if isinstance ( process_or_domain , Process ) : dom = process_or_domain . domains else : dom = process_or_domain if isinstance ( dom , _Domain ) : return dom . axes elif isinstance ( dom , dict ) : axes = { } for thisdom in list ( dom . values ( ) ) : assert isinstance ( thisdom , _Domain ) axes . update ( thisdom . axes ) return axes else : raise TypeError ( 'dom must be a domain or dictionary of domains.' )
Returns a dictionary of all Axis in a domain or dictionary of domains .
51,636
def add_subprocesses ( self , procdict ) : if isinstance ( procdict , Process ) : try : name = procdict . name except : name = 'default' self . add_subprocess ( name , procdict ) else : for name , proc in procdict . items ( ) : self . add_subprocess ( name , proc )
Adds a dictionary of subproceses to this process .
51,637
def add_subprocess ( self , name , proc ) : if isinstance ( proc , Process ) : self . subprocess . update ( { name : proc } ) self . has_process_type_list = False for diagname , value in proc . diagnostics . items ( ) : self . add_diagnostic ( diagname , value ) else : raise ValueError ( 'subprocess must be Process object' )
Adds a single subprocess to this process .
51,638
def remove_subprocess ( self , name , verbose = True ) : try : self . subprocess . pop ( name ) except KeyError : if verbose : print ( 'WARNING: {} not found in subprocess dictionary.' . format ( name ) ) self . has_process_type_list = False
Removes a single subprocess from this process .
51,639
def set_state ( self , name , value ) : if isinstance ( value , Field ) : self . domains . update ( { name : value . domain } ) else : try : thisdom = self . state [ name ] . domain domshape = thisdom . shape except : raise ValueError ( 'State variable needs a domain.' ) value = np . atleast_1d ( value ) if value . shape == domshape : value = Field ( value , domain = thisdom ) else : raise ValueError ( 'Shape mismatch between existing domain and new state variable.' ) self . state [ name ] = value for name , value in self . state . items ( ) : if np . issubdtype ( self . state [ name ] . dtype , np . dtype ( 'int' ) . type ) : value = self . state [ name ] . astype ( float ) self . state [ name ] = value self . __setattr__ ( name , value )
Sets the variable name to a new state value .
51,640
def _add_field ( self , field_type , name , value ) : try : self . __getattribute__ ( field_type ) . update ( { name : value } ) except : raise ValueError ( 'Problem with field_type %s' % field_type ) self . __setattr__ ( name , value )
Adds a new field to a specified dictionary . The field is also added as a process attribute . field_type can be input diagnostics
51,641
def add_diagnostic ( self , name , value = None ) : self . _diag_vars . append ( name ) self . __setattr__ ( name , value )
Create a new diagnostic variable called name for this process and initialize it with the given value .
51,642
def add_input ( self , name , value = None ) : self . _input_vars . append ( name ) self . __setattr__ ( name , value )
Create a new input variable called name for this process and initialize it with the given value .
51,643
def remove_diagnostic ( self , name ) : try : delattr ( self , name ) self . _diag_vars . remove ( name ) except : print ( 'No diagnostic named {} was found.' . format ( name ) )
Removes a diagnostic from the process . diagnostic dictionary and also delete the associated process attribute .
51,644
def to_xarray ( self , diagnostics = False ) : if diagnostics : dic = self . state . copy ( ) dic . update ( self . diagnostics ) return state_to_xarray ( dic ) else : return state_to_xarray ( self . state )
Convert process variables to xarray . Dataset format .
51,645
def diagnostics ( self ) : diag_dict = { } for key in self . _diag_vars : try : diag_dict [ key ] = self . __dict__ [ key ] except : pass return diag_dict
Dictionary access to all diagnostic variables
51,646
def input ( self ) : input_dict = { } for key in self . _input_vars : try : input_dict [ key ] = getattr ( self , key ) except : pass return input_dict
Dictionary access to all input variables
51,647
def _get_Berger_data ( verbose = True ) : orbit91_pd , path = load_data_source ( local_path = local_path , remote_source_list = [ threddspath , NCDCpath ] , open_method = pd . read_csv , open_method_kwargs = { 'delim_whitespace' : True , 'skiprows' : 1 } , verbose = verbose , ) orbit = xr . Dataset ( orbit91_pd ) . rename ( { 'dim_0' : 'kyear' } ) orbit = orbit . rename ( { 'ECC' : 'ecc' , 'OMEGA' : 'long_peri' , 'OBL' : 'obliquity' , 'PREC' : 'precession' } ) orbit [ 'long_peri' ] += 180. orbit [ 'precession' ] *= - 1. orbit . attrs [ 'Description' ] = 'The Berger and Loutre (1991) orbital data table' orbit . attrs [ 'Citation' ] = 'https://doi.org/10.1016/0277-3791(91)90033-Q' orbit . attrs [ 'Source' ] = path orbit . attrs [ 'Note' ] = 'Longitude of perihelion is defined to be 0 degrees at Northern Vernal Equinox. This differs by 180 degrees from orbit91 source file.' return orbit
Read in the Berger and Loutre orbital table as a pandas dataframe convert to xarray
51,648
def load_data_source ( local_path , remote_source_list , open_method , open_method_kwargs = dict ( ) , remote_kwargs = dict ( ) , verbose = True ) : try : path = local_path data = open_method ( path , ** open_method_kwargs ) if verbose : print ( 'Opened data from {}' . format ( path ) ) except IOError : for source in remote_source_list : try : response = _download_and_cache ( source , local_path ) data = open_method ( local_path , ** open_method_kwargs ) if verbose : print ( 'Data retrieved from {} and saved locally.' . format ( source ) ) break except Exception : continue else : for source in remote_source_list : path = source try : data = open_method ( path , ** merge_two_dicts ( open_method_kwargs , remote_kwargs ) ) if verbose : print ( 'Opened data remotely from {}' . format ( source ) ) break except Exception : continue else : raise Exception ( 'All data access methods have failed.' ) return data , path
Flexible data retreiver to download and cache the data files locally .
51,649
def tril ( array , k = 0 ) : try : tril_array = np . tril ( array , k = k ) except : tril_array = np . zeros_like ( array ) shape = array . shape otherdims = shape [ : - 2 ] for index in np . ndindex ( otherdims ) : tril_array [ index ] = np . tril ( array [ index ] , k = k ) return tril_array
Lower triangle of an array . Return a copy of an array with elements above the k - th diagonal zeroed . Need a multi - dimensional version here because numpy . tril does not broadcast for numpy verison < 1 . 9 .
51,650
def flux_up ( self , fluxUpBottom , emission = None ) : if emission is None : emission = np . zeros_like ( self . absorptivity ) E = np . concatenate ( ( emission , np . atleast_1d ( fluxUpBottom ) ) , axis = - 1 ) return np . squeeze ( matrix_multiply ( self . Tup , E [ ... , np . newaxis ] ) )
Compute downwelling radiative flux at interfaces between layers .
51,651
def flux_down ( self , fluxDownTop , emission = None ) : if emission is None : emission = np . zeros_like ( self . absorptivity ) E = np . concatenate ( ( np . atleast_1d ( fluxDownTop ) , emission ) , axis = - 1 ) return np . squeeze ( matrix_multiply ( self . Tdown , E [ ... , np . newaxis ] ) )
Compute upwelling radiative flux at interfaces between layers .
51,652
def _compute_heating_rates ( self ) : ( ncol , nlay , icld , permuteseed , irng , idrv , cp , play , plev , tlay , tlev , tsfc , h2ovmr , o3vmr , co2vmr , ch4vmr , n2ovmr , o2vmr , cfc11vmr , cfc12vmr , cfc22vmr , ccl4vmr , emis , inflglw , iceflglw , liqflglw , cldfrac , ciwp , clwp , reic , relq , tauc , tauaer , ) = self . _prepare_lw_arguments ( ) if icld == 0 : cldfmcl = np . zeros ( ( ngptlw , ncol , nlay ) ) ciwpmcl = np . zeros ( ( ngptlw , ncol , nlay ) ) clwpmcl = np . zeros ( ( ngptlw , ncol , nlay ) ) reicmcl = np . zeros ( ( ncol , nlay ) ) relqmcl = np . zeros ( ( ncol , nlay ) ) taucmcl = np . zeros ( ( ngptlw , ncol , nlay ) ) else : ( cldfmcl , ciwpmcl , clwpmcl , reicmcl , relqmcl , taucmcl ) = _rrtmg_lw . climlab_mcica_subcol_lw ( ncol , nlay , icld , permuteseed , irng , play , cldfrac , ciwp , clwp , reic , relq , tauc ) ( uflx , dflx , hr , uflxc , dflxc , hrc , duflx_dt , duflxc_dt ) = _rrtmg_lw . climlab_rrtmg_lw ( ncol , nlay , icld , idrv , play , plev , tlay , tlev , tsfc , h2ovmr , o3vmr , co2vmr , ch4vmr , n2ovmr , o2vmr , cfc11vmr , cfc12vmr , cfc22vmr , ccl4vmr , emis , inflglw , iceflglw , liqflglw , cldfmcl , taucmcl , ciwpmcl , clwpmcl , reicmcl , relqmcl , tauaer ) self . LW_flux_up = _rrtm_to_climlab ( uflx ) + 0. * self . LW_flux_up self . LW_flux_down = _rrtm_to_climlab ( dflx ) + 0. * self . LW_flux_down self . LW_flux_up_clr = _rrtm_to_climlab ( uflxc ) + 0. * self . LW_flux_up_clr self . LW_flux_down_clr = _rrtm_to_climlab ( dflxc ) + 0. * self . LW_flux_down_clr self . _compute_LW_flux_diagnostics ( ) LWheating_Wm2 = np . array ( np . diff ( self . LW_flux_net , axis = - 1 ) ) + 0. * self . Tatm LWheating_clr_Wm2 = np . array ( np . diff ( self . LW_flux_net_clr , axis = - 1 ) ) + 0. * self . Tatm self . heating_rate [ 'Ts' ] = np . array ( - self . LW_flux_net [ ... , - 1 , np . newaxis ] ) + 0. * self . Ts self . heating_rate [ 'Tatm' ] = LWheating_Wm2 Catm = self . Tatm . domain . heat_capacity self . TdotLW = LWheating_Wm2 / Catm * const . seconds_per_day self . TdotLW_clr = LWheating_clr_Wm2 / Catm * const . seconds_per_day
Prepare arguments and call the RRTGM_LW driver to calculate radiative fluxes and heating rates
51,653
def _prepare_general_arguments ( RRTMGobject ) : tlay = _climlab_to_rrtm ( RRTMGobject . Tatm ) tlev = _climlab_to_rrtm ( interface_temperature ( ** RRTMGobject . state ) ) play = _climlab_to_rrtm ( RRTMGobject . lev * np . ones_like ( tlay ) ) plev = _climlab_to_rrtm ( RRTMGobject . lev_bounds * np . ones_like ( tlev ) ) ncol , nlay = tlay . shape tsfc = _climlab_to_rrtm_sfc ( RRTMGobject . Ts , RRTMGobject . Ts ) vapor_mixing_ratio = mmr_to_vmr ( RRTMGobject . specific_humidity , gas = 'H2O' ) h2ovmr = _climlab_to_rrtm ( vapor_mixing_ratio * np . ones_like ( RRTMGobject . Tatm ) ) o3vmr = _climlab_to_rrtm ( RRTMGobject . absorber_vmr [ 'O3' ] * np . ones_like ( RRTMGobject . Tatm ) ) co2vmr = _climlab_to_rrtm ( RRTMGobject . absorber_vmr [ 'CO2' ] * np . ones_like ( RRTMGobject . Tatm ) ) ch4vmr = _climlab_to_rrtm ( RRTMGobject . absorber_vmr [ 'CH4' ] * np . ones_like ( RRTMGobject . Tatm ) ) n2ovmr = _climlab_to_rrtm ( RRTMGobject . absorber_vmr [ 'N2O' ] * np . ones_like ( RRTMGobject . Tatm ) ) o2vmr = _climlab_to_rrtm ( RRTMGobject . absorber_vmr [ 'O2' ] * np . ones_like ( RRTMGobject . Tatm ) ) cfc11vmr = _climlab_to_rrtm ( RRTMGobject . absorber_vmr [ 'CFC11' ] * np . ones_like ( RRTMGobject . Tatm ) ) cfc12vmr = _climlab_to_rrtm ( RRTMGobject . absorber_vmr [ 'CFC12' ] * np . ones_like ( RRTMGobject . Tatm ) ) cfc22vmr = _climlab_to_rrtm ( RRTMGobject . absorber_vmr [ 'CFC22' ] * np . ones_like ( RRTMGobject . Tatm ) ) ccl4vmr = _climlab_to_rrtm ( RRTMGobject . absorber_vmr [ 'CCL4' ] * np . ones_like ( RRTMGobject . Tatm ) ) cldfrac = _climlab_to_rrtm ( RRTMGobject . cldfrac * np . ones_like ( RRTMGobject . Tatm ) ) ciwp = _climlab_to_rrtm ( RRTMGobject . ciwp * np . ones_like ( RRTMGobject . Tatm ) ) clwp = _climlab_to_rrtm ( RRTMGobject . clwp * np . ones_like ( RRTMGobject . Tatm ) ) relq = _climlab_to_rrtm ( RRTMGobject . r_liq * np . ones_like ( RRTMGobject . Tatm ) ) reic = _climlab_to_rrtm ( RRTMGobject . r_ice * np . ones_like ( RRTMGobject . Tatm ) ) return ( ncol , nlay , play , plev , tlay , tlev , tsfc , h2ovmr , o3vmr , co2vmr , ch4vmr , n2ovmr , o2vmr , cfc11vmr , cfc12vmr , cfc12vmr , cfc22vmr , ccl4vmr , cldfrac , ciwp , clwp , relq , reic )
Prepare arguments needed for both RRTMG_SW and RRTMG_LW with correct dimensions .
51,654
def interface_temperature ( Ts , Tatm , ** kwargs ) : lev = Tatm . domain . axes [ 'lev' ] . points lev_bounds = Tatm . domain . axes [ 'lev' ] . bounds f = interp1d ( lev , Tatm , axis = - 1 ) Tinterp = f ( lev_bounds [ 1 : - 1 ] ) Ttoa = Tatm [ ... , 0 ] Tinterp = np . concatenate ( ( Ttoa [ ... , np . newaxis ] , Tinterp , Ts ) , axis = - 1 ) return Tinterp
Compute temperature at model layer interfaces .
51,655
def moist_amplification_factor ( Tkelvin , relative_humidity = 0.8 ) : deltaT = 0.01 dqsdTs = ( qsat ( Tkelvin + deltaT / 2 , 1000. ) - qsat ( Tkelvin - deltaT / 2 , 1000. ) ) / deltaT return const . Lhvap / const . cp * relative_humidity * dqsdTs
Compute the moisture amplification factor for the moist diffusivity given relative humidity and reference temperature profile .
51,656
def daily_insolation ( lat , day , orb = const . orb_present , S0 = const . S0 , day_type = 1 ) : lat_is_xarray = True day_is_xarray = True if type ( lat ) is np . ndarray : lat_is_xarray = False lat = xr . DataArray ( lat , coords = [ lat ] , dims = [ 'lat' ] ) if type ( day ) is np . ndarray : day_is_xarray = False day = xr . DataArray ( day , coords = [ day ] , dims = [ 'day' ] ) ecc = orb [ 'ecc' ] long_peri = orb [ 'long_peri' ] obliquity = orb [ 'obliquity' ] phi = deg2rad ( lat ) if day_type == 1 : lambda_long = solar_longitude ( day , orb ) elif day_type == 2 : lambda_long = deg2rad ( day ) else : raise ValueError ( 'Invalid day_type.' ) delta = arcsin ( sin ( deg2rad ( obliquity ) ) * sin ( lambda_long ) ) oldsettings = np . seterr ( invalid = 'ignore' ) Ho = xr . where ( abs ( delta ) - pi / 2 + abs ( phi ) < 0. , arccos ( - tan ( phi ) * tan ( delta ) ) , xr . where ( phi * delta > 0. , pi , 0. ) ) coszen = Ho * sin ( phi ) * sin ( delta ) + cos ( phi ) * cos ( delta ) * sin ( Ho ) Fsw = S0 / pi * ( ( 1 + ecc * cos ( lambda_long - deg2rad ( long_peri ) ) ) ** 2 / ( 1 - ecc ** 2 ) ** 2 * coszen ) if not ( lat_is_xarray or day_is_xarray ) : return Fsw . transpose ( ) . values else : return Fsw
Compute daily average insolation given latitude time of year and orbital parameters .
51,657
def solar_longitude ( day , orb = const . orb_present , days_per_year = None ) : if days_per_year is None : days_per_year = const . days_per_year ecc = orb [ 'ecc' ] long_peri_rad = deg2rad ( orb [ 'long_peri' ] ) delta_lambda = ( day - 80. ) * 2 * pi / days_per_year beta = sqrt ( 1 - ecc ** 2 ) lambda_long_m = - 2 * ( ( ecc / 2 + ( ecc ** 3 ) / 8 ) * ( 1 + beta ) * sin ( - long_peri_rad ) - ( ecc ** 2 ) / 4 * ( 1 / 2 + beta ) * sin ( - 2 * long_peri_rad ) + ( ecc ** 3 ) / 8 * ( 1 / 3 + beta ) * sin ( - 3 * long_peri_rad ) ) + delta_lambda lambda_long = ( lambda_long_m + ( 2 * ecc - ( ecc ** 3 ) / 4 ) * sin ( lambda_long_m - long_peri_rad ) + ( 5 / 4 ) * ( ecc ** 2 ) * sin ( 2 * ( lambda_long_m - long_peri_rad ) ) + ( 13 / 12 ) * ( ecc ** 3 ) * sin ( 3 * ( lambda_long_m - long_peri_rad ) ) ) return lambda_long
Estimates solar longitude from calendar day .
51,658
def single_column ( num_lev = 30 , water_depth = 1. , lev = None , ** kwargs ) : if lev is None : levax = Axis ( axis_type = 'lev' , num_points = num_lev ) elif isinstance ( lev , Axis ) : levax = lev else : try : levax = Axis ( axis_type = 'lev' , points = lev ) except : raise ValueError ( 'lev must be Axis object or pressure array' ) depthax = Axis ( axis_type = 'depth' , bounds = [ water_depth , 0. ] ) slab = SlabOcean ( axes = depthax , ** kwargs ) atm = Atmosphere ( axes = levax , ** kwargs ) return slab , atm
Creates domains for a single column of atmosphere overlying a slab of water .
51,659
def zonal_mean_surface ( num_lat = 90 , water_depth = 10. , lat = None , ** kwargs ) : if lat is None : latax = Axis ( axis_type = 'lat' , num_points = num_lat ) elif isinstance ( lat , Axis ) : latax = lat else : try : latax = Axis ( axis_type = 'lat' , points = lat ) except : raise ValueError ( 'lat must be Axis object or latitude array' ) depthax = Axis ( axis_type = 'depth' , bounds = [ water_depth , 0. ] ) axes = { 'depth' : depthax , 'lat' : latax } slab = SlabOcean ( axes = axes , ** kwargs ) return slab
Creates a 1D slab ocean Domain in latitude with uniform water depth .
51,660
def surface_2D ( num_lat = 90 , num_lon = 180 , water_depth = 10. , lon = None , lat = None , ** kwargs ) : if lat is None : latax = Axis ( axis_type = 'lat' , num_points = num_lat ) elif isinstance ( lat , Axis ) : latax = lat else : try : latax = Axis ( axis_type = 'lat' , points = lat ) except : raise ValueError ( 'lat must be Axis object or latitude array' ) if lon is None : lonax = Axis ( axis_type = 'lon' , num_points = num_lon ) elif isinstance ( lon , Axis ) : lonax = lon else : try : lonax = Axis ( axis_type = 'lon' , points = lon ) except : raise ValueError ( 'lon must be Axis object or longitude array' ) depthax = Axis ( axis_type = 'depth' , bounds = [ water_depth , 0. ] ) axes = { 'lat' : latax , 'lon' : lonax , 'depth' : depthax } slab = SlabOcean ( axes = axes , ** kwargs ) return slab
Creates a 2D slab ocean Domain in latitude and longitude with uniform water depth .
51,661
def _make_axes_dict ( self , axes ) : if type ( axes ) is dict : axdict = axes elif type ( axes ) is Axis : ax = axes axdict = { ax . axis_type : ax } elif axes is None : axdict = { 'empty' : None } else : raise ValueError ( 'axes needs to be Axis object or dictionary of Axis object' ) return axdict
Makes an axes dictionary .
51,662
def _compute ( self ) : newstate = self . _implicit_solver ( ) adjustment = { } tendencies = { } for name , var in self . state . items ( ) : adjustment [ name ] = newstate [ name ] - var tendencies [ name ] = adjustment [ name ] / self . timestep self . adjustment = adjustment self . _update_diagnostics ( newstate ) return tendencies
Computes the state variable tendencies in time for implicit processes .
51,663
def walk_processes ( top , topname = 'top' , topdown = True , ignoreFlag = False ) : if not ignoreFlag : flag = topdown else : flag = True proc = top level = 0 if flag : yield topname , proc , level if len ( proc . subprocess ) > 0 : level += 1 for name , subproc in proc . subprocess . items ( ) : for name2 , subproc2 , level2 in walk_processes ( subproc , topname = name , topdown = subproc . topdown , ignoreFlag = ignoreFlag ) : yield name2 , subproc2 , level + level2 if not flag : yield topname , proc , level
Generator for recursive tree of climlab processes
51,664
def process_tree ( top , name = 'top' ) : str1 = '' for name , proc , level in walk_processes ( top , name , ignoreFlag = True ) : indent = ' ' * 3 * ( level ) str1 += ( '{}{}: {}\n' . format ( indent , name , type ( proc ) ) ) return str1
Creates a string representation of the process tree for process top .
51,665
def _compute_fluxes ( self ) : self . emission = self . _compute_emission ( ) self . emission_sfc = self . _compute_emission_sfc ( ) fromspace = self . _from_space ( ) self . flux_down = self . trans . flux_down ( fromspace , self . emission ) self . flux_reflected_up = self . trans . flux_reflected_up ( self . flux_down , self . albedo_sfc ) self . flux_to_sfc = self . flux_down [ ... , - 1 , np . newaxis ] self . flux_from_sfc = ( self . emission_sfc + self . flux_reflected_up [ ... , - 1 , np . newaxis ] ) self . flux_up = self . trans . flux_up ( self . flux_from_sfc , self . emission + self . flux_reflected_up [ ... , 0 : - 1 ] ) self . flux_net = self . flux_up - self . flux_down self . absorbed = np . diff ( self . flux_net , axis = - 1 ) self . absorbed_total = np . sum ( self . absorbed , axis = - 1 ) self . flux_to_space = self . _compute_flux_top ( )
All fluxes are band by band
51,666
def flux_components_top ( self ) : N = self . lev . size flux_up_bottom = self . flux_from_sfc emission = np . zeros_like ( self . emission ) this_flux_up = ( np . ones_like ( self . Ts ) * self . trans . flux_up ( flux_up_bottom , emission ) ) sfcComponent = this_flux_up [ ... , - 1 ] atmComponents = np . zeros_like ( self . Tatm ) flux_up_bottom = np . zeros_like ( self . Ts ) for n in range ( N ) : emission = np . zeros_like ( self . emission ) emission [ ... , n ] = self . emission [ ... , n ] this_flux_up = self . trans . flux_up ( flux_up_bottom , emission ) atmComponents [ ... , n ] = this_flux_up [ ... , - 1 ] return sfcComponent , atmComponents
Compute the contributions to the outgoing flux to space due to emissions from each level and the surface .
51,667
def flux_components_bottom ( self ) : N = self . lev . size atmComponents = np . zeros_like ( self . Tatm ) flux_down_top = np . zeros_like ( self . Ts ) for n in range ( N ) : emission = np . zeros_like ( self . emission ) emission [ ... , n ] = self . emission [ ... , n ] this_flux_down = self . trans . flux_down ( flux_down_top , emission ) atmComponents [ ... , n ] = this_flux_down [ ... , 0 ] return atmComponents
Compute the contributions to the downwelling flux to surface due to emissions from each level .
51,668
def _compute ( self ) : tendencies = self . _temperature_tendencies ( ) if 'q' in self . state : tendencies [ 'Tatm' ] *= 0. Pa_per_hPa = 100. air_mass_per_area = self . Tatm . domain . lev . delta [ ... , - 1 ] * Pa_per_hPa / const . g specific_humidity_tendency = 0. * self . q specific_humidity_tendency [ ... , - 1 , np . newaxis ] = self . LHF / const . Lhvap / air_mass_per_area tendencies [ 'q' ] = specific_humidity_tendency return tendencies
Overides the _compute method of EnergyBudget
51,669
def Pn ( x ) : Pn = { } Pn [ '0' ] = P0 ( x ) Pn [ '1' ] = P1 ( x ) Pn [ '2' ] = P2 ( x ) Pn [ '3' ] = P3 ( x ) Pn [ '4' ] = P4 ( x ) Pn [ '5' ] = P5 ( x ) Pn [ '6' ] = P6 ( x ) Pn [ '8' ] = P8 ( x ) Pn [ '10' ] = P10 ( x ) Pn [ '12' ] = P12 ( x ) Pn [ '14' ] = P14 ( x ) Pn [ '16' ] = P16 ( x ) Pn [ '18' ] = P18 ( x ) Pn [ '20' ] = P20 ( x ) Pn [ '22' ] = P22 ( x ) Pn [ '24' ] = P24 ( x ) Pn [ '26' ] = P26 ( x ) Pn [ '28' ] = P28 ( x ) return Pn
Calculate Legendre polyomials P0 to P28 and returns them in a dictionary Pn .
51,670
def Pnprime ( x ) : Pnprime = { } Pnprime [ '0' ] = 0 Pnprime [ '1' ] = P1prime ( x ) Pnprime [ '2' ] = P2prime ( x ) Pnprime [ '3' ] = P3prime ( x ) Pnprime [ '4' ] = P4prime ( x ) Pnprime [ '6' ] = P6prime ( x ) Pnprime [ '8' ] = P8prime ( x ) Pnprime [ '10' ] = P10prime ( x ) Pnprime [ '12' ] = P12prime ( x ) Pnprime [ '14' ] = P14prime ( x ) return Pnprime
Calculates first derivatives of Legendre polynomials and returns them in a dictionary Pnprime .
51,671
def inferred_heat_transport ( self ) : phi = np . deg2rad ( self . lat ) energy_in = np . squeeze ( self . net_radiation ) return ( 1E-15 * 2 * np . math . pi * const . a ** 2 * integrate . cumtrapz ( np . cos ( phi ) * energy_in , x = phi , initial = 0. ) )
Calculates the inferred heat transport by integrating the TOA energy imbalance from pole to pole .
51,672
def rrtmg_lw_gen_source ( ext , build_dir ) : thispath = config . local_path module_src = [ ] for item in modules : fullname = join ( thispath , 'rrtmg_lw_v4.85' , 'gcm_model' , 'modules' , item ) module_src . append ( fullname ) for item in src : if item in mod_src : fullname = join ( thispath , 'sourcemods' , item ) else : fullname = join ( thispath , 'rrtmg_lw_v4.85' , 'gcm_model' , 'src' , item ) module_src . append ( fullname ) sourcelist = [ join ( thispath , '_rrtmg_lw.pyf' ) , join ( thispath , 'Driver.f90' ) ] try : config . have_f90c ( ) return module_src + sourcelist except : print ( 'No Fortran 90 compiler found, not building RRTMG_LW extension!' ) return None
Add RRTMG_LW fortran source if Fortran 90 compiler available if no compiler is found do not try to build the extension .
51,673
def compute ( self ) : for varname in self . tendencies : self . tendencies [ varname ] *= 0. if not self . has_process_type_list : self . _build_process_type_list ( ) tendencies = { } ignored = self . _compute_type ( 'diagnostic' ) tendencies [ 'explicit' ] = self . _compute_type ( 'explicit' ) for name , var in self . state . items ( ) : var += tendencies [ 'explicit' ] [ name ] * self . timestep tendencies [ 'implicit' ] = self . _compute_type ( 'implicit' ) for name , var in self . state . items ( ) : var += tendencies [ 'implicit' ] [ name ] * self . timestep tendencies [ 'adjustment' ] = self . _compute_type ( 'adjustment' ) for name , var in self . state . items ( ) : var -= ( ( tendencies [ 'implicit' ] [ name ] + tendencies [ 'explicit' ] [ name ] ) * self . timestep ) for proctype in [ 'explicit' , 'implicit' , 'adjustment' ] : for varname , tend in tendencies [ proctype ] . items ( ) : self . tendencies [ varname ] += tend self_tend = self . _compute ( ) if self . time_type is 'adjustment' : for varname , adj in self_tend . items ( ) : self_tend [ varname ] /= self . timestep for varname , tend in self_tend . items ( ) : self . tendencies [ varname ] += tend return self . tendencies
Computes the tendencies for all state variables given current state and specified input .
51,674
def _compute_type ( self , proctype ) : tendencies = { } for varname in self . state : tendencies [ varname ] = 0. * self . state [ varname ] for proc in self . process_types [ proctype ] : step_ratio = int ( proc . timestep / self . timestep ) if self . time [ 'steps' ] % step_ratio == 0 : proc . time [ 'active_now' ] = True tenddict = proc . compute ( ) else : proc . time [ 'active_now' ] = False tenddict = proc . tendencies for name , tend in tenddict . items ( ) : tendencies [ name ] += tend for diagname , value in proc . diagnostics . items ( ) : self . __setattr__ ( diagname , value ) return tendencies
Computes tendencies due to all subprocesses of given type proctype . Also pass all diagnostics up to parent process .
51,675
def _compute ( self ) : tendencies = { } for name , value in self . state . items ( ) : tendencies [ name ] = value * 0. return tendencies
Where the tendencies are actually computed ...
51,676
def _build_process_type_list ( self ) : self . process_types = { 'diagnostic' : [ ] , 'explicit' : [ ] , 'implicit' : [ ] , 'adjustment' : [ ] } for name , proc in self . subprocess . items ( ) : self . process_types [ proc . time_type ] . append ( proc ) self . has_process_type_list = True
Generates lists of processes organized by process type .
51,677
def step_forward ( self ) : tenddict = self . compute ( ) for varname , tend in tenddict . items ( ) : self . state [ varname ] += tend * self . timestep for name , proc , level in walk . walk_processes ( self , ignoreFlag = True ) : if proc . time [ 'active_now' ] : proc . _update_time ( )
Updates state variables with computed tendencies .
51,678
def _update_time ( self ) : self . time [ 'steps' ] += 1 self . time [ 'days_elapsed' ] += self . time [ 'timestep' ] / const . seconds_per_day if self . time [ 'day_of_year_index' ] >= self . time [ 'num_steps_per_year' ] - 1 : self . _do_new_calendar_year ( ) else : self . time [ 'day_of_year_index' ] += 1
Increments the timestep counter by one .
51,679
def integrate_years ( self , years = 1.0 , verbose = True ) : days = years * const . days_per_year numsteps = int ( self . time [ 'num_steps_per_year' ] * years ) if verbose : print ( "Integrating for " + str ( numsteps ) + " steps, " + str ( days ) + " days, or " + str ( years ) + " years." ) for count in range ( numsteps ) : self . step_forward ( ) if count == 0 : self . timeave = self . state . copy ( ) self . timeave . update ( self . diagnostics ) for varname , value in self . timeave . items ( ) : if value is None : continue self . timeave [ varname ] = 0 * value for varname in list ( self . timeave . keys ( ) ) : try : self . timeave [ varname ] += self . state [ varname ] except : try : self . timeave [ varname ] += self . diagnostics [ varname ] except : pass for varname , value in self . timeave . items ( ) : if value is None : continue self . timeave [ varname ] /= numsteps if verbose : print ( "Total elapsed time is %s years." % str ( self . time [ 'days_elapsed' ] / const . days_per_year ) )
Integrates the model by a given number of years .
51,680
def integrate_days ( self , days = 1.0 , verbose = True ) : years = days / const . days_per_year self . integrate_years ( years = years , verbose = verbose )
Integrates the model forward for a specified number of days .
51,681
def integrate_converge ( self , crit = 1e-4 , verbose = True ) : for varname , value in self . state . items ( ) : value_old = copy . deepcopy ( value ) self . integrate_years ( 1 , verbose = False ) while np . max ( np . abs ( value_old - value ) ) > crit : value_old = copy . deepcopy ( value ) self . integrate_years ( 1 , verbose = False ) if verbose == True : print ( "Total elapsed time is %s years." % str ( self . time [ 'days_elapsed' ] / const . days_per_year ) )
Integrates the model until model states are converging .
51,682
def cam3_gen_source ( ext , build_dir ) : fort90source = [ 'pmgrid.F90' , 'prescribed_aerosols.F90' , 'shr_kind_mod.F90' , 'quicksort.F90' , 'abortutils.F90' , 'absems.F90' , 'wv_saturation.F90' , 'aer_optics.F90' , 'cmparray_mod.F90' , 'shr_const_mod.F90' , 'physconst.F90' , 'pkg_cldoptics.F90' , 'gffgch.F90' , 'chem_surfvals.F90' , 'volcrad.F90' , 'radae.F90' , 'radlw.F90' , 'radsw.F90' , 'crm.F90' , ] thispath = config . local_path sourcelist = [ ] sourcelist . append ( join ( thispath , '_cam3.pyf' ) ) for item in fort90source : sourcelist . append ( join ( thispath , 'src' , item ) ) sourcelist . append ( join ( thispath , 'Driver.f90' ) ) try : config . have_f90c ( ) return sourcelist except : print ( 'No Fortran 90 compiler found, not building CAM3 extension!' ) return None
Add CAM3 fortran source if Fortran 90 compiler available if no compiler is found do not try to build the extension .
51,683
def global_mean ( field ) : try : lat = field . domain . lat . points except : raise ValueError ( 'No latitude axis in input field.' ) try : lon = field . domain . lon . points return _global_mean_latlon ( field . squeeze ( ) ) except : lat_radians = np . deg2rad ( lat ) return _global_mean ( field . squeeze ( ) , lat_radians )
Calculates the latitude weighted global mean of a field with latitude dependence .
51,684
def to_latlon ( array , domain , axis = 'lon' ) : axis , array , depth = np . meshgrid ( domain . axes [ axis ] . points , array , domain . axes [ 'depth' ] . points ) if axis == 'lat' : np . swapaxes ( array , 1 , 0 ) return Field ( array , domain = domain )
Broadcasts a 1D axis dependent array across another axis .
51,685
def Field_to_xarray ( field ) : dom = field . domain dims = [ ] dimlist = [ ] coords = { } for axname in dom . axes : dimlist . append ( axname ) try : assert field . interfaces [ dom . axis_index [ axname ] ] bounds_name = axname + '_bounds' dims . append ( bounds_name ) coords [ bounds_name ] = dom . axes [ axname ] . bounds except : dims . append ( axname ) coords [ axname ] = dom . axes [ axname ] . points da = DataArray ( field . transpose ( [ dom . axis_index [ name ] for name in dimlist ] ) , dims = dims , coords = coords ) for name in dims : try : da [ name ] . attrs [ 'units' ] = dom . axes [ name ] . units except : pass return da
Convert a climlab . Field object to xarray . DataArray
51,686
def state_to_xarray ( state ) : from climlab . domain . field import Field ds = Dataset ( ) for name , field in state . items ( ) : if isinstance ( field , Field ) : ds [ name ] = Field_to_xarray ( field ) dom = field . domain for axname , ax in dom . axes . items ( ) : bounds_name = axname + '_bounds' ds . coords [ bounds_name ] = DataArray ( ax . bounds , dims = [ bounds_name ] , coords = { bounds_name : ax . bounds } ) try : ds [ bounds_name ] . attrs [ 'units' ] = ax . units except : pass else : warnings . warn ( '{} excluded from Dataset because it is not a Field variable.' . format ( name ) ) return ds
Convert a dictionary of climlab . Field objects to xarray . Dataset
51,687
def to_xarray ( input ) : from climlab . domain . field import Field if isinstance ( input , Field ) : return Field_to_xarray ( input ) elif isinstance ( input , dict ) : return state_to_xarray ( input ) else : raise TypeError ( 'input must be Field object or dictionary of Field objects' )
Convert climlab input to xarray format .
51,688
def generate ( data , dimOrder , maxWindowSize , overlapPercent , transforms = [ ] ) : width = data . shape [ dimOrder . index ( 'w' ) ] height = data . shape [ dimOrder . index ( 'h' ) ] return generateForSize ( width , height , dimOrder , maxWindowSize , overlapPercent , transforms )
Generates a set of sliding windows for the specified dataset .
51,689
def generateForSize ( width , height , dimOrder , maxWindowSize , overlapPercent , transforms = [ ] ) : windowSizeX = min ( maxWindowSize , width ) windowSizeY = min ( maxWindowSize , height ) windowOverlapX = int ( math . floor ( windowSizeX * overlapPercent ) ) windowOverlapY = int ( math . floor ( windowSizeY * overlapPercent ) ) stepSizeX = windowSizeX - windowOverlapX stepSizeY = windowSizeY - windowOverlapY lastX = width - windowSizeX lastY = height - windowSizeY xOffsets = list ( range ( 0 , lastX + 1 , stepSizeX ) ) yOffsets = list ( range ( 0 , lastY + 1 , stepSizeY ) ) if len ( xOffsets ) == 0 or xOffsets [ - 1 ] != lastX : xOffsets . append ( lastX ) if len ( yOffsets ) == 0 or yOffsets [ - 1 ] != lastY : yOffsets . append ( lastY ) windows = [ ] for xOffset in xOffsets : for yOffset in yOffsets : for transform in [ None ] + transforms : windows . append ( SlidingWindow ( x = xOffset , y = yOffset , w = windowSizeX , h = windowSizeY , dimOrder = dimOrder , transform = transform ) ) return windows
Generates a set of sliding windows for a dataset with the specified dimensions and order .
51,690
def apply ( self , matrix ) : view = matrix [ self . indices ( ) ] return self . transform ( view ) if self . transform != None else view
Slices the supplied matrix and applies any transform bound to this window
51,691
def indices ( self , includeChannel = True ) : if self . dimOrder == DimOrder . HeightWidthChannel : return ( slice ( self . y , self . y + self . h ) , slice ( self . x , self . x + self . w ) ) elif self . dimOrder == DimOrder . ChannelHeightWidth : if includeChannel is True : return ( slice ( None , None ) , slice ( self . y , self . y + self . h ) , slice ( self . x , self . x + self . w ) ) else : return ( slice ( self . y , self . y + self . h ) , slice ( self . x , self . x + self . w ) ) else : raise Error ( 'Unsupported order of dimensions: ' + str ( self . dimOrder ) )
Retrieves the indices for this window as a tuple of slices
51,692
def batchWindows ( windows , batchSize ) : return np . array_split ( np . array ( windows ) , len ( windows ) // batchSize )
Splits a list of windows into a series of batches .
51,693
def generateDistanceMatrix ( width , height ) : originX = width / 2 originY = height / 2 distances = zerosFactory ( ( height , width ) , dtype = np . float ) for index , val in np . ndenumerate ( distances ) : y , x = index distances [ ( y , x ) ] = math . sqrt ( math . pow ( x - originX , 2 ) + math . pow ( y - originY , 2 ) ) return distances
Generates a matrix specifying the distance of each point in a window to its centre .
51,694
def _requiredSize ( shape , dtype ) : return math . floor ( np . prod ( np . asarray ( shape , dtype = np . uint64 ) ) * np . dtype ( dtype ) . itemsize )
Determines the number of bytes required to store a NumPy array with the specified shape and datatype .
51,695
def arrayFactory ( shape , dtype = float ) : requiredBytes = _requiredSize ( shape , dtype ) vmem = psutil . virtual_memory ( ) if vmem . available > requiredBytes : return np . ndarray ( shape = shape , dtype = dtype ) else : return TempfileBackedArray ( shape = shape , dtype = dtype )
Creates a new ndarray of the specified shape and datatype storing it in memory if there is sufficient available space or else using a memory - mapped temporary file to provide the underlying buffer .
51,696
def arrayCast ( source , dtype ) : requiredBytes = _requiredSize ( source . shape , dtype ) vmem = psutil . virtual_memory ( ) if vmem . available > requiredBytes : return source . astype ( dtype , subok = False ) else : dest = arrayFactory ( source . shape , dtype ) np . copyto ( dest , source , casting = 'unsafe' ) return dest
Casts a NumPy array to the specified datatype storing the copy in memory if there is sufficient available space or else using a memory - mapped temporary file to provide the underlying buffer .
51,697
def determineMaxWindowSize ( dtype , limit = None ) : vmem = psutil . virtual_memory ( ) maxSize = math . floor ( math . sqrt ( vmem . available / np . dtype ( dtype ) . itemsize ) ) if limit is None or limit >= maxSize : return maxSize else : return limit
Determines the largest square window size that can be used based on the specified datatype and amount of currently available system memory . If limit is specified then this value will be returned in the event that it is smaller than the maximum computed size .
51,698
def set_state ( self , state ) : for k , v in state . items ( ) : setattr ( self , k , v )
Set the view state .
51,699
def _fullname ( o ) : return o . __module__ + "." + o . __name__ if o . __module__ else o . __name__
Return the fully - qualified name of a function .