idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
49,200
def islitlet_progress(islitlet, islitlet_max):
    """Print a one-character progress marker for a loop over slitlets.

    Every 10th slitlet prints its tens digit, any other slitlet a dot;
    a newline is emitted after the last slitlet.
    """
    marker = str(islitlet // 10) if islitlet % 10 == 0 else '.'
    sys.stdout.write(marker)
    if islitlet == islitlet_max:
        sys.stdout.write('\n')
    sys.stdout.flush()
Auxiliary function to print out progress in loop of slitlets .
49,201
def get_corrector_f(rinput, meta, ins, datamodel):
    """Build a FlatFieldCorrector from the master intensity flat in rinput."""
    from emirdrp.processing.flatfield import FlatFieldCorrector

    flat_info = meta['master_flat']
    with rinput.master_flat.open() as hdul:
        _logger.info('loading intensity flat')
        _logger.debug('flat info: %s', flat_info)
        mflat = hdul[0].data
        # Sanity checks: warn on negative and non-finite flat values.
        below_zero = mflat < 0
        not_finite = ~numpy.isfinite(mflat)
        if numpy.any(below_zero):
            _logger.warning('flat has %d values below 0', below_zero.sum())
        if numpy.any(not_finite):
            _logger.warning('flat has %d NaN', not_finite.sum())
        flat_corrector = FlatFieldCorrector(
            mflat,
            datamodel=datamodel,
            calibid=datamodel.get_imgid(hdul),
        )
    return flat_corrector
Corrector for intensity flat
49,202
def loginfo(method):
    """Decorator: log the DataFrame contents of the Recipe Input before running.

    Fix: wrap with functools.wraps so the decorated method keeps its
    __name__/__doc__ (the original wrapper hid them); dead `else: pass`
    removed.
    """
    @functools.wraps(method)
    def loginfo_method(self, rinput):
        klass = rinput.__class__
        for key in klass.stored():
            val = getattr(rinput, key)
            if isinstance(val, DataFrame):
                self.logger.debug("DataFrame %s", info.gather_info_dframe(val))
            elif isinstance(val, ObservationResult):
                for f in val.images:
                    self.logger.debug("OB DataFrame %s",
                                      info.gather_info_dframe(f))
        return method(self, rinput)
    return loginfo_method
Log the contents of Recipe Input
49,203
def to_host(self):
    """Copy or coerce this object into a Host instance."""
    opts = dict(alias=self.alias, user=self.user, keyfile=self.keyfile,
                port=self.port, extra=self.extra)
    return Host(self.address, **opts)
Copy or coerce to a Host .
49,204
def get_table_info(*table_names):
    """Yield (table_name, interface, fields) triples.

    If table names are given, yield one triple per interface that contains
    each named table; otherwise yield a triple for every table of every
    configured interface.

    Fixes: removed the dead `ret = {}` local (the function is a generator
    and never used it) and stopped shadowing the `table_names` parameter
    in the no-argument branch.
    """
    if table_names:
        for table_name in table_names:
            for name, inter in get_interfaces().items():
                if inter.has_table(table_name):
                    yield table_name, inter, inter.get_fields(table_name)
    else:
        for name, inter in get_interfaces().items():
            for table_name in inter.get_tables():
                yield table_name, inter, inter.get_fields(table_name)
Yields (table_name, interface, fields) tuples for each matching table found in the configured interfaces.
49,205
def main_generate(table_names, stream):
    """Write valid prom ORM class definitions for tables that already exist
    in a database."""
    with stream.open() as fp:
        # Shared imports for the generated module.
        fp.write_line("from datetime import datetime, date")
        fp.write_line("from decimal import Decimal")
        fp.write_line("from prom import Orm, Field")
        fp.write_newlines()

        for table_name, inter, fields in get_table_info(*table_names):
            classname = table_name.title().replace("_", "")
            fp.write_line("class {}(Orm):".format(classname))
            fp.write_line(" table_name = '{}'".format(table_name))
            conn_name = inter.connection_config.name
            if conn_name:
                fp.write_line(" connection_name = '{}'".format(conn_name))
            fp.write_newlines()

            magic_field_names = set(["_id", "_created", "_updated"])
            # Emit _id first so it leads the class body.
            if "_id" in fields:
                fp.write_line(get_field_def("_id", fields.pop("_id")))
                magic_field_names.discard("_id")
            for field_name, field_d in fields.items():
                fp.write_line(get_field_def(field_name, field_d))
            # Disable any magic field the table does not actually have.
            for magic_field_name in magic_field_names:
                if magic_field_name not in fields:
                    fp.write_line(" {} = None".format(magic_field_name))
            fp.write_newlines(2)
This will print out valid prom python code for given tables that already exist in a database .
49,206
def exvp(pos_x, pos_y):
    """Convert virtual pixel coordinates to real (distorted) pixel
    coordinates, applying the EMIR radial distortion polynomial."""
    pos_x = numpy.asarray(pos_x)
    pos_y = numpy.asarray(pos_y)
    center = [1024.5, 1024.5]
    cf = EMIR_PLATESCALE_RADS
    # Polar coordinates relative to the optical center.
    dx = pos_x - center[0]
    dy = pos_y - center[1]
    ra = numpy.hypot(dx, dy)
    thet = numpy.arctan2(dy, dx)
    r = cf * ra
    # Radial distortion polynomial (empirical coefficients).
    rr1 = 1 + 14606.7 * r ** 2 + 1739716115.1 * r ** 4
    nx1 = rr1 * ra * numpy.cos(thet) + center[0]
    ny1 = rr1 * ra * numpy.sin(thet) + center[1]
    return nx1, ny1
Convert virtual pixel to real pixel
49,207
def match_sp_sep(first, second):
    """Verify that every value in `first` appears in `second`.

    Each argument is either a list of strings or a single space-separated
    string; list items are compared as sets of space-separated tokens.
    """
    def as_sets(val):
        if isinstance(val, list):
            return [set(item.split(" ")) for item in val]
        return [{token} for token in val.split(" ")]

    needles = as_sets(first)
    haystack = as_sets(second)
    return all(needle in haystack for needle in needles)
Verify that all the values in first appear in second . The values can either be in the form of lists or as space separated items .
49,208
def _verify_sector_identifier(self, request):
    """Verify that sector_identifier_uri is reachable and that its content
    lists every registered redirect_uri.

    :return: (si_redirects, si_url) on success
    :raises InvalidSectorIdentifier: on fetch/parse failure or missing uri
    """
    si_url = request["sector_identifier_uri"]
    res = None
    try:
        res = self.endpoint_context.httpc.get(si_url)
    except Exception as err:
        logger.error(err)
    if not res:
        raise InvalidSectorIdentifier(
            "Couldn't read from sector_identifier_uri")
    logger.debug("sector_identifier_uri => %s", sanitize(res.text))
    try:
        si_redirects = json.loads(res.text)
    except ValueError:
        raise InvalidSectorIdentifier(
            "Error deserializing sector_identifier_uri content")
    if "redirect_uris" in request:
        logger.debug("redirect_uris: %s", request["redirect_uris"])
        if any(uri not in si_redirects for uri in request["redirect_uris"]):
            raise InvalidSectorIdentifier(
                "redirect_uri missing from sector_identifiers")
    return si_redirects, si_url
Verify sector_identifier_uri is reachable and that it contains redirect_uri s .
49,209
def read(self):
    """Read the file until EOF and return a list of dictionaries."""
    rows = []
    line = self.readline()
    # readline() returns a falsy value at EOF.
    while line:
        rows.append(line)
        line = self.readline()
    return rows
Read the file until EOF and return a list of dictionaries .
49,210
def close(self):
    """Close the SExtractor file, if any, and mark this object closed."""
    if self._file:
        if not self._file.closed:
            self._file.close()
        self.closed = True
Close the SExtractor file .
49,211
def get_driver(configuration):
    """Build the driver instance used to interact with Grid'5000."""
    resources = configuration["resources"]
    machines = resources["machines"]
    networks = resources["networks"]

    oargrid_jobids = configuration.get("oargrid_jobids")
    if oargrid_jobids:
        # Reattaching to an existing reservation.
        logger.debug("Loading the OargridStaticDriver")
        return OargridStaticDriver(oargrid_jobids)

    # Fresh submission: collect job options, falling back to defaults.
    job_name = configuration.get("job_name", DEFAULT_JOB_NAME)
    walltime = configuration.get("walltime", DEFAULT_WALLTIME)
    job_type = configuration.get("job_type", JOB_TYPE_DEPLOY)
    reservation_date = configuration.get("reservation", False)
    queue = configuration.get("queue", None)
    logger.debug("Loading the OargridDynamicDriver")
    return OargridDynamicDriver(job_name, walltime, job_type,
                                reservation_date, queue, machines, networks)
Build an instance of the driver to interact with G5K
49,212
def ver_dec_content(parts, sign_key=None, enc_key=None, sign_alg='SHA256'):
    """Verify (and, if needed, decrypt) the value of a cookie.

    :param parts: cookie payload split into parts; 3 parts means a
        signed-only cookie, 4 parts an AES-GCM-encrypted one
    :param sign_key: HMAC signing key object (exposes .key)
    :param enc_key: AES encryption key object (exposes .key)
    :param sign_alg: hash algorithm name for the HMAC signature
    :return: (load, timestamp) on success, None if parts is None or has an
        unexpected length
    :raises VerificationError: if the HMAC of a signed-only cookie is invalid
    """
    if parts is None:
        return None
    elif len(parts) == 3:
        # Signed-only cookie: (timestamp, load, base64 MAC).
        timestamp, load, b64_mac = parts
        mac = base64.b64decode(b64_mac)
        verifier = HMACSigner(algorithm=sign_alg)
        if verifier.verify(load.encode('utf-8') + timestamp.encode('utf-8'),
                           mac, sign_key.key):
            return load, timestamp
        else:
            raise VerificationError()
    elif len(parts) == 4:
        # Encrypted cookie: (timestamp, b64 iv, b64 ciphertext, b64 tag).
        b_timestamp = parts[0]
        iv = base64.b64decode(parts[1])
        ciphertext = base64.b64decode(parts[2])
        tag = base64.b64decode(parts[3])

        decrypter = AES_GCMEncrypter(key=enc_key.key)
        msg = decrypter.decrypt(ciphertext, iv, tag=tag)
        # The plaintext is length-value packed: load, timestamp[, mac].
        p = lv_unpack(msg.decode('utf-8'))
        load = p[0]
        timestamp = p[1]
        if len(p) == 3:
            verifier = HMACSigner(algorithm=sign_alg)
            if verifier.verify(load.encode('utf-8')
                               + timestamp.encode('utf-8'),
                               base64.b64decode(p[2]), sign_key.key):
                return load, timestamp
            # NOTE(review): a failed HMAC here falls through and returns
            # None instead of raising like the 3-part branch — confirm
            # this asymmetry is intended.
        else:
            return load, timestamp
    return None
Verifies the value of a cookie
49,213
def make_cookie_content(name, load, sign_key, domain=None, path=None,
                        timestamp="", enc_key=None, max_age=0,
                        sign_alg='SHA256'):
    """Create and return a cookie's content mapping {name: morsel-dict}."""
    if not timestamp:
        timestamp = str(int(time.time()))

    # Sign (and optionally encrypt) the payload.
    _cookie_value = sign_enc_payload(load, timestamp, sign_key=sign_key,
                                     enc_key=enc_key, sign_alg=sign_alg)
    morsel = {"value": _cookie_value}
    if path is not None:
        morsel["path"] = path
    if domain is not None:
        morsel["domain"] = domain
    morsel['httponly'] = True
    if max_age:
        morsel["expires"] = in_a_while(seconds=max_age)
    return {name: morsel}
Create and return a cookies content
49,214
def cookie_parts(name, kaka):
    """Return the '|'-separated parts of the named cookie's payload, or
    None when the cookie is absent."""
    morsel = SimpleCookie(as_unicode(kaka)).get(name)
    return morsel.value.split("|") if morsel else None
Give me the parts of the cookie payload
49,215
def delete_cookie(self, cookie_name=None):
    """Create a cookie that will immediately expire when it reaches the
    client, effectively deleting it."""
    name = cookie_name if cookie_name is not None else self.default_value['name']
    return self.create_cookie("", "", cookie_name=name, kill=True)
Create a cookie that will immediately expire when it hits the other side .
49,216
def get_cookie_value(self, cookie=None, cookie_name=None):
    """Return the (value, timestamp, type) triple stored in a cookie, or
    None when it is absent, unparsable, or its timestamps disagree."""
    if cookie_name is None:
        cookie_name = self.default_value['name']
    if cookie is None or cookie_name is None:
        return None
    try:
        info, timestamp = parse_cookie(cookie_name, self.sign_key, cookie,
                                       self.enc_key, self.sign_alg)
    except (TypeError, AssertionError):
        return None
    value, _ts, typ = info.split("::")
    # The embedded timestamp must match the one recovered by parse_cookie.
    if timestamp == _ts:
        return value, _ts, typ
    return None
Return information stored in a Cookie
49,217
def append_cookie(self, cookie, name, payload, typ, domain=None, path=None,
                  timestamp="", max_age=0):
    """Add a cookie to a SimpleCookie instance and return it."""
    # NOTE(review): the timestamp argument is always overwritten with the
    # current time here — confirm whether callers rely on passing their own.
    timestamp = str(int(time.time()))
    try:
        _payload = "::".join([payload, timestamp, typ])
    except TypeError:
        # payload may arrive as a (value, ...) tuple.
        _payload = "::".join([payload[0], timestamp, typ])

    content = make_cookie_content(name, _payload, self.sign_key,
                                  domain=domain, path=path,
                                  timestamp=timestamp, enc_key=self.enc_key,
                                  max_age=max_age, sign_alg=self.sign_alg)
    for cname, args in content.items():
        # Set the value first, then the morsel attributes.
        cookie[cname] = args['value']
        for key, value in args.items():
            if key == 'value':
                continue
            cookie[cname][key] = value
    return cookie
Adds a cookie to a SimpleCookie instance
49,218
def f_ac_power(inverter, v_mp, p_mp):
    """Calculate AC power with the Sandia inverter model, flattened to 1-D."""
    ac = pvlib.pvsystem.snlinverter(v_mp, p_mp, inverter)
    return ac.flatten()
Calculate AC power
49,219
def f_dc_power(effective_irradiance, cell_temp, module):
    """Calculate DC power using the Sandia Performance (SAPM) model.

    Returns the tuple (i_sc, i_mp, v_oc, v_mp, p_mp).
    """
    dc = pvlib.pvsystem.sapm(effective_irradiance, cell_temp, module)
    return dc['i_sc'], dc['i_mp'], dc['v_oc'], dc['v_mp'], dc['p_mp']
Calculate DC power using Sandia Performance model
49,220
def f_effective_irradiance(poa_direct, poa_diffuse, am_abs, aoi, module):
    """Calculate effective irradiance for the SAPM model, as a row vector."""
    ee = pvlib.pvsystem.sapm_effective_irradiance(
        poa_direct, poa_diffuse, am_abs, aoi, module)
    return ee.reshape(1, -1)
Calculate effective irradiance for Sandia Performance model
49,221
def f_cell_temp(poa_global, wind_speed, air_temp):
    """Calculate cell and module temperatures via the SAPM thermal model.

    Returns (temp_cell values, temp_module values) as plain arrays.
    """
    temps = pvlib.pvsystem.sapm_celltemp(poa_global, wind_speed, air_temp)
    return temps['temp_cell'].values, temps['temp_module'].values
Calculate cell temperature .
49,222
def f_aoi(surface_tilt, surface_azimuth, solar_zenith, solar_azimuth):
    """Calculate the angle of incidence of sunlight on the array plane."""
    return pvlib.irradiance.aoi(surface_tilt, surface_azimuth,
                                solar_zenith, solar_azimuth)
Calculate angle of incidence
49,223
def find_position(edges, prow, bstart, bend, total=5):
    """Find EMIR CSU bar positions in an edge image.

    Returns a list of (x, y) centroids of labeled edge features inside a
    band of `total` rows centered on `prow`, restricted to columns
    [bstart, bend); empty list when the band does not fit in the image.
    """
    half = total // 2
    if prow - half < 0 or prow + half >= edges.shape[0]:
        return []
    band = edges[prow - half:prow + half + 1, bstart:bend]
    # Label 8-connected edge features and take their centers of mass.
    structure = morph.generate_binary_structure(2, 2)
    labeled, nfeatures = mes.label(band, structure=structure)
    centroids = mes.center_of_mass(band, labels=labeled,
                                   index=range(1, nfeatures + 1))
    # Back to full-image coordinates, swapped to (x, y) order.
    return [(x + bstart, prow - half + y) for y, x in centroids]
Find an EMIR CSU bar position in an edge image.
49,224
def calc_fwhm(img, region, fexpand=3, axis=0):
    """Compute the FWHM of a slit profile in the direction given by axis."""
    xpregion = expand_region(region, fexpand, fexpand)
    cslit = img[xpregion]
    # Collapse the slit region to a 1-D profile.
    profile = cslit.mean(axis=axis)
    npix = len(profile)
    # Subtract a straight-line background joining the profile end points.
    y1, y2 = profile[0], profile[-1]
    slope = (y2 - y1) / npix
    background = slope * numpy.arange(npix) + y1
    clean = profile - background
    peak_idx = numpy.argmax(clean)
    peak, fwhm = fmod.compute_fwhm_1d_simple(clean, peak_idx)
    return fwhm
Compute the FWHM in the direction given by axis
49,225
def simple_prot(x, start):
    """Find the index of the first peak at or to the right of `start`.

    A peak is a point strictly above its left neighbour and not below its
    right neighbour. Returns None when no peak is found.
    """
    for idx in range(start, len(x) - 1):
        prev_v, cur_v, next_v = x[idx - 1], x[idx], x[idx + 1]
        if cur_v - prev_v > 0 and cur_v - next_v >= 0:
            return idx
    return None
Find the first peak to the right of start
49,226
def position_half_h(pslit, cpix, backw=4):
    """Find the position where the profile reaches half of the peak value.

    :raises ValueError: if no peak is found to the right of cpix
    """
    next_peak = simple_prot(pslit, cpix)
    if next_peak is None:
        raise ValueError
    # Background window mirrored to the left of cpix.
    dis_peak = next_peak - cpix
    wpos2 = cpix - dis_peak
    wpos1 = wpos2 - backw
    left_background = pslit[wpos1:wpos2].min()
    height = pslit[next_peak] - left_background
    half_height = left_background + 0.5 * height
    # Locate the sign change of (profile - half_height) and interpolate
    # linearly between the two bracketing samples.
    vv = pslit[wpos1:next_peak + 1] - half_height
    crossings, = numpy.nonzero(numpy.diff(vv > 0))
    i1 = crossings[0]
    xint = wpos1 + i1 + (0 - vv[i1]) / (vv[i1 + 1] - vv[i1])
    return xint, next_peak, wpos1, wpos2, left_background, half_height
Find the position where the value is half of the peak
49,227
def locate_bar_l(icut, epos):
    """Fine position of the left CSU bar (identity transforms)."""
    return _locate_bar_gen(icut, epos,
                           transform1=lambda x: x,
                           transform2=lambda tab: tab)
Fine position of the left CSU bar
49,228
def locate_bar_r(icut, epos):
    """Fine position of the right CSU bar (mirror transforms)."""
    sm = len(icut)

    def flip_coor(x):
        # Mirror a coordinate about the end of the cut.
        return sm - 1 - x

    def flip_line(tab):
        # Reverse the line so the right bar looks like a left one.
        return tab[::-1]

    return _locate_bar_gen(icut, epos,
                           transform1=flip_coor, transform2=flip_line)
Fine position of the right CSU bar
49,229
def _locate_bar_gen(icut, epos, transform1, transform2):
    """Generic fine positioning of a CSU bar.

    Returns (initial pixel estimate, refined position, error flag), where
    error is 0 on success and 2 when the half-height search fails.
    """
    epos_pix = coor_to_pix_1d(epos)
    # Work in a transformed frame where the bar looks like a left bar.
    epos_pix_s = transform1(epos_pix)
    icut2 = transform2(icut)
    try:
        res = position_half_h(icut2, epos_pix_s)
        xint_s = res[0]
        epos_f = transform1(xint_s)
        error = 0
    except ValueError:
        # No half-height crossing found; keep the initial estimate.
        epos_f = epos
        error = 2
    return epos_pix, epos_f, error
Generic function for the fine position of the CSU
49,230
def overlap(intv1, intv2):
    """Return the length of the overlap of two intervals (0 if disjoint)."""
    lo = max(intv1[0], intv2[0])
    hi = min(intv1[1], intv2[1])
    return max(0, hi - lo)
Overlapping of two intervals.
49,231
def exvp_scalar(x, y, x0, y0, c2, c4, theta0, ff):
    """Convert virtual pixel to real pixel (scalar version).

    (x0, y0) are given in thousands of pixels; c2/c4 are distortion
    coefficients, theta0 a rotation and ff an anisotropy factor.
    """
    # Plate scale: 0.1944 arcsec/pixel converted to radians.
    factor = 0.1944 * np.pi / (180.0 * 3600)
    dx = x - x0 * 1000
    dy = y - y0 * 1000
    r_pix = np.sqrt(dx ** 2 + dy ** 2)
    r_rad = factor * r_pix
    # Radial distortion polynomial.
    rdist = 1 + c2 * 1.0E4 * r_rad ** 2 + c4 * 1.0E9 * r_rad ** 4
    theta = np.arctan(dx / dy)
    # arctan is only defined up to pi; shift for points below the center.
    if y < y0 * 1000:
        theta -= np.pi
    xdist = rdist * r_pix * np.sin(theta + theta0) + x0 * 1000
    ydist = ff * rdist * r_pix * np.cos(theta + theta0) + y0 * 1000
    return xdist, ydist
Convert virtual pixel to real pixel .
49,232
def expected_distorted_boundaries(islitlet, csu_bar_slit_center, borderlist,
                                  params, parmodel, numpts, deg,
                                  debugplot=0):
    """Return expected SpectrumTrail instances for a given slitlet.

    Each entry of borderlist is a fractional position between the bottom
    (0) and top (1) undistorted slitlet boundaries.
    """
    c2, c4, ff, slit_gap, slit_height, theta0, x0, y0, y_baseline = \
        return_params(islitlet, csu_bar_slit_center, params, parmodel)

    xp = np.linspace(1, EMIR_NAXIS1, numpts)
    # Undistorted vertical extent of the slitlet.
    slit_dist = slit_height * 10 + slit_gap
    ybottom = y_baseline * 100 + (islitlet - 1) * slit_dist
    ytop = ybottom + slit_height * 10

    list_spectrails = []
    for borderval in borderlist:
        yvalue = ybottom + borderval * (ytop - ybottom)
        yp = np.ones(numpts) * yvalue
        # Apply the distortion model and fit a polynomial trail to it.
        xdist, ydist = exvp(xp, yp, x0=x0, y0=y0, c2=c2, c4=c4,
                            theta0=theta0, ff=ff)
        spectrail = SpectrumTrail()
        spectrail.fit(x=xdist, y=ydist, deg=deg, debugplot=debugplot)
        list_spectrails.append(spectrail)
    return list_spectrails
Return expected SpectrumTrail instances associated to a given slitlet .
49,233
def fun_residuals(params, parmodel, bounddict, shrinking_factor,
                  numresolution, islitmin, islitmax, debugplot):
    """Function to be minimised.

    Accumulates, over every slitlet/date-obs entry in bounddict whose
    slitlet number lies in [islitmin, islitmax], the squared difference
    between the expected (model) and measured boundary polynomials,
    sampled on the central `shrinking_factor` fraction of each measured
    x-range, and returns the RMS residual (0.0 if nothing was summed).
    """
    global FUNCTION_EVALUATIONS
    global_residual = 0.0
    nsummed = 0
    read_slitlets = list(bounddict['contents'].keys())
    for tmp_slitlet in read_slitlets:
        # Keys look like 'slitletNN'; the number starts at position 7.
        islitlet = int(tmp_slitlet[7:])
        if islitmin <= islitlet <= islitmax:
            read_dateobs = list(bounddict['contents'][tmp_slitlet].keys())
            for tmp_dateobs in read_dateobs:
                tmp_dict = bounddict['contents'][tmp_slitlet][tmp_dateobs]
                csu_bar_slit_center = tmp_dict['csu_bar_slit_center']
                # Expected lower ([0]) and upper ([1]) boundaries.
                list_spectrails = expected_distorted_boundaries(
                    islitlet, csu_bar_slit_center, [0, 1], params, parmodel,
                    numpts=numresolution, deg=5, debugplot=0)
                poly_lower_expected = list_spectrails[0].poly_funct
                poly_upper_expected = list_spectrails[1].poly_funct
                # --- lower boundary residuals ---
                poly_lower_measured = np.polynomial.Polynomial(
                    tmp_dict['boundary_coef_lower'])
                xmin_lower_bound = tmp_dict['boundary_xmin_lower']
                xmax_lower_bound = tmp_dict['boundary_xmax_lower']
                # Trim (1 - shrinking_factor)/2 of the range at each end.
                dx = (xmax_lower_bound - xmin_lower_bound) * \
                    (1 - shrinking_factor) / 2
                xdum_lower = np.linspace(xmin_lower_bound + dx,
                                         xmax_lower_bound - dx,
                                         num=numresolution)
                poly_diff = poly_lower_expected - poly_lower_measured
                global_residual += np.sum(poly_diff(xdum_lower) ** 2)
                nsummed += numresolution
                # --- upper boundary residuals ---
                poly_upper_measured = np.polynomial.Polynomial(
                    tmp_dict['boundary_coef_upper'])
                xmin_upper_bound = tmp_dict['boundary_xmin_upper']
                xmax_upper_bound = tmp_dict['boundary_xmax_upper']
                # NOTE(review): dx is computed from the *lower* xmin/xmax
                # but applied to the upper range — looks like a copy-paste
                # slip; confirm before changing the fit behaviour.
                dx = (xmax_lower_bound - xmin_lower_bound) * \
                    (1 - shrinking_factor) / 2
                xdum_upper = np.linspace(xmin_upper_bound + dx,
                                         xmax_upper_bound - dx,
                                         num=numresolution)
                poly_diff = poly_upper_expected - poly_upper_measured
                global_residual += np.sum(poly_diff(xdum_upper) ** 2)
                nsummed += numresolution
    if nsummed > 0:
        global_residual = np.sqrt(global_residual / nsummed)
    if debugplot >= 10:
        FUNCTION_EVALUATIONS += 1
        print('-' * 79)
        print('>>> Number of function evaluations:', FUNCTION_EVALUATIONS)
        print('>>> global residual...............:', global_residual)
        params.pretty_print()
    return global_residual
Function to be minimised .
49,234
def overplot_boundaries_from_bounddict(ax, bounddict, micolors, linetype='-'):
    """Overplot measured slitlet boundaries on the current plot."""
    xdum = np.linspace(1, EMIR_NAXIS1, num=EMIR_NAXIS1)
    for islitlet in range(1, EMIR_NBARS + 1):
        # Alternate colors between even and odd slitlets.
        tmpcolor = micolors[islitlet % 2]
        slitlet_key = 'slitlet' + str(islitlet).zfill(2)
        if slitlet_key not in bounddict['contents'].keys():
            continue
        for dateobs in sorted(bounddict['contents'][slitlet_key].keys()):
            tmp_dict = bounddict['contents'][slitlet_key][dateobs]
            # Plot the lower boundary, then the upper one.
            for coef_key in ('boundary_coef_lower', 'boundary_coef_upper'):
                poly = np.polynomial.Polynomial(tmp_dict[coef_key])
                ax.plot(xdum, poly(xdum), tmpcolor + linetype)
Overplot boundaries on current plot .
49,235
def overplot_boundaries_from_params(ax, params, parmodel, list_islitlet,
                                    list_csu_bar_slit_center,
                                    micolors=('m', 'c'), linetype='--',
                                    labels=True, alpha_fill=None,
                                    global_offset_x_pix=0,
                                    global_offset_y_pix=0):
    """Overplot boundaries computed from fitted parameters.

    Returns the lists of lower and upper boundary polynomials.
    """
    xoff = float(global_offset_x_pix)
    yoff = float(global_offset_y_pix)
    list_pol_lower_boundaries = []
    list_pol_upper_boundaries = []
    xdum = np.linspace(1, EMIR_NAXIS1, num=EMIR_NAXIS1)
    for islitlet, csu_bar_slit_center in zip(list_islitlet,
                                             list_csu_bar_slit_center):
        tmpcolor = micolors[islitlet % 2]
        # Expected lower/upper boundary polynomials from the fitted model.
        pol_lower_expected = expected_distorted_boundaries(
            islitlet, csu_bar_slit_center, [0], params, parmodel,
            numpts=101, deg=5, debugplot=0)[0].poly_funct
        list_pol_lower_boundaries.append(pol_lower_expected)
        pol_upper_expected = expected_distorted_boundaries(
            islitlet, csu_bar_slit_center, [1], params, parmodel,
            numpts=101, deg=5, debugplot=0)[0].poly_funct
        list_pol_upper_boundaries.append(pol_upper_expected)

        ydum1 = pol_lower_expected(xdum)
        ax.plot(xdum + xoff, ydum1 + yoff, tmpcolor + linetype)
        ydum2 = pol_upper_expected(xdum)
        ax.plot(xdum + xoff, ydum2 + yoff, tmpcolor + linetype)
        if alpha_fill is not None:
            ax.fill_between(xdum + xoff, ydum1 + yoff, ydum2 + yoff,
                            facecolor=tmpcolor, alpha=alpha_fill)
        if labels:
            # Label each slitlet at mid-height, near its CSU bar position.
            yc_lower = pol_lower_expected(EMIR_NAXIS1 / 2 + 0.5)
            yc_upper = pol_upper_expected(EMIR_NAXIS1 / 2 + 0.5)
            xcsu = EMIR_NAXIS1 * csu_bar_slit_center / 341.5
            ax.text(xcsu + xoff, (yc_lower + yc_upper) / 2 + yoff,
                    str(islitlet), fontsize=10, va='center', ha='center',
                    bbox=dict(boxstyle="round,pad=0.1", fc="white",
                              ec="grey"),
                    color=tmpcolor, fontweight='bold',
                    backgroundcolor='white')
    return list_pol_lower_boundaries, list_pol_upper_boundaries
Overplot boundaries computed from fitted parameters .
49,236
def bound_params_from_dict(bound_param_dict):
    """Define an ~lmfit.parameter.Parameters object from a dictionary."""
    params = Parameters()
    parmodel = bound_param_dict['meta_info']['parmodel']
    for mainpar in EXPECTED_PARAMETER_LIST:
        if mainpar not in bound_param_dict['contents'].keys():
            raise ValueError('Parameter ' + mainpar + ' not found!')
        if parmodel == "longslit":
            # One scalar value per parameter.
            dumdict = bound_param_dict['contents'][mainpar]
            params.add(mainpar, value=dumdict["value"], vary=dumdict["vary"])
        elif parmodel == 'multislit':
            # Three polynomial coefficients per parameter.
            for subpar in ['a0s', 'a1s', 'a2s']:
                if subpar not in bound_param_dict['contents'][mainpar].keys():
                    raise ValueError('Subparameter ' + subpar + ' not found' +
                                     ' under parameter ' + mainpar)
                cpar = mainpar + '_' + subpar
                dumdict = bound_param_dict['contents'][mainpar][subpar]
                params.add(cpar, value=dumdict["value"],
                           vary=dumdict["vary"])
        else:
            print('parmodel: ', bound_param_dict['meta_info']['parmodel'])
            raise ValueError('Unexpected parmodel')
    return params
Define ~lmfit . parameter . Parameters object from dictionary .
49,237
def transaction_start(self, name):
    """Start a transaction; returns the new nesting depth.

    The outermost call begins the real transaction; nested calls delegate
    to _transaction_started(name) — presumably a savepoint; confirm
    against the backend implementation.
    """
    if not name:
        raise ValueError("Transaction name cannot be empty")
    self.transaction_count += 1
    logger.debug("{}. Start transaction {}".format(self.transaction_count,
                                                   name))
    if self.transaction_count == 1:
        self._transaction_start()
    else:
        self._transaction_started(name)
    return self.transaction_count
start a transaction
49,238
def transaction_fail(self, name):
    """Roll back the current transaction level, if currently in one."""
    if not name:
        raise ValueError("Transaction name cannot be empty")
    if self.transaction_count > 0:
        logger.debug("{}. Failing transaction {}".format(
            self.transaction_count, name))
        if self.transaction_count == 1:
            # Outermost level: roll back the whole transaction.
            self._transaction_fail()
        else:
            # Nested level: delegate to _transaction_failing(name).
            self._transaction_failing(name)
        self.transaction_count -= 1
rollback a transaction if currently in one
49,239
def connect(self, connection_config=None, *args, **kwargs):
    """Connect to the interface; no-op when already connected."""
    if self.connected:
        return self.connected
    if connection_config:
        self.connection_config = connection_config

    # Optimistically mark connected; roll back on failure.
    self.connected = True
    try:
        self._connect(self.connection_config)
    except Exception as e:
        self.connected = False
        self.raise_error(e)

    self.log("Connected {}", self.connection_config.interface_name)
    return self.connected
connect to the interface
49,240
def close(self):
    """Close an open connection; no-op when already closed. Returns True."""
    if not self.connected:
        return True
    self._close()
    self.connected = False
    self.log("Closed Connection {}", self.connection_config.interface_name)
    return True
close an open connection
49,241
def query(self, query_str, *query_args, **query_options):
    """Run a raw query string against the db through a managed connection."""
    with self.connection(**query_options) as conn:
        query_options['connection'] = conn
        result = self._query(query_str, query_args, **query_options)
    return result
run a raw query on the db
49,242
def set_table(self, schema, **kwargs):
    """Add the schema's table (and its indexes) to the db if missing."""
    with self.connection(**kwargs) as connection:
        kwargs['connection'] = connection
        if self.has_table(str(schema), **kwargs):
            return True
        try:
            with self.transaction(**kwargs):
                self._set_table(schema, **kwargs)
                for index_name, index in schema.indexes.items():
                    self.set_index(schema, name=index.name,
                                   fields=index.fields,
                                   connection=connection, **index.options)
        except InterfaceError:
            # A concurrent creator may have won the race; re-raise only
            # if the table still does not exist.
            if not self.has_table(schema, **kwargs):
                raise
add the table to the db
49,243
def has_table(self, table_name, **kwargs):
    """Check whether the named table exists in the db."""
    with self.connection(kwargs.get('connection', None)) as connection:
        kwargs['connection'] = connection
        found = self.get_tables(table_name, **kwargs)
    return len(found) > 0
check to see if a table is in the db
49,244
def get_tables(self, table_name="", **kwargs):
    """Get all matching table names of the currently connected db."""
    with self.connection(**kwargs) as conn:
        kwargs['connection'] = conn
        tables = self._get_tables(str(table_name), **kwargs)
    return tables
get all the tables of the currently connected db
49,245
def delete_table(self, schema, **kwargs):
    """Remove a table matching schema from the db; True on success/no-op."""
    with self.connection(**kwargs) as connection:
        kwargs['connection'] = connection
        if not self.has_table(str(schema), **kwargs):
            return True
        with self.transaction(**kwargs):
            self._delete_table(schema, **kwargs)
    return True
remove a table matching schema from the db
49,246
def delete_tables(self, **kwargs):
    """Remove ALL tables from the db.

    Requires disable_protection=True as a safety switch.
    """
    if not kwargs.get('disable_protection', False):
        raise ValueError('In order to delete all the tables, pass in disable_protection=True')
    with self.connection(**kwargs) as connection:
        kwargs['connection'] = connection
        self._delete_tables(**kwargs)
removes all the tables from the db
49,247
def get_indexes(self, schema, **kwargs):
    """Get all the indexes defined on the schema's table."""
    with self.connection(**kwargs) as conn:
        kwargs['connection'] = conn
        indexes = self._get_indexes(schema, **kwargs)
    return indexes
get all the indexes
49,248
def set_index(self, schema, name, fields, **index_options):
    """Add an index on fields to the table inside a transaction."""
    with self.transaction(**index_options) as connection:
        index_options['connection'] = connection
        self._set_index(schema, name, fields, **index_options)
    return True
add an index to the table
49,249
def insert(self, schema, fields, **kwargs):
    """Persist fields into the db; returns the backend insert result."""
    r = 0
    with self.connection(**kwargs) as connection:
        kwargs['connection'] = connection
        try:
            with self.transaction(**kwargs):
                r = self._insert(schema, fields, **kwargs)
        except Exception as e:
            exc_info = sys.exc_info()
            # Give the interface one chance to fix the problem (e.g. add
            # a missing column) and retry.
            if self.handle_error(schema, e, **kwargs):
                r = self._insert(schema, fields, **kwargs)
            else:
                self.raise_error(e, exc_info)
    return r
Persist d into the db
49,250
def update(self, schema, fields, query, **kwargs):
    """Persist fields into the db rows matching the query's criteria."""
    with self.connection(**kwargs) as connection:
        kwargs['connection'] = connection
        try:
            with self.transaction(**kwargs):
                r = self._update(schema, fields, query, **kwargs)
        except Exception as e:
            exc_info = sys.exc_info()
            # One retry after the interface had a chance to self-heal.
            if self.handle_error(schema, e, **kwargs):
                r = self._update(schema, fields, query, **kwargs)
            else:
                self.raise_error(e, exc_info)
    return r
Persist the query . fields into the db that match query . fields_where
49,251
def _get_query ( self , callback , schema , query = None , * args , ** kwargs ) : if not query : query = Query ( ) ret = None with self . connection ( ** kwargs ) as connection : kwargs [ 'connection' ] = connection try : if connection . in_transaction ( ) : with self . transaction ( ** kwargs ) : ret = callback ( schema , query , * args , ** kwargs ) else : ret = callback ( schema , query , * args , ** kwargs ) except Exception as e : exc_info = sys . exc_info ( ) if self . handle_error ( schema , e , ** kwargs ) : ret = callback ( schema , query , * args , ** kwargs ) else : self . raise_error ( e , exc_info ) return ret
this is just a common wrapper around all the get queries since they are all really similar in how they execute
49,252
def get_one(self, schema, query=None, **kwargs):
    """Get one row matching the query; empty dict when nothing matches."""
    ret = self._get_query(self._get_one, schema, query, **kwargs)
    return ret if ret else {}
get one row from the db matching filters set in query
49,253
def get(self, schema, query=None, **kwargs):
    """Get all rows matching the query; empty list when nothing matches."""
    ret = self._get_query(self._get, schema, query, **kwargs)
    return ret if ret else []
get matching rows from the db matching filters set in query
49,254
def log(self, format_str, *format_args, **log_options):
    """Wrapper around the module's logger supporting str.format-style args.

    Exceptions are logged with their traceback; anything else is formatted
    lazily, only when the target level is enabled.
    """
    if isinstance(format_str, Exception):
        logger.exception(format_str, *format_args)
        return
    level = log_options.get('level', logging.DEBUG)
    if logger.isEnabledFor(level):
        message = format_str.format(*format_args) if format_args else format_str
        logger.log(level, message)
wrapper around the module s logger
49,255
def raise_error(self, e, exc_info=None):
    """Re-raise e, wrapping non-builtin exceptions as InterfaceError while
    preserving the original traceback."""
    if not exc_info:
        exc_info = sys.exc_info()
    if not isinstance(e, InterfaceError):
        # Builtin exceptions (ValueError, KeyError, ...) pass through as-is.
        if not hasattr(builtins, e.__class__.__name__):
            e = self._create_error(e, exc_info)
    reraise(e.__class__, e, exc_info[2])
this is just a wrapper to make the passed in exception an InterfaceError
49,256
def _set_all_tables ( self , schema , ** kwargs ) : with self . transaction ( ** kwargs ) as connection : kwargs [ 'connection' ] = connection for field_name , field_val in schema . fields . items ( ) : s = field_val . schema if s : self . _set_all_tables ( s , ** kwargs ) self . set_table ( schema , ** kwargs ) return True
You can run into a problem when you are trying to set a table and it has a foreign key to a table that doesn t exist so this method will go through all fk refs and make sure the tables exist
49,257
def _set_all_fields ( self , schema , ** kwargs ) : current_fields = self . get_fields ( schema , ** kwargs ) for field_name , field in schema . fields . items ( ) : if field_name not in current_fields : if field . required : raise ValueError ( 'Cannot safely add {} on the fly because it is required' . format ( field_name ) ) else : query_str = [ ] query_str . append ( 'ALTER TABLE' ) query_str . append ( ' {}' . format ( schema ) ) query_str . append ( 'ADD COLUMN' ) query_str . append ( ' {}' . format ( self . get_field_SQL ( field_name , field ) ) ) query_str = os . linesep . join ( query_str ) self . query ( query_str , ignore_result = True , ** kwargs ) return True
this will add fields that don t exist in the table if they can be set to NULL the reason they have to be NULL is adding fields to Postgres that can be NULL is really light but if they have a default value then it can be costly
49,258
def random_path(instance, filename):
    """Random path generator for uploads; use as the upload_to= argument
    of FileFields."""
    uuid_hex = get_uuid()
    # Shard as <first 3 chars>/<rest>/filename to keep directories small.
    return os.path.join(uuid_hex[:3], uuid_hex[3:], filename)
Random path generator for uploads; specify this as the upload_to= argument of FileFields.
49,259
def initialize(signal_number=DEFAULT_TIMER_SIGNAL_NUMBER,
               update_period_s=DEFAULT_UPDATE_PERIOD_S):
    """Initialize metrics; must be invoked once before any other call.

    Idempotent: subsequent calls are no-ops.
    """
    global initialized
    if initialized:
        return
    initialized = True
    # Fire the uwsgi signal periodically; `emit` runs on the mule.
    uwsgi.add_timer(signal_number, update_period_s)
    uwsgi.register_signal(signal_number, MULE, emit)
Initialize metrics must be invoked at least once prior to invoking any other method .
49,260
def emit(_):
    """Serialize all metrics into the shared memory-mapped buffer.

    Invoked as a uwsgi signal handler (the signal argument is ignored).
    Raises NotInitialized if ``initialize`` was never called. When the
    marshalled payload exceeds MAX_MARSHALLED_VIEW_SIZE the write is
    skipped and a warning is logged instead.
    """
    if not initialized:
        raise NotInitialized
    view = {
        'version': __version__,
        'counters': {},
        'gauges': {},
        'histograms': {},
        'meters': {},
        'timers': {},
    }
    # group every metric under its type, keyed "module.name"
    for (ty, module, name), metric in six.iteritems(all_metrics):
        view[ty]['%s.%s' % (module, name)] = metric.view()
    marshalled_view = marshal.dumps(view)
    if len(marshalled_view) > MAX_MARSHALLED_VIEW_SIZE:
        log.warn('Marshalled length too large, got %d, max %d. '
                 'Try recording fewer metrics or increasing '
                 'MAX_MARSHALLED_VIEW_SIZE' % (len(marshalled_view), MAX_MARSHALLED_VIEW_SIZE))
        return
    marshalled_metrics_mmap.seek(0)
    try:
        # uwsgi lock guards the mmap against concurrent readers/writers
        uwsgi.lock()
        marshalled_metrics_mmap.write(marshalled_view)
    finally:
        uwsgi.unlock()
Serialize metrics to the memory mapped buffer .
49,261
def view():
    """Return the metrics dict most recently serialized by ``emit``.

    Reads the shared mmap under the uwsgi lock and unmarshals it.
    Raises NotInitialized if ``initialize`` was never called.
    """
    if not initialized:
        raise NotInitialized
    marshalled_metrics_mmap.seek(0)
    try:
        uwsgi.lock()
        marshalled_view = marshalled_metrics_mmap.read(MAX_MARSHALLED_VIEW_SIZE)
    finally:
        uwsgi.unlock()
    return marshal.loads(marshalled_view)
Get a dictionary representation of current metrics .
49,262
def convert_out(self, obj):
    """Ensure the reduced image carries an EMIRUUID header; return the product.

    Delegates conversion to the base class, then stamps a fresh UUID1
    into the primary header if none is present yet.
    """
    newobj = super(ProcessedImageProduct, self).convert_out(obj)
    if newobj:
        hdulist = newobj.open()
        hdr = hdulist[0].header
        if 'EMIRUUID' not in hdr:
            hdr['EMIRUUID'] = str(uuid.uuid1())
    return newobj
Write EMIRUUID header on reduction
49,263
def verify_client(endpoint_context, request, authorization_info):
    """Authenticate a client, guessing the method from where credentials appear.

    With no Authorization header the request body is inspected
    (client_secret_post, private_key_jwt or bearer_body); otherwise the
    header scheme selects client_secret_basic or bearer_header. On
    success the resolved method name is cached on the client's
    registration record, keyed by the request class name.

    Returns the auth-info dict (contains 'method' and, when resolvable,
    'client_id'); raises UnknownOrNoAuthnMethod or ValueError on failure.
    """
    if not authorization_info:
        # no Authorization header: look for credentials in the body
        if 'client_id' in request and 'client_secret' in request:
            auth_info = ClientSecretPost(endpoint_context).verify(request)
            auth_info['method'] = 'client_secret_post'
        elif 'client_assertion' in request:
            auth_info = JWSAuthnMethod(endpoint_context).verify(request)
            auth_info['method'] = 'private_key_jwt'
        elif 'access_token' in request:
            auth_info = BearerBody(endpoint_context).verify(request)
            auth_info['method'] = 'bearer_body'
        else:
            raise UnknownOrNoAuthnMethod()
    else:
        # Authorization header present: dispatch on its scheme
        if authorization_info.startswith('Basic '):
            auth_info = ClientSecretBasic(endpoint_context).verify(request, authorization_info)
            auth_info['method'] = 'client_secret_basic'
        elif authorization_info.startswith('Bearer '):
            auth_info = BearerHeader(endpoint_context).verify(request, authorization_info)
            auth_info['method'] = 'bearer_header'
        else:
            raise UnknownOrNoAuthnMethod(authorization_info)
    try:
        client_id = auth_info['client_id']
    except KeyError:
        client_id = ''
        # no client_id: try to recover it from the bearer token's session
        try:
            _token = auth_info['token']
        except KeyError:
            logger.warning('Unknown client ID')
        else:
            sinfo = endpoint_context.sdb[_token]
            auth_info['client_id'] = sinfo['authn_req']['client_id']
    else:
        try:
            _cinfo = endpoint_context.cdb[client_id]
        except KeyError:
            raise ValueError('Unknown Client ID')
        else:
            # a str entry is an alias pointing at the real record
            if isinstance(_cinfo, str):
                try:
                    _cinfo = endpoint_context.cdb[_cinfo]
                except KeyError:
                    raise ValueError('Unknown Client ID')
            try:
                valid_client_info(_cinfo)
            except KeyError:
                logger.warning('Client registration has timed out')
                raise ValueError('Not valid client')
            else:
                # remember which authn method this request type used
                try:
                    endpoint_context.cdb[client_id]['auth_method'][request.__class__.__name__] = auth_info['method']
                except KeyError:
                    try:
                        endpoint_context.cdb[client_id]['auth_method'] = {request.__class__.__name__: auth_info['method']}
                    except KeyError:
                        pass
    return auth_info
Verify a client, guessing the authentication method from the request contents and the Authorization header.
49,264
def _post_parse_request(self, request, client_id='', **kwargs):
    """Coerce and verify a token-refresh request after initial parsing.

    Re-wraps the parsed message as a RefreshAccessTokenRequest, verifies
    it against the endpoint's key jar (empty string when no keyjar is
    configured) and guarantees a client_id is present before returning.
    """
    request = RefreshAccessTokenRequest(**request.to_dict())
    try:
        keyjar = self.endpoint_context.keyjar
    except AttributeError:
        keyjar = ""
    request.verify(keyjar=keyjar, opponent_id=client_id)
    if "client_id" not in request:
        request["client_id"] = client_id
    logger.debug("%s: %s" % (request.__class__.__name__, sanitize(request)))
    return request
This is where clients come to refresh their access tokens
49,265
def random_id(length):
    """Return a random identifier of *length* ASCII letters and digits."""
    alphabet = string.ascii_letters + string.digits
    return "".join(random.choice(alphabet) for _ in range(length))
Generates a random ID of given length
49,266
def delayed_close(closable):
    """Context-manager body: defer ``close`` on *closable* until exit.

    While active, ``closable.close`` is replaced with a no-op so inner
    code cannot close it early; on exit the original ``close`` is
    restored and then invoked. Objects without a ``close`` attribute
    pass through untouched. NOTE(review): presumably wrapped with
    ``contextlib.contextmanager`` at the definition site -- confirm.
    """
    close = getattr(closable, "close", None)
    if close:
        def replacement_close(*args, **kw):
            # swallow close attempts made inside the context
            pass
        setattr(closable, "close", replacement_close)
    try:
        yield closable
    finally:
        if close:
            setattr(closable, "close", close)
            closable.close()
Delay close until this contextmanager dies
49,267
def map_sid2uid(self, sid, uid):
    """Record the two-way link between a session ID and a user ID."""
    for table, key, value in (('sid2uid', sid, uid), ('uid2sid', uid, sid)):
        self.set(table, key, value)
Store the connection between a Session ID and a User ID
49,268
def map_sid2sub(self, sid, sub):
    """Record the two-way link between a session ID and a subject ID."""
    for table, key, value in (('sid2sub', sid, sub), ('sub2sid', sub, sid)):
        self.set(table, key, value)
Store the connection between a Session ID and a subject ID .
49,269
def get_subs_by_uid(self, uid):
    """Collect every subject ID reachable from the sessions of *uid*."""
    subs = set()
    for session_id in self.get('uid2sid', uid):
        subs.update(self.get('sid2sub', session_id))
    return subs
Find all subject identifiers that is connected to a User ID .
49,270
def remove_session_id(self, sid):
    """Drop every mapping that references session *sid*."""
    # detach the session from each user that owns it, then forget its users
    for user_id in self.get('sid2uid', sid):
        self.remove('uid2sid', user_id, sid)
    self.delete('sid2uid', sid)
    # detach the session from each subject, then forget its subjects
    for subject in self.get('sid2sub', sid):
        self.remove('sub2sid', subject, sid)
    self.delete('sid2sub', sid)
Remove all references to a specific Session ID
49,271
def remove_uid(self, uid):
    """Drop every mapping that references user *uid*."""
    for session_id in self.get('uid2sid', uid):
        self.remove('sid2uid', session_id, uid)
    self.delete('uid2sid', uid)
Remove all references to a specific User ID
49,272
def remove_sub(self, sub):
    """Drop every mapping that references subject *sub*."""
    for session_id in self.get('sub2sid', sub):
        self.remove('sid2sub', session_id, sub)
    self.delete('sub2sid', sub)
Remove all references to a specific Subject ID
49,273
def up ( force = True , env = None , ** kwargs ) : "Starts a new experiment" inventory = os . path . join ( os . getcwd ( ) , "hosts" ) conf = Configuration . from_dictionnary ( provider_conf ) provider = Enos_vagrant ( conf ) roles , networks = provider . init ( ) check_networks ( roles , networks ) env [ "roles" ] = roles env [ "networks" ] = networks
Starts a new experiment
49,274
def convert_args(test_fcn, *test_args):
    """Decorator factory: pass each argument named in *test_args* through *test_fcn*.

    The wrapped function is always called with keyword arguments: any
    positional arguments are first folded into kwargs by matching them
    against the function's argspec, then every listed argument that the
    function accepts is converted with *test_fcn* before the call.
    """
    def wrapper(origfcn):
        @functools.wraps(origfcn)
        def newfcn(*args, **kwargs):
            argspec = getargspec(origfcn)
            # fold positional args into kwargs by parameter name
            kwargs.update(zip(argspec.args, args))
            for a in test_args:
                if a in argspec.args:
                    kwargs[a] = test_fcn(kwargs[a])
            return origfcn(**kwargs)
        return newfcn
    return wrapper
Decorator to be using in formulas to convert test_args depending on the test_fcn .
49,275
def get_public_attributes(cls, as_list=True):
    """Return the attribute names of *cls* that are neither private nor magic.

    Yields a generator instead of a list when *as_list* is False.
    """
    public = (name for name in dir(cls) if not name.startswith('_'))
    return list(public) if as_list else public
Return class attributes that are neither private nor magic .
49,276
def register(self, newitems, *args, **kwargs):
    """Register *newitems* plus per-item metadata in the registry (Python 2).

    Raises DuplicateRegItemError if any new key is already registered,
    and MismatchRegMetaKeysError if a metadata mapping references keys
    outside *newitems*.
    """
    newkeys = newitems.viewkeys()
    # reject any key that is already present
    if any(self.viewkeys() & newkeys):
        raise DuplicateRegItemError(self.viewkeys() & newkeys)
    self.update(newitems)
    # positional metadata mappings line up with self.meta_names in order
    kwargs.update(zip(self.meta_names, args))
    for k, v in kwargs.iteritems():
        meta = getattr(self, k)
        if v:
            # metadata may only describe the items being registered
            if not v.viewkeys() <= newkeys:
                raise MismatchRegMetaKeysError(newkeys - v.viewkeys())
            meta.update(v)
Register newitems in registry .
49,277
def unregister(self, items):
    """Remove *items* (and their metadata) from the registry (Python 2).

    Raises AttributeError if an instance attribute looks like a meta
    store but is not declared in ``self.meta_names``.
    """
    items = _listify(items)
    # candidate meta stores: public instance attrs not inherited from Registry
    meta_names = (m for m in vars(self).iterkeys()
                  if (not m.startswith('_') and m not in dir(Registry)))
    for m in meta_names:
        if m not in self.meta_names:
            # BUG FIX: the message previously lacked the % interpolation,
            # so the offending name was never reported.
            raise AttributeError('Meta name %s not listed.' % m)
    for it in items:
        if it in self:
            self.pop(it)
        # NOTE(review): meta cleanup runs for every item, registered or
        # not; each pop is guarded by membership so this is safe either way.
        for m in (getattr(self, m_) for m_ in self.meta_names):
            if it in m:
                m.pop(it)
Remove items from registry .
49,278
def default(self, o):
    """JSONEncoder hook converting quantities and NumPy arrays to plain values.

    Q_ quantities (presumably pint -- confirm) are reduced to their
    magnitude, ndarrays to nested lists; everything else defers to the
    base encoder, which raises TypeError for unserializable objects.
    """
    if isinstance(o, Q_):
        return o.magnitude
    elif isinstance(o, np.ndarray):
        return o.tolist()
    else:
        return super(SimKitJSONEncoder, self).default(o)
JSONEncoder default method that converts NumPy arrays and quantities objects to lists .
49,279
def set_meta(mcs, bases, attr):
    """Merge the ``Meta`` classes of *bases* into this class's attr dict (Python 2).

    Pops the Meta class out of *attr* (creating an empty classic class
    via ``types.ClassType`` when absent), copies in any public attribute
    from base Metas that the subclass Meta does not already define, and
    stores the result under ``mcs._meta_attr``. Returns the mutated dict.
    """
    meta = attr.pop(mcs._meta_cls, types.ClassType(mcs._meta_cls, (), {}))
    meta_attrs = get_public_attributes(meta)
    for base in bases:
        base_meta = getattr(base, mcs._meta_cls, None)
        if base_meta is None:
            continue
        for a in get_public_attributes(base_meta, as_list=False):
            if a in meta_attrs:
                continue  # subclass Meta wins over base Meta
            setattr(meta, a, getattr(base_meta, a))
    attr[mcs._meta_attr] = meta
    return attr
Get all of the Meta classes from bases and combine them with this class .
49,280
def factory(cls, **kwargs):
    """Instantiate the UserAuthnMethod subclass in this module named *cls*.

    Scans the current module for matching classes; returns None when no
    class name matches.
    """
    module = sys.modules[__name__]
    for _, candidate in inspect.getmembers(module):
        if not (inspect.isclass(candidate) and issubclass(candidate, UserAuthnMethod)):
            continue
        try:
            matched = candidate.__name__ == cls
        except AttributeError:
            continue
        if matched:
            return candidate(**kwargs)
Factory method that can be used to easily instantiate a class instance
49,281
def register(self, new_formulas, *args, **kwargs):
    """Register formulas and their metadata.

    Positional metadata arguments are matched to ``self.meta_names`` in
    order, then the whole set is forwarded to the base Registry.
    """
    kwargs.update(zip(self.meta_names, args))
    super(FormulaRegistry, self).register(new_formulas, **kwargs)
Register formula and meta data .
49,282
def create_blazar_client(config, session):
    """Build a Blazar (reservation service) client for $OS_REGION_NAME.

    NOTE(review): the *config* parameter is unused -- confirm intended.
    Raises KeyError if OS_REGION_NAME is not set in the environment.
    """
    return blazar_client.Client(session=session,
                                service_type="reservation",
                                region_name=os.environ["OS_REGION_NAME"])
Check the reservation creates a new one if nescessary .
49,283
def reconnecting(count=None, backoff=None):
    """Decorator for Interface methods that retries when the connection closed.

    Retries only InterfaceError whose wrapped message contains "closed";
    any other error propagates immediately. *count* and *backoff* default
    to the connection options ``reconnect_attempts`` (3) and
    ``reconnect_backoff`` (1.0 s); the sleep before attempt N is
    (N-1)*backoff seconds, so the first attempt runs immediately.
    """
    reconn_params = {"count": count, "backoff": backoff}

    def retry_decorator(func):
        @wraps(func)
        def wrapper(self, *args, **kwargs):
            count = reconn_params["count"]
            backoff = reconn_params["backoff"]
            if count is None:
                count = self.connection_config.options.get('reconnect_attempts', 3)
            if backoff is None:
                backoff = self.connection_config.options.get('reconnect_backoff', 1.0)
            count = int(count)
            backoff = float(backoff)
            for attempt in range(1, count + 1):
                try:
                    # linear backoff: zero before the first attempt
                    backoff_seconds = float(attempt - 1) * backoff
                    if backoff_seconds:
                        logger.debug("sleeping {} seconds before attempt {}".format(backoff_seconds, attempt))
                        time.sleep(backoff_seconds)
                    return func(self, *args, **kwargs)
                except InterfaceError as e:
                    e_msg = str(e.e)
                    if "closed" in e_msg.lower():
                        if attempt == count:
                            logger.debug("all {} attempts failed".format(count))
                            raise
                        else:
                            logger.debug("attempt {}/{} failed, retrying".format(attempt, count))
                    else:
                        # not a closed-connection error: do not retry
                        raise
        return wrapper
    return retry_decorator
this is a very specific decorator meant to be used on Interface classes . It will attempt to reconnect if the connection is closed and run the same method again .
49,284
def escape_header(val):
    """Escape *val* so it can be used in a MIME header.

    Pure-ASCII values are percent-quoted directly; values containing
    non-ASCII characters fall back to the RFC 2231 ``utf-8''`` extended
    form. None passes through unchanged.
    """
    if val is None:
        return None
    try:
        quoted = quote(val, encoding="ascii", safe="/ ")
    except ValueError:
        # non-ASCII input: use the RFC 2231 extended-value syntax
        quoted = "utf-8''" + quote(val, encoding="utf-8", safe="/ ")
    return quoted
Escapes a value so that it can be used in a mime header
49,285
def make_streams(name, value, boundary, encoding):
    """Generate the stream(s) encoding one multipart form name/value pair.

    Returns a 1-tuple of a single in-memory stream for plain values, or
    a 3-tuple (header stream, file-like value, CRLF trailer stream) when
    *value* is file-like, so large files are never read into memory.
    A mapping value of the form {"name": ..., "value": ..., "mime": ...}
    describes an explicit file upload.
    """
    filename = None
    mime = None
    if isinstance(value, collections.Mapping) and "name" in value and "value" in value:
        filename = value["name"]
        try:
            mime = value["mime"]
        except KeyError:
            pass
        value = value["value"]
    if not filename:
        # file-like objects expose their path via .name
        filename = getattr(value, "name", None)
    if filename:
        # strip any directory component from the client-side path
        filename = os.path.split(filename)[1]
        mime = mime or "application/octet-stream"
    name, filename, mime = [escape_header(v) for v in (name, filename, mime)]
    stream = BytesIO()
    stream.write("--{}\r\n".format(boundary).encode(encoding))
    if not filename:
        stream.write('Content-Disposition: form-data; name="{}"\r\n'.format(name).encode(encoding))
    else:
        stream.write('Content-Disposition: form-data; name="{}"; filename="{}"\r\n'.format(name, filename).encode(encoding))
    if mime:
        stream.write("Content-Type: {}\r\n".format(mime).encode(encoding))
    stream.write(b"\r\n")
    if hasattr(value, "read"):
        # file-like: emit header, the file itself, then a CRLF trailer
        stream.seek(0)
        return stream, value, BytesIO("\r\n".encode(encoding))
    # plain value: non-str/bytes values are JSON-serialized
    value = value if isinstance(value, (str, bytes)) else json.dumps(value)
    if isinstance(value, bytes):
        stream.write(value)
    else:
        stream.write(value.encode(encoding))
    stream.write(b"\r\n")
    stream.seek(0)
    return (stream,)
Generates one or more streams for each name value pair
49,286
def len(self):
    """Total number of bytes remaining across all member streams.

    Each stream's position is preserved: only the bytes from the current
    position to the end are counted.
    """
    total = 0
    for stream in self.streams:
        pos = stream.tell()
        try:
            stream.seek(0, 2)  # end of stream
            total += stream.tell() - pos
        finally:
            stream.seek(pos)
    return total
Length of the data stream
49,287
def headers(self):
    """All HTTP headers needed to send this multipart payload."""
    content_type = "multipart/form-data; boundary={}".format(self.boundary)
    return {
        "Content-Type": content_type,
        "Content-Length": str(self.len),
        "Content-Encoding": self.encoding,
    }
All headers needed to make a request
49,288
def mk_pools ( things , keyfnc = lambda x : x ) : "Indexes a thing by the keyfnc to construct pools of things." pools = { } sthings = sorted ( things , key = keyfnc ) for key , thingz in groupby ( sthings , key = keyfnc ) : pools . setdefault ( key , [ ] ) . extend ( list ( thingz ) ) return pools
Indexes a thing by the keyfnc to construct pools of things .
49,289
def pick_things ( pools , key , n ) : "Picks a maximum of n things in a dict of indexed pool of things." pool = pools . get ( key ) if not pool : return [ ] things = pool [ : n ] del pool [ : n ] return things
Picks a maximum of n things from a dict of indexed pools of things.
49,290
def listen(room):
    """Open a volafile room and reply to chat messages until disconnected.

    Replies "ayy lmao" to messages mentioning "parrot", "*kok" to
    lol/lel/kek, and otherwise echoes the message with "lain" replaced
    by "purpleadmin" (case-insensitive). Admin messages and our own
    messages are ignored.
    """
    def onmessage(m):
        print(m)
        # never reply to admins or to our own messages
        if m.admin or m.nick == r.user.name:
            return
        if "parrot" in m.msg.lower():
            r.post_chat("ayy lmao")
        elif m.msg.lower() in ("lol", "lel", "kek"):
            r.post_chat("*kok")
        else:
            # BUG FIX: re.I was previously passed as the positional
            # `count` argument of re.sub (so only 2 replacements were
            # made and case was NOT ignored); it must be flags=re.I.
            r.post_chat(re.sub(r"\blain\b", "purpleadmin", m.msg, flags=re.I))

    with Room(room) as r:
        r.user.change_nick("DumbParrot")
        r.add_listener("chat", onmessage)
        r.listen()
Open a volafile room and start listening to it
49,291
def average_dtu_configurations(list_of_objects):
    """Return a DtuConfiguration averaging each member over *list_of_objects*.

    An empty input yields a default (uninitialised) DtuConfiguration.
    """
    result = DtuConfiguration()
    if len(list_of_objects) == 0:
        return result
    list_of_members = result.__dict__.keys()
    for member in list_of_members:
        # mean of this member across every provided configuration
        result.__dict__[member] = np.mean([tmp_dtu.__dict__[member] for tmp_dtu in list_of_objects])
    return result
Return DtuConfiguration instance with averaged values .
49,292
def maxdiff_dtu_configurations(list_of_objects):
    """Return a DtuConfiguration holding each member's max-min spread.

    An empty input yields a default (uninitialised) DtuConfiguration.
    """
    result = DtuConfiguration()
    if len(list_of_objects) == 0:
        return result
    list_of_members = result.__dict__.keys()
    for member in list_of_members:
        tmp_array = np.array([tmp_dtu.__dict__[member] for tmp_dtu in list_of_objects])
        minval = tmp_array.min()
        maxval = tmp_array.max()
        # peak-to-peak difference of this member across configurations
        result.__dict__[member] = maxval - minval
    return result
Return DtuConfiguration instance with maximum differences .
49,293
def define_from_fits(cls, fitsobj, extnum=0):
    """Build the configuration from the header of FITS extension *extnum*."""
    with fits.open(fitsobj) as hdulist:
        image_header = hdulist[extnum].header
        return cls.define_from_header(image_header)
Define class object from header information in FITS file .
49,294
def define_from_dictionary(cls, inputdict):
    """Build a DtuConfiguration from a dict keyed by member name.

    Raises KeyError if *inputdict* is missing any member.
    """
    result = DtuConfiguration()
    for member in list(result.__dict__):
        setattr(result, member, inputdict[member])
    return result
Define class object from dictionary .
49,295
def define_from_values(cls, xdtu, ydtu, zdtu, xdtu_0, ydtu_0, zdtu_0):
    """Build a DtuConfiguration directly from the six DTU values."""
    result = DtuConfiguration()
    members = ('xdtu', 'ydtu', 'zdtu', 'xdtu_0', 'ydtu_0', 'zdtu_0')
    values = (xdtu, ydtu, zdtu, xdtu_0, ydtu_0, zdtu_0)
    for name, value in zip(members, values):
        setattr(result, name, value)
    return result
Define class object from from provided values .
49,296
def closeto(self, other, abserror):
    """True when every DTU member of *self* is within *abserror* of *other*'s."""
    members = ('xdtu', 'ydtu', 'zdtu', 'xdtu_0', 'ydtu_0', 'zdtu_0')
    return all(abs(getattr(self, m) - getattr(other, m)) <= abserror
               for m in members)
Check that all the members are equal within provided absolute error .
49,297
def init(self, force_deploy=False):
    """Reserve and deploy the vagrant boxes; return (roles, networks).

    Renders a Vagrantfile from the provider configuration, brings the
    boxes up (destroying them first when *force_deploy* is set), then
    maps each configured role to the Host objects of its machines and
    describes each network (cidr, usable range, gateway, dns, roles).
    """
    machines = self.provider_conf.machines
    networks = self.provider_conf.networks
    _networks = []
    for network in networks:
        ipnet = IPNetwork(network.cidr)
        _networks.append({
            # keep a margin of 10 addresses at both ends of the range
            "netpool": list(ipnet)[10:-10],
            "cidr": network.cidr,
            "roles": network.roles,
            "gateway": ipnet.ip
        })
    vagrant_machines = []
    vagrant_roles = {}
    j = 0
    for machine in machines:
        for _ in range(machine.number):
            vagrant_machine = {
                "name": "enos-%s" % j,
                "cpu": machine.flavour_desc["core"],
                "mem": machine.flavour_desc["mem"],
                # one address per declared network, taken from its pool
                "ips": [n["netpool"].pop() for n in _networks],
            }
            vagrant_machines.append(vagrant_machine)
            # a machine can hold several roles
            for role in machine.roles:
                vagrant_roles.setdefault(role, []).append(vagrant_machine)
            j = j + 1
    logger.debug(vagrant_roles)
    # render the Vagrantfile into the current working directory
    loader = FileSystemLoader(searchpath=TEMPLATE_DIR)
    env = Environment(loader=loader, autoescape=True)
    template = env.get_template('Vagrantfile.j2')
    vagrantfile = template.render(machines=vagrant_machines, provider_conf=self.provider_conf)
    vagrantfile_path = os.path.join(os.getcwd(), "Vagrantfile")
    with open(vagrantfile_path, 'w') as f:
        f.write(vagrantfile)
    v_env = dict(os.environ)
    v_env['VAGRANT_DEFAULT_PROVIDER'] = self.provider_conf.backend
    v = vagrant.Vagrant(root=os.getcwd(), quiet_stdout=False, quiet_stderr=False, env=v_env)
    if force_deploy:
        v.destroy()
    v.up()
    v.provision()
    # resolve the ssh coordinates of every machine, grouped by role
    roles = {}
    for role, machines in vagrant_roles.items():
        for machine in machines:
            keyfile = v.keyfile(vm_name=machine['name'])
            port = v.port(vm_name=machine['name'])
            address = v.hostname(vm_name=machine['name'])
            roles.setdefault(role, []).append(
                Host(address,
                     alias=machine['name'],
                     user=self.provider_conf.user,
                     port=port,
                     keyfile=keyfile))
    networks = [{
        'cidr': str(n["cidr"]),
        'start': str(n["netpool"][0]),
        'end': str(n["netpool"][-1]),
        'dns': '8.8.8.8',
        'gateway': str(n["gateway"]),
        'roles': n["roles"]
    } for n in _networks]
    logger.debug(roles)
    logger.debug(networks)
    return (roles, networks)
Reserve and deploys the vagrant boxes .
49,298
def destroy(self):
    """Tear down every vagrant box involved in this deployment."""
    runner = vagrant.Vagrant(root=os.getcwd(),
                             quiet_stdout=False,
                             quiet_stderr=True)
    runner.destroy()
Destroy all vagrant box involved in the deployment .
49,299
def tick(self):
    """Fold the events counted since the last tick into the decayed rate.

    Computes the instantaneous rate over the tick interval, resets the
    counter, then updates the exponentially weighted moving average.
    """
    instant = self.count / float(self.tick_interval_s)
    self.count = 0
    if not self.initialized:
        # first tick: seed the average with the instantaneous rate
        self.initialized = True
        self.rate = instant
    else:
        # EWMA update toward the new instantaneous rate
        self.rate += self.alpha * (instant - self.rate)
Mark the passage of time and decay the current rate accordingly .