idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
49,300
def merge_odd_even_csu_configurations(conf_odd, conf_even):
    """Merge two CSU configurations: odd-numbered bars come from conf_odd,
    even-numbered bars from conf_even.

    :param conf_odd: configuration providing odd-numbered bars (also used
        as the template for the merged copy)
    :param conf_even: configuration providing even-numbered bars
    :return: new merged configuration object
    """
    merged_conf = deepcopy(conf_odd)
    for i in range(EMIR_NBARS):
        # Bar numbers are 1-based; only even-numbered bars get overwritten.
        if (i + 1) % 2 != 0:
            continue
        merged_conf._csu_bar_left[i] = conf_even._csu_bar_left[i]
        merged_conf._csu_bar_right[i] = conf_even._csu_bar_right[i]
        merged_conf._csu_bar_slit_center[i] = conf_even._csu_bar_slit_center[i]
        merged_conf._csu_bar_slit_width[i] = conf_even._csu_bar_slit_width[i]
    return merged_conf
Merge two CSU configurations, taking odd-numbered bar values from one and even-numbered bar values from the other.
49,301
def define_from_header(cls, image_header):
    """Define class members directly from a FITS header.

    Reads CSUP<i> keywords (1..EMIR_NBARS) for the left bar positions and
    CSUP<i+EMIR_NBARS> for the right bars, deriving slit centers and widths.

    :param image_header: FITS header (mapping) with the CSUP keywords
    :return: new configuration instance
    :raises ValueError: when an expected CSUP keyword is missing
    """
    # NOTE(review): the original ignored `cls` and instantiated
    # CsuConfiguration directly; using cls() keeps subclass support and is
    # identical when invoked on CsuConfiguration itself.
    self = cls()
    self._csu_bar_left = []
    self._csu_bar_right = []
    self._csu_bar_slit_center = []
    self._csu_bar_slit_width = []
    for i in range(EMIR_NBARS):
        ibar = i + 1
        keyword = 'CSUP{}'.format(ibar)
        if keyword not in image_header:
            raise ValueError("Expected keyword " + keyword + " not found!")
        self._csu_bar_left.append(image_header[keyword])
        keyword = 'CSUP{}'.format(ibar + EMIR_NBARS)
        if keyword not in image_header:
            raise ValueError("Expected keyword " + keyword + " not found!")
        # 341.5 appears to be the bar-position reference used to flip the
        # right-bar readout -- TODO confirm against instrument docs.
        self._csu_bar_right.append(341.5 - image_header[keyword])
        self._csu_bar_slit_center.append(
            (self._csu_bar_left[i] + self._csu_bar_right[i]) / 2)
        self._csu_bar_slit_width.append(
            self._csu_bar_right[i] - self._csu_bar_left[i])
    return self
Define class members directly from FITS header .
49,302
def widths_in_range_mm(self,
                       minwidth=EMIR_MINIMUM_SLITLET_WIDTH_MM,
                       maxwidth=EMIR_MAXIMUM_SLITLET_WIDTH_MM):
    """Return the 1-based numbers of slitlets whose width lies within
    [minwidth, maxwidth] (millimeters, inclusive)."""
    return [number + 1 for number in range(EMIR_NBARS)
            if minwidth <= self._csu_bar_slit_width[number] <= maxwidth]
Return the list of slitlets whose width is within the given range.
49,303
def encloses_annulus(x_min, x_max, y_min, y_max, nx, ny, r_in, r_out):
    """Fractional overlap grid of an annulus with radii r_in..r_out
    (backported from old photutils): outer-disc coverage minus
    inner-disc coverage on an nx-by-ny pixel grid."""
    grid_args = (x_min, x_max, y_min, y_max, nx, ny)
    outer = circular_overlap_grid(*grid_args, r_out, 1, 1)
    inner = circular_overlap_grid(*grid_args, r_in, 1, 1)
    return outer - inner
Encloses function backported from old photutils
49,304
def ximshow_unrectified(self, slitlet2d):
    """Display the unrectified slitlet image with spectrum trails and frontiers.

    Overplots the lower/middle/upper spectrum trails (solid/dashed/solid
    blue) and the two frontiers (dotted blue) evaluated over the full
    detector X axis, then hands control to pause_debugplot.
    """
    title = "Slitlet#" + str(self.islitlet)
    ax = ximshow(slitlet2d, title=title,
                 first_pixel=(self.bb_nc1_orig, self.bb_ns1_orig),
                 show=False)
    # Evaluate the trail polynomials on every X pixel of the detector.
    xdum = np.linspace(1, EMIR_NAXIS1, num=EMIR_NAXIS1)
    ylower = self.list_spectrails[0](xdum)
    ax.plot(xdum, ylower, 'b-')
    ymiddle = self.list_spectrails[1](xdum)
    ax.plot(xdum, ymiddle, 'b--')
    yupper = self.list_spectrails[2](xdum)
    ax.plot(xdum, yupper, 'b-')
    ylower_frontier = self.list_frontiers[0](xdum)
    ax.plot(xdum, ylower_frontier, 'b:')
    yupper_frontier = self.list_frontiers[1](xdum)
    ax.plot(xdum, yupper_frontier, 'b:')
    pause_debugplot(debugplot=self.debugplot, pltshow=True)
Display unrectified image with spectrails and frontiers .
49,305
def ximshow_rectified(self, slitlet2d_rect):
    """Display the rectified slitlet image with spectrum trails and frontiers.

    Each trail/frontier is drawn as a horizontal line at its rectified
    Y position (corr_yrect_a + corr_yrect_b * trail(x0_reference)).
    """
    title = "Slitlet#" + str(self.islitlet) + " (rectify)"
    ax = ximshow(slitlet2d_rect, title=title,
                 first_pixel=(self.bb_nc1_orig, self.bb_ns1_orig),
                 show=False)
    # FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin float is the documented replacement.
    xx = np.arange(0, self.bb_nc2_orig - self.bb_nc1_orig + 1, dtype=float)
    for spectrail in self.list_spectrails:
        yy0 = self.corr_yrect_a + self.corr_yrect_b * spectrail(self.x0_reference)
        yy = np.tile([yy0 - self.bb_ns1_orig], xx.size)
        ax.plot(xx + self.bb_nc1_orig, yy + self.bb_ns1_orig, "b")
    for spectrail in self.list_frontiers:
        yy0 = self.corr_yrect_a + self.corr_yrect_b * spectrail(self.x0_reference)
        yy = np.tile([yy0 - self.bb_ns1_orig], xx.size)
        ax.plot(xx + self.bb_nc1_orig, yy + self.bb_ns1_orig, "b:")
    pause_debugplot(self.debugplot, pltshow=True)
Display rectified image with spectrails and frontiers .
49,306
def is_expired(self, token, when=0):
    """Evaluate whether the token has expired or not.

    :param token: signed JWT to check
    :param when: reference time (0 means "now" per the helper's convention)
    :return: result of the module-level is_expired() helper applied to the
        token's 'exp' claim
    """
    # Unpacking also verifies the signature against the configured key jar.
    verifier = JWT(key_jar=self.key_jar, allowed_sign_algs=[self.alg])
    _payload = verifier.unpack(token)
    # Delegates to the module-level is_expired() helper (not recursion).
    return is_expired(_payload['exp'], when)
Evaluate whether the token has expired or not
49,307
async def open_websocket(url: str, headers: Optional[list] = None,
                         subprotocols: Optional[list] = None):
    """Open a websocket to `url`, yield it, and always close it on exit.

    NOTE(review): written as an async generator -- presumably wrapped with
    an @asynccontextmanager-style decorator at the definition site; confirm.
    """
    ws = await create_websocket(url, headers=headers, subprotocols=subprotocols)
    try:
        yield ws
    finally:
        # Close even when the consumer's body raised.
        await ws.close()
Opens a websocket .
49,308
async def create_websocket(url: str, ssl: Optional[SSLContext] = None,
                           headers: Optional[list] = None,
                           subprotocols: Optional[list] = None):
    """Create (and start) a websocket for `url`; the caller must close it.

    TLS is enabled automatically for "wss" URLs unless an explicit `ssl`
    value is supplied; passing True builds a default SSLContext.
    """
    parsed = yarl.URL(url)
    extra = {}
    if headers:
        extra["headers"] = headers
    if ssl is None:
        ssl = parsed.scheme == "wss"
    if ssl:
        if ssl is True:
            ssl = SSLContext()
        extra["ssl_context"] = ssl
        extra["autostart_tls"] = True
        extra["tls_standard_compatible"] = False
    ws = Websocket()
    await ws.__ainit__(addr=(parsed.host, int(parsed.port)),
                       path=parsed.path_qs,
                       subprotocols=subprotocols, **extra)
    return ws
A more low - level form of open_websocket . You are responsible for closing this websocket .
49,309
async def open_websocket_client(sock: anyio.abc.SocketStream, addr, path: str,
                                headers: Optional[list] = None,
                                subprotocols: Optional[list] = None):
    """Create a websocket on top of an existing socket, yield it, and
    always close it on exit.

    NOTE(review): async generator -- presumably wrapped with an
    @asynccontextmanager-style decorator at the definition site; confirm.
    """
    ws = await create_websocket_client(sock, addr=addr, path=path,
                                       headers=headers, subprotocols=subprotocols)
    try:
        yield ws
    finally:
        await ws.close()
Create a websocket on top of a socket .
49,310
async def create_websocket_client(sock: anyio.abc.SocketStream, addr, path: str,
                                  headers: Optional[List] = None,
                                  subprotocols: Optional[List[str]] = None):
    """Start a client-side websocket handshake over an existing socket
    stream. The caller is responsible for closing the returned websocket."""
    websocket = Websocket()
    await websocket.start_client(sock, addr=addr, path=path,
                                 headers=headers, subprotocols=subprotocols)
    return websocket
A more low - level form of create_websocket_client . You are responsible for closing this websocket .
49,311
def _one(self, query):
    """Return the single row of `query`.

    :raises NotFoundError: when there is no row, more than one row, or
        the row has expired.
    """
    try:
        row = query.one()
        if row.has_expired(self._expirations):
            raise NotFoundError
        return row
    except (NoResultFound, MultipleResultsFound):
        raise NotFoundError
Gets one row from the query. Raises NotFoundError if there isn't exactly one row (none or multiple).
49,312
def _first(self, query):
    """Return the first row of `query`.

    :raises NotFoundError: when there is no row or the row has expired.
    """
    row = query.first()
    if row is None or row.has_expired(self._expirations):
        raise NotFoundError
    return row
Gets the first row of the query. Raises NotFoundError if there isn't a row.
49,313
def _all(self, query):
    """Return all rows of `query`.

    :raises NotFoundError: when there are no rows, or any row has expired.
    """
    # FIX: the original issued query.count() first, which costs an extra
    # SQL round trip (and races with the subsequent all()). Checking the
    # fetched list is equivalent and cheaper.
    results = query.all()
    if not results:
        raise NotFoundError
    for result in results:
        if result.has_expired(self._expirations):
            raise NotFoundError
    return results
Gets all rows of the query . Raises a NotFoundError if there are 0 rows
49,314
def _put(self, item: SQLBaseObject):
    """Put an item into the database, refreshing its lastUpdate column.

    Skips the write entirely when the item's DTO type is configured with a
    zero expiration (i.e. caching is disabled for that type).
    """
    if item._dto_type in self._expirations and self._expirations[item._dto_type] == 0:
        # Expiration of 0 means "never store this type".
        return
    item.updated()
    # merge() inserts or updates depending on primary-key presence.
    self._session().merge(item)
Puts an item into the database. Updates the lastUpdate column.
49,315
def _put_many(self, items: Iterable[DtoObject], cls):
    """Put many items into the database, refreshing each lastUpdate column.

    Skips everything when `cls` is configured with a zero expiration
    (caching disabled for that DTO type).
    """
    if cls._dto_type in self._expirations and self._expirations[cls._dto_type] == 0:
        return
    # NOTE(review): _put() calls self._session() (a factory), while here the
    # attribute is used uncalled -- confirm whether this should be
    # self._session() for consistency.
    session = self._session
    for item in items:
        # Wrap each raw dto dict in the mapped class before merging.
        item = cls(**item)
        item.updated()
        session.merge(item)
Puts many items into the database . Updates lastUpdate column for each of them
49,316
def filter(self, userinfo, user_info_claims=None):
    """Return only those claims that were asked for.

    Best effort: requested claims that are absent from `userinfo` are
    silently dropped -- no error is raised even for essential claims.

    :param userinfo: mapping of available claims
    :param user_info_claims: mapping of requested claims
        (claim name -> restriction dict), or None for "everything"
    :return: dict with the requested claims that are present, or a shallow
        copy of `userinfo` when no claims were specified
    """
    if user_info_claims is None:
        return copy.copy(userinfo)
    # The original also accumulated "missing"/"optional" claim lists but
    # never used them; only the present claims are returned.
    return {key: userinfo[key] for key in user_info_claims if key in userinfo}
Return only those claims that are asked for. It's a best-effort task; if essential claims are not present, no error is flagged.
49,317
def from_data(room, conn, data):
    """Construct a ChatMessage instance from raw protocol data.

    Walks the message parts, flattening them into a plain-text body while
    collecting referenced files and rooms along the way.
    """
    files = list()
    rooms = dict()
    msg = str()
    for part in data["message"]:
        ptype = part["type"]
        if ptype == "text":
            val = part["value"]
            msg += val
        elif ptype == "break":
            msg += "\n"
        elif ptype == "file":
            # File references render as "@<id>"; known files are collected.
            fileid = part["id"]
            fileobj = room.filedict.get(fileid)
            if fileobj:
                files += (fileobj,)
            fileid = f"@{fileid}"
            msg += fileid
        elif ptype == "room":
            # Room references render as "#<id>".
            roomid = part["id"]
            rooms[roomid] = part["name"]
            roomid = f"#{roomid}"
            msg += roomid
        elif ptype == "url":
            msg += part["text"]
        elif ptype == "raw":
            msg += html_to_text(part["value"])
        else:
            # Unknown part types only warn, so protocol additions do not
            # break message parsing.
            import warnings
            warnings.warn(f"unknown message type '{ptype}'", Warning)
    nick = data.get("nick") or data.get("user")
    options = data.get("options", dict())
    data = data.get("data", dict())
    message = ChatMessage(room, conn, nick, msg,
                          roles=Roles.from_options(options),
                          options=options,
                          data=data,
                          files=files,
                          rooms=rooms,)
    return message
Construct a ChatMessage instance from raw protocol data
49,318
def create_rot2d(angle):
    """Return the 2x2 counter-clockwise rotation matrix for `angle` (radians)."""
    cos_a, sin_a = math.cos(angle), math.sin(angle)
    return np.array([[cos_a, -sin_a],
                     [sin_a, cos_a]])
Create 2D rotation matrix
49,319
def comp_centroid(data, bounding_box, debug_plot=False, plot_reference=None, logger=None):
    """Detect objects in a region and return the centroid of the brightest one.

    Background-subtracts the sub-image with sep, extracts sources, and picks
    the one with maximum flux. Returns (x, y) in sub-image coordinates,
    (x, y, ax) when debug_plot is True, or None when nothing is detected.
    """
    from matplotlib.patches import Ellipse
    if logger is None:
        logger = logging.getLogger(__name__)
    region = bounding_box.slice
    # Offsets of the sub-image inside the full frame (used for plotting).
    ref_x = region[1].start
    ref_y = region[0].start
    logger.debug('region ofset is %s, %s', ref_x, ref_y)
    subimage = data[region].copy()
    bkg = sep.Background(subimage)
    data_sub = subimage - bkg
    # 1.5 is the detection threshold in units of the global background RMS.
    objects = sep.extract(data_sub, 1.5, err=bkg.globalrms)
    logger.debug('%d object found', len(objects))
    if len(objects) == 0:
        return None
    iadx = objects['flux'].argmax()
    maxflux = objects[iadx]
    if debug_plot:
        fig, ax = plt.subplots()
        m, s = np.mean(data_sub), np.std(data_sub)
        ax.imshow(data_sub, interpolation='nearest', cmap='gray',
                  vmin=m - s, vmax=m + s, origin='lower',
                  extent=bounding_box.extent)
        if plot_reference:
            e = Ellipse(xy=(plot_reference[0], plot_reference[1]),
                        width=6, height=6, angle=0)
            e.set_facecolor('none')
            e.set_edgecolor('green')
            ax.add_artist(e)
        # Mark every detection; the brightest one gets a blue ellipse.
        for idx, obj in enumerate(objects):
            e = Ellipse(xy=(obj['x'] + ref_x, obj['y'] + ref_y),
                        width=6 * obj['a'], height=6 * obj['b'],
                        angle=obj['theta'] * 180. / np.pi)
            e.set_facecolor('none')
            if idx == iadx:
                e.set_edgecolor('blue')
            else:
                e.set_edgecolor('red')
            ax.add_artist(e)
        return maxflux['x'], maxflux['y'], ax
    else:
        return maxflux['x'], maxflux['y']
Detect objects in a region and return the centroid of the brightest one
49,320
def sign_encrypt(self, session_info, client_id, code=None, access_token=None,
                 user_info=None, sign=True, encrypt=False, extra_claims=None):
    """Sign and/or encrypt an ID Token for the given client.

    :param session_info: session data including the 'authn_event'
    :param client_id: the client the token is addressed to
    :param code: optional authorization code to hash into the token
    :param access_token: optional access token to hash into the token
    :param user_info: optional user claims to include
    :param sign: whether to sign the token
    :param encrypt: whether to encrypt the token
    :param extra_claims: additional claims to add to the payload
    :return: the packed JWT
    """
    _cntx = self.endpoint_context
    client_info = _cntx.cdb[client_id]
    # Resolve which signing/encryption algorithms this client registered
    # for id_tokens.
    alg_dict = get_sign_and_encrypt_algorithms(_cntx, client_info, 'id_token',
                                               sign=sign, encrypt=encrypt)
    _authn_event = session_info['authn_event']
    _idt_info = self.payload(session_info, acr=_authn_event["authn_info"],
                             alg=alg_dict['sign_alg'], code=code,
                             access_token=access_token, user_info=user_info,
                             auth_time=_authn_event["authn_time"],
                             extra_claims=extra_claims)
    _jwt = JWT(_cntx.keyjar, iss=_cntx.issuer,
               lifetime=_idt_info['lifetime'], **alg_dict)
    return _jwt.pack(_idt_info['payload'], recv=client_id)
Sign and/or encrypt an ID Token.
49,321
def add(self, src_cls, module, package=None):
    """Register a layer class: import `module` (optionally relative to
    `package`) and store its attribute named `src_cls` in self.sources.

    This method may be overloaded by a layer.
    """
    imported = importlib.import_module(module, package)
    self.sources[src_cls] = getattr(imported, src_cls)
Add layer class to model . This method may be overloaded by layer .
49,322
def add(self, data_source, module, package=None):
    """Add `data_source` to the model: import its module via the base class
    and record the layer entry.

    The data object itself is instantiated later, in open().
    """
    super(Data, self).add(data_source, module, package)
    if data_source not in self.layer:
        # Remember where the class came from so the layer can be reloaded.
        self.layer[data_source] = {'module': module, 'package': package}
    self.objects[data_source] = None
Add data_source to model . Tries to import module then looks for data source class definition .
49,323
def open(self, data_source, *args, **kwargs):
    """Open `filename` to get data for `data_source` and register its data.

    For file-based readers the positional arguments (filename, path,
    rel_path) override the keyword forms and are folded into a single
    joined 'filename' keyword before instantiation.
    """
    if self.sources[data_source]._meta.data_reader.is_file_reader:
        filename = kwargs.get('filename')
        path = kwargs.get('path', '')
        rel_path = kwargs.get('rel_path', '')
        # Positional arguments take precedence over keywords.
        if len(args) > 0:
            filename = args[0]
        if len(args) > 1:
            path = args[1]
        if len(args) > 2:
            rel_path = args[2]
        args = ()
        kwargs = {'filename': os.path.join(rel_path, path, filename)}
        LOGGER.debug('filename: %s', kwargs['filename'])
    self.objects[data_source] = self.sources[data_source](*args, **kwargs)
    data_src_obj = self.objects[data_source]
    # Register the data together with each declared meta attribute.
    meta = [getattr(data_src_obj, m) for m in self.reg.meta_names]
    self.reg.register(data_src_obj.data, *meta)
Open filename to get data for data_source .
49,324
def load(self, rel_path=None):
    """Add data sources to the layer and open their configured data files.

    NOTE(review): uses dict.iteritems() and basestring -- this code is
    Python 2 only.
    """
    for k, v in self.layer.iteritems():
        self.add(k, v['module'], v.get('package'))
        filename = v.get('filename')
        path = v.get('path')
        if filename:
            if not path:
                path = rel_path
            else:
                path = os.path.join(rel_path, path)
            # A list of filenames is packed into one os.path.pathsep string.
            if isinstance(filename, basestring):
                filename = os.path.join(path, filename)
            else:
                file_list = [os.path.join(path, f) for f in filename]
                filename = os.path.pathsep.join(file_list)
            self.open(k, filename)
Add data_sources to layer and open files with data for the data_source .
49,325
def edit(self, data_src, value):
    """Edit a data layer: when a new filename is given, unregister the old
    data and reopen, then update the stored layer metadata.

    NOTE(review): iteritems() -- Python 2 only.
    """
    if 'filename' in value:
        # Unregister everything previously provided by this data source.
        items = [k for k, v in self.reg.data_source.iteritems() if v == data_src]
        self.reg.unregister(items)
        self.open(data_src, value['filename'], value.get('path'))
    self.layer[data_src].update(value)
Edit data layer .
49,326
def delete(self, data_src):
    """Delete a data source: unregister its data and drop every record of
    it from the layer, objects, and sources maps."""
    items = self.objects[data_src].data.keys()
    self.reg.unregister(items)
    self.layer.pop(data_src)
    self.objects.pop(data_src)
    self.sources.pop(data_src)
Delete data sources .
49,327
def load(self, _=None):
    """Add formulas to the layer.

    The argument is accepted for interface compatibility and ignored.
    NOTE(review): iteritems() -- Python 2 only.
    """
    for k, v in self.layer.iteritems():
        self.add(k, v['module'], v.get('package'))
Add formulas to layer .
49,328
def add(self, calc, module, package=None):
    """Add a calculation to the layer, instantiate it, and register its
    calcs with the registry."""
    super(Calculations, self).add(calc, module, package)
    if calc not in self.layer:
        self.layer[calc] = {'module': module, 'package': package}
    # Unlike data sources, calculations are instantiated immediately.
    self.objects[calc] = self.sources[calc]()
    calc_src_obj = self.objects[calc]
    meta = [getattr(calc_src_obj, m) for m in self.reg.meta_names]
    self.reg.register(calc_src_obj.calcs, *meta)
Add calc to layer .
49,329
def add(self, output, module, package=None):
    """Add an output to the layer, instantiate it, and register its
    outputs with the registry."""
    super(Outputs, self).add(output, module, package)
    if output not in self.layer:
        self.layer[output] = {'module': module, 'package': package}
    self.objects[output] = self.sources[output]()
    out_src_obj = self.objects[output]
    meta = [getattr(out_src_obj, m) for m in self.reg.meta_names]
    self.reg.register(out_src_obj.outputs, *meta)
Add output to layer.
49,330
def add(self, sim, module, package=None):
    """Add a simulation to the layer.

    Unlike calculations/outputs, no object is instantiated here.
    """
    super(Simulations, self).add(sim, module, package)
    if sim not in self.layer:
        self.layer[sim] = {'module': module, 'package': package}
Add simulation to layer .
49,331
def load(self, rel_path=None):
    """Add simulations to the layer, opening legacy sim files if configured.

    NOTE(review): iteritems() -- Python 2 only.
    """
    for k, v in self.layer.iteritems():
        self.add(k, v['module'], v.get('package'))
        filename = v.get('filename')
        path = v.get('path')
        if filename:
            # Loading simulations from files is deprecated.
            warnings.warn(DeprecationWarning(SIMFILE_LOAD_WARNING))
            if not path:
                path = rel_path
            else:
                path = os.path.join(rel_path, path)
            filename = os.path.join(path, filename)
            self.open(k, filename)
Add sim_src to layer .
49,332
def f_total_irrad(times, surface_tilt, surface_azimuth, solar_zenith,
                  solar_azimuth, dni, ghi, dhi, dni_extra, am_abs,
                  model='haydavies'):
    """Calculate total plane-of-array irradiance with pvlib.

    :param times: index for aligning the input series
    :param model: pvlib sky-diffuse model name (default 'haydavies')
    :return: tuple (poa_global, poa_direct, poa_diffuse) as plain arrays
    """
    am_abs = am_abs.squeeze()
    # Bundle inputs in a DataFrame indexed by `times` so pvlib receives
    # aligned series.
    df = pd.DataFrame({'solar_zenith': solar_zenith,
                       'solar_azimuth': solar_azimuth,
                       'dni': dni, 'ghi': ghi, 'dhi': dhi,
                       'dni_extra': dni_extra, 'am_abs': am_abs},
                      index=times)
    # fillna(0.0) zeroes out night-time/undefined irradiance values.
    total_irrad = pvlib.irradiance.total_irrad(
        surface_tilt, surface_azimuth,
        df['solar_zenith'], df['solar_azimuth'],
        df['dni'], df['ghi'], df['dhi'],
        dni_extra=df['dni_extra'], airmass=df['am_abs'],
        model=model).fillna(0.0)
    poa_global = total_irrad['poa_global'].values
    poa_direct = total_irrad['poa_direct'].values
    poa_diffuse = total_irrad['poa_diffuse'].values
    return poa_global, poa_direct, poa_diffuse
Calculate total irradiance
49,333
def creation_ordered(class_to_decorate):
    """Class decorator: instances sort by creation order.

    Wraps __init__ to stamp every instance with a monotonically increasing
    '_index', defines __lt__ on it, and fills in the remaining comparison
    operators via functools.total_ordering.
    """
    counter = itertools.count()
    original_init = class_to_decorate.__init__

    @functools.wraps(original_init, assigned=['__doc__'])
    def __init__(self, *args, **kwargs):
        # object.__setattr__ sidesteps any __setattr__ override (e.g. frozen
        # classes) so the stamp always lands.
        object.__setattr__(self, '_index', next(counter))
        original_init(self, *args, **kwargs)

    def __lt__(self, other):
        return self._index < other._index

    class_to_decorate.__init__ = __init__
    class_to_decorate.__lt__ = __lt__
    return functools.total_ordering(class_to_decorate)
Class decorator that ensures that instances will be ordered after creation order when sorted .
49,334
def get_members(cls, member_class=None, is_member=None, sort_key=None, _parameter=None):
    """Collect all class-level attributes matching the given criteria.

    :param member_class: collect attributes that are instances of this class
    :param is_member: alternatively, a predicate deciding membership
    :param sort_key: optional key applied to each member to order the result
    :param _parameter: internal -- routes inherited lookup through get_declared
    :return: OrderedDict of name -> member, inherited members first
    :raises TypeError: when neither member_class nor is_member is given
    """
    if member_class is None and is_member is None:
        raise TypeError("get_members either needs a member_class parameter or an is_member check function (or both)")
    members = OrderedDict()
    # Gather inherited members first so subclass definitions override them.
    for base in cls.__bases__:
        if _parameter is None:
            inherited_members = get_members(base, member_class=member_class,
                                            is_member=is_member, sort_key=sort_key)
        else:
            inherited_members = get_declared(base, _parameter)
        members.update(inherited_members)

    def generate_member_bindings():
        for name in cls.__dict__:
            if name.startswith('__'):
                continue
            obj = getattr(cls, name)
            if member_class is not None and isinstance(obj, member_class):
                yield name, obj
            elif is_member is not None and is_member(obj):
                yield name, obj
            elif type(obj) is tuple and len(obj) == 1 and isinstance(obj[0], member_class):
                # Catch the common "trailing comma" mistake early.
                raise TypeError("'%s' is a one-tuple containing what we are looking for. Trailing comma much? Don't... just don't." % name)

    bindings = generate_member_bindings()
    if sort_key is not None:
        try:
            sorted_bindings = sorted(bindings, key=lambda x: sort_key(x[1]))
        except AttributeError:
            # Most likely the members lack _index because the class was not
            # decorated with @creation_ordered.
            if sort_key is default_sort_key:
                raise TypeError('Missing member ordering definition. Use @creation_ordered or specify sort_key')
            else:
                raise
        members.update(sorted_bindings)
    else:
        members.update(bindings)
    return members
Collect all class level attributes matching the given criteria .
49,335
def generate_rst_docs(directory, classes, missing_objects=None):
    """Generate .rst documentation files for tri.declarative APIs.

    :param directory: output directory for the generated files
    :param classes: classes to document
    :param missing_objects: passed through to the doc generator
    """
    import os
    doc_by_filename = _generate_rst_docs(classes=classes,
                                         missing_objects=missing_objects)
    for filename, doc in doc_by_filename:
        # FIX: plain `directory + filename` silently produced broken paths
        # when `directory` lacked a trailing separator; os.path.join handles
        # both forms.
        with open(os.path.join(directory, filename), 'w') as out_file:
            out_file.write(doc)
Generate documentation for tri . declarative APIs
49,336
def reg_copy(reg, keys=None):
    """Make a copy of a subset of a registry.

    :param reg: registry to copy; its class is reused for the copy
    :param keys: keys to include (default: all keys)
    :return: new registry of the same class with the selected entries and
        their per-key metadata
    """
    if keys is None:
        keys = reg.keys()
    reg_cls = type(reg)
    new_reg = reg_cls()
    mk = {}
    # Rebuild every declared meta mapping restricted to the chosen keys.
    for m in reg_cls.meta_names:
        mstar = getattr(reg, m, None)
        if not mstar:
            # Meta absent or empty on the source: mirror that as None.
            mk[m] = None
            continue
        mk[m] = {}
        for k in keys:
            kstar = mstar.get(k)
            if kstar is not None:
                mk[m][k] = kstar
    new_reg.register({k: reg[k] for k in keys}, **mk)
    return new_reg
Make a copy of a subset of a registry .
49,337
def listen_many(*rooms):
    """Listen for changes in all registered listeners in all specified rooms.

    Blocks until every room's connection has drained its queues or
    disconnected.
    """
    # Work on the underlying connections; several rooms may share one.
    rooms = set(r.conn for r in rooms)
    for room in rooms:
        room.validate_listeners()
    with ARBITRATOR.condition:
        while any(r.connected for r in rooms):
            ARBITRATOR.condition.wait()
            # Keep only connections whose listeners are still active.
            rooms = [r for r in rooms if r.run_queues()]
            if not rooms:
                return
Listen for changes in all registered listeners in all specified rooms
49,338
def connect(self, username, checksum, password=None, key=None):
    """Connect to the room's websocket through the asyncio HTTP interface.

    Builds the engine.io handshake URL (optionally carrying a room password
    or key), asks the arbitrator to create the connection, and blocks until
    the connection barrier is released by the event thread.
    """
    ws_url = (f"{BASE_WS_URL}?room={self.room.room_id}&cs={checksum}&nick={username}"
              f"&rn={random_id(6)}&t={int(time.time() * 1000)}&transport=websocket&EIO=3")
    if password:
        ws_url += f"&password={password}"
    elif key:
        ws_url += f"&key={key}"
    ARBITRATOR.create_connection(self.proto, ws_url,
                                 self.headers["User-Agent"], self.cookies)
    # Wait until the event thread reports the connection is up.
    self.__conn_barrier.wait()
Connect to websocket through asyncio http interface
49,339
def send_ack(self):
    """Send an ack for the highest message id seen, unless already acked."""
    if self.last_ack == self.proto.max_id:
        return
    LOGGER.debug("ack (%d)", self.proto.max_id)
    self.last_ack = self.proto.max_id
    # engine.io message frame (type 4) carrying the acked id.
    self.send_message(f"4{to_json([self.proto.max_id])}")
Send an ack message
49,340
def make_call(self, fun, *args):
    """Make a regular API call over the websocket.

    :param fun: remote function name
    :param args: positional arguments forwarded to the remote function
    """
    obj = {"fn": fun, "args": list(args)}
    # Envelope layout: [max_id, [[0, ["call", obj]], send_count]].
    obj = [self.proto.max_id, [[0, ["call", obj]], self.proto.send_count]]
    self.send_message(f"4{to_json(obj)}")
    self.proto.send_count += 1
Makes a regular API call
49,341
def make_call_with_cb(self, fun, *args):
    """Make an API call with a registered callback id appended to the
    arguments; return the event to wait on for the response."""
    cid, event = self.handler.register_callback()
    self.make_call(fun, *args, cid)
    return event
Makes an API call with a callback to wait for
49,342
def make_api_call(self, call, params):
    """Make a REST API call and return the decoded JSON response.

    :param call: endpoint name appended to BASE_REST_URL
    :param params: query parameters
    :raises ValueError: if params is not a dict
    """
    if not isinstance(params, dict):
        raise ValueError("params argument must be a dictionary")
    # Origin/Referer headers are required by the server for REST calls.
    kw = dict(params=params,
              headers={"Origin": BASE_URL,
                       "Referer": f"{BASE_URL}/r/{self.room.name}"},)
    return self.get(BASE_REST_URL + call, **kw).json()
Make a REST API call
49,343
def reraise(self, ex):
    """Store an exception raised on the event thread and force queue
    processing so listener threads observe it."""
    self.exception = ex
    self.process_queues(forced=True)
Reraise an exception passed by the event thread
49,344
def close(self):
    """Close the connection pair, notifying the server when still connected."""
    if self.connected:
        # engine.io "close" call envelope (type 2).
        obj = [self.proto.max_id, [[2], self.proto.send_count]]
        ARBITRATOR.send_sync_message(self.proto, f"4{to_json(obj)}")
        self.proto.send_count += 1
    ARBITRATOR.close(self.proto)
    self.listeners.clear()
    self.proto.connected = False
    super().close()
    # Break reference cycles so the room/protocol can be collected.
    del self.room
    del self.proto
Closes connection pair
49,345
async def on_open(self):
    """Ping loop started once the connection is open.

    Every ping_interval: verifies the previous ping was answered, sends an
    engine.io ping ("2") plus any pending ack, then sleeps. On failure the
    error is reraised into the listener threads and the loop ends.
    """
    self.__ensure_barrier()
    while self.connected:
        try:
            if self.__lastping > self.__lastpong:
                raise IOError("Last ping remained unanswered")
            self.send_message("2")
            self.send_ack()
            self.__lastping = time.time()
            await asyncio.sleep(self.ping_interval)
        except Exception as ex:
            LOGGER.exception("Failed to ping")
            try:
                # Propagate the failure so listeners stop waiting.
                self.reraise(ex)
            except Exception:
                LOGGER.exception("failed to force close connection after ping error")
            break
DingDongmaster the connection is open
49,346
def on_message(self, new_data):
    """Process an incoming frame according to engine.io rules.

    Frame types: 0 handshake/config, 1 close, 3 pong, 4 message, 6 noop.
    Any processing error is reraised into the listener threads.
    """
    LOGGER.debug("new frame [%r]", new_data)
    try:
        what = int(new_data[0])
        data = new_data[1:]
        data = data and from_json(data)
        if what == 0:
            # Server handshake carries the ping interval in milliseconds.
            self.ping_interval = float(data["pingInterval"]) / 1000
            LOGGER.debug("adjusted ping interval")
            return
        if what == 1:
            LOGGER.debug("received close")
            self.reraise(IOError("Connection closed remotely"))
            return
        if what == 3:
            self.__lastpong = time.time()
            LOGGER.debug("received a pong")
            return
        if what == 4:
            self._on_frame(data)
            return
        if what == 6:
            LOGGER.debug("received noop")
            self.send_message("5")
            return
        LOGGER.debug("unhandled message: [%d] [%r]", what, data)
    except Exception as ex:
        self.reraise(ex)
Processes incoming messages according to engine - io rules
49,347
def add_listener(self, event_type, callback):
    """Add a listener for a specific event type.

    You still need to call listen() afterwards to process events.
    :raises ConnectionError: when the room is not connected
    """
    if not self.connected:
        # NOTE(review): 1s grace before giving up; any exception stored by
        # the event thread is surfaced first -- confirm whether connection
        # state is expected to change during this sleep.
        time.sleep(1)
        if self.exception:
            raise self.exception
        raise ConnectionError(f"{self.room} is not connected")
    thread = get_thread_ident()
    with self.lock:
        # Listeners are grouped by the registering thread.
        listener = self.listeners[thread]
        listener.add(event_type, callback)
    self.process_queues()
Add a listener for a specific event type. You'll need to actually listen for changes using the listen method.
49,348
def enqueue_data(self, event_type, data):
    """Queue a data item under `event_type` on every registered listener
    and mark the queues as needing processing."""
    with self.lock:
        for listener in self.listeners.values():
            listener.enqueue(event_type, data)
        self.must_process = True
Enqueue a data item for specific event type
49,349
def process_queues(self, forced=False):
    """Wake the arbitrator if any listener queue has data (or when forced).

    Does nothing while queues are disabled.
    """
    with self.lock:
        if (not forced and not self.must_process) or not self.queues_enabled:
            return
        self.must_process = False
    ARBITRATOR.awaken()
Process queues if any have data queued
49,350
def __listeners_for_thread(self):
    """All listeners registered by the current thread.

    NOTE(review): call sites use this without parentheses, suggesting it is
    wrapped as a @property at the definition site -- confirm.
    """
    thread = get_thread_ident()
    with self.lock:
        return [l for tid, l in self.listeners.items() if tid == thread]
All Listeners for the current thread
49,351
def validate_listeners(self):
    """Validate that some listeners are registered for this thread.

    Re-raises any exception stored by the event thread first.
    :raises ValueError: when no listener is active
    """
    if self.exception:
        raise self.exception
    listeners = self.__listeners_for_thread
    if not sum(len(l) for l in listeners):
        raise ValueError("No active listeners")
Validates that some listeners are actually registered
49,352
def listen(self):
    """Block, dispatching events to registered listeners, until the
    connection drops or every listener for this thread goes inactive."""
    self.validate_listeners()
    with ARBITRATOR.condition:
        while self.connected:
            ARBITRATOR.condition.wait()
            if not self.run_queues():
                break
Listen for changes in all registered listeners .
49,353
def run_queues(self):
    """Process all queued events for this thread's listeners.

    Re-raises any exception stored by the event thread first.
    :return: True while at least one listener remains active
    """
    if self.exception:
        raise self.exception
    listeners = self.__listeners_for_thread
    return sum(l.process() for l in listeners) > 0
Run all queues that have data queued
49,354
def __add_prop(self, key, admin=False):
    """Attach a gettable/settable room-config property at runtime.

    :param key: config key to expose
    :param admin: when True, setting the key requires mod privileges
    """
    def getter(self):
        return self.config[key]

    def setter(self, val):
        if admin and not self.admin:
            raise RuntimeError(f"You can't set the {key} key without mod privileges")
        self.__set_config_value(self.config.get_real_key(key), val)

    # The property is installed on the class, so every instance gains it.
    setattr(self.__class__, key, property(getter, setter))
Add gettable and settable room config property during runtime
49,355
def __set_config_value(self, key, value):
    """Set a room-config value via the API (owner only).

    :return: the raw API response
    :raises RuntimeError: when the server reports an error
    """
    self.check_owner()
    params = {"room": self.room_id, "config": to_json({key: value})}
    resp = self.conn.make_api_call("setRoomConfig", params)
    if "error" in resp:
        raise RuntimeError(f"{resp['error'].get('message') or resp['error']}")
    return resp
Sets a value for a room config
49,356
def listen(self, once=False):
    """Listen for changes in all registered listeners.

    Use add_listener() beforehand, or set once=True to only wait for the
    initial room information (installs a "time" listener whose callback
    returns False, stopping after the first event).
    """
    if once:
        self.add_listener("time", lambda _: False)
    return self.conn.listen()
Listen for changes in all registered listeners. Use add_listener before calling this function to listen for desired events, or set once to True to listen for initial room information.
49,357
def __expire_files(self):
    """Rebuild the internal file dict, dropping entries whose file object
    reports itself as expired."""
    self.__files = OrderedDict(item for item in self.__files.items()
                               if not item[1].expired)
Prune expired entries from the file dict (files are always unclean).
49,358
def filedict(self, kv):
    """Update the file dict from a single (id, file) tuple; a None value
    deletes the key (missing keys are ignored). Not intended for user code."""
    k, v = kv
    if v is not None:
        self.__files.update({k: v})
    else:
        # Deleting an id that is not cached is not an error.
        with suppress(KeyError):
            del self.__files[k]
Updates filedict with a single file entry, or deletes the given key if the value is None. Shouldn't be used by the user.
49,359
def get_user_stats(self, name):
    """Return data about the given user, or None if the user does not exist.

    :param name: user name to query; falsy names short-circuit to None
    """
    # FIX: the original issued the HTTP request before checking for an
    # empty name, wasting a round trip; fail fast instead.
    if not name:
        return None
    req = self.conn.get(BASE_URL + "/user/" + name)
    if req.status_code != 200:
        return None
    return self.conn.make_api_call("getUserInfo", {"name": name})
Return data about the given user . Returns None if user does not exist .
49,360
def upload_file(self, filename, upload_as=None, blocksize=None, callback=None,
                information_callback=None, allow_timeout=False,):
    """Upload a file (path or file-like object) to this room.

    :param filename: path or readable object; upload_as overrides the name
    :param blocksize: chunk size for the upload Data wrapper
    :param callback: progress callback forwarded to Data
    :param information_callback: called with upload info; returning False
        requests a fresh upload key
    :param allow_timeout: honor server-side key timeouts instead of failing
    :return: the file id on success
    :raises ValueError: when the file exceeds the room's size limit
    """
    with delayed_close(filename if hasattr(filename, "read") else open(filename, "rb")) as file:
        filename = upload_as or os.path.split(filename)[1]
        try:
            # Seek to the end to measure the size against the room limit.
            file.seek(0, 2)
            if file.tell() > self.config.max_file:
                raise ValueError(f"File must be at most {self.config.max_file >> 30} GB")
        finally:
            try:
                file.seek(0)
            except Exception:
                pass
        files = Data({"file": {"name": filename, "value": file}},
                     blocksize=blocksize, callback=callback,)
        headers = {"Origin": BASE_URL}
        headers.update(files.headers)
        # Acquire an upload key; the information callback may veto a key
        # (return False) to request a fresh one.
        while True:
            key, server, file_id = self._generate_upload_key(allow_timeout=allow_timeout)
            info = dict(key=key, server=server, file_id=file_id,
                        room=self.room_id, filename=filename, len=files.len,
                        resumecount=0,)
            if information_callback:
                if information_callback(info) is False:
                    continue
            break
        params = {"room": self.room_id, "key": key, "filename": filename}
        if self.key:
            params["roomKey"] = self.key
        if self.password:
            params["password"] = self.password
        # Upload loop: on an aborted connection, ask the server how many
        # bytes it received and resume from that offset.
        while True:
            try:
                post = self.conn.post(f"https://{server}/upload", params=params,
                                      data=files, headers=headers,)
                post.raise_for_status()
                break
            except requests.exceptions.ConnectionError as ex:
                if "aborted" not in repr(ex):
                    raise
                try:
                    resume = self.conn.get(f"https://{server}/rest/uploadStatus",
                                           params={"key": key, "c": 1},).text
                    resume = from_json(resume)
                    resume = resume["receivedBytes"]
                    if resume <= 0:
                        raise ConnectionError("Cannot resume")
                    file.seek(resume)
                    # Rebuild the body starting at the resume offset.
                    files = Data({"file": {"name": filename, "value": file}},
                                 blocksize=blocksize, callback=callback,
                                 logical_offset=resume,)
                    headers.update(files.headers)
                    params["startAt"] = resume
                    info["resumecount"] += 1
                    if information_callback:
                        information_callback(info)
                except requests.exceptions.ConnectionError as iex:
                    if "aborted" not in repr(iex):
                        raise
                continue
        return file_id
Uploads a file with the given filename to this room. You may specify upload_as to change the name it is uploaded as. You can also specify a blocksize and a callback if you wish. Returns the file's id on success and None on failure.
49,361
def close(self):
    """Close the connection to this room and drop the related attributes."""
    if hasattr(self, "conn"):
        self.conn.close()
        delattr(self, "conn")
    if hasattr(self, "user"):
        delattr(self, "user")
Close connection to this room
49,362
def user_info(self, kv):
    """Store one (key, value) pair into the private user-info mapping."""
    entry_key, entry_value = kv
    self.__user_info[entry_key] = entry_value
Sets user_info dict entry through a tuple .
49,363
def fileinfo(self, fid):
    """Query the server for what it knows about the given file id.

    Updates the cached file entry when present and not yet updated.
    :raises TypeError: if fid is not a string
    :raises ValueError: when the server produces no callback within 5s
    :return: the info payload (possibly falsy when the query failed)
    """
    if not isinstance(fid, str):
        raise TypeError("Your file ID must be a string")
    try:
        info = self.conn.make_call_with_cb("getFileinfo", fid).get(timeout=5)
        if not info:
            warnings.warn(f"Your query for file with ID: '{fid}' failed.",
                          RuntimeWarning)
        elif fid in self.__files and not self.__files[fid].updated:
            self.__files[fid].fileupdate(info)
    except queue.Empty as ex:
        raise ValueError("lain didn't produce a callback!\n"
                         "Are you sure your query wasn't malformed?") from ex
    return info
Ask lain about what he knows about given file . If the given file exists in the file dict it will get updated .
49,364
def _generate_upload_key ( self , allow_timeout = False ) : while not self . user . nick : with ARBITRATOR . condition : ARBITRATOR . condition . wait ( ) while True : params = { "name" : self . user . nick , "room" : self . room_id , "c" : self . __upload_count , } if self . key : params [ "roomKey" ] = self . key if self . password : params [ "password" ] = self . password info = self . conn . make_api_call ( "getUploadKey" , params ) self . __upload_count += 1 try : return info [ "key" ] , info [ "server" ] , info [ "file_id" ] except Exception : to = int ( info . get ( "error" , { } ) . get ( "info" , { } ) . get ( "timeout" , 0 ) ) if to <= 0 or not allow_timeout : raise IOError ( f"Failed to retrieve key {info}" ) time . sleep ( to / 10000 )
Generates a new upload key
49,365
def delete_files ( self , ids ) : self . check_owner ( ) if not isinstance ( ids , list ) : raise TypeError ( "You must specify list of files to delete!" ) self . conn . make_call ( "deleteFiles" , ids )
Remove one or more files
49,366
def transfer_owner ( self , new_owner ) : if not self . owner and not self . admin : raise RuntimeError ( "You need more street creed to do this" ) new_owner = new_owner . strip ( ) . lower ( ) if not new_owner : raise ValueError ( "Empty strings cannot be new owners" ) self . __set_config_value ( "owner" , new_owner )
You had good run at it it s time for someone else to get dirty
49,367
def add_janitor ( self , janitor ) : if not self . owner and not self . admin : raise RuntimeError ( "Not enough street creed to do this" ) janitor = janitor . strip ( ) . lower ( ) if not janitor : raise ValueError ( "Empty strings cannot be janitors" ) if janitor in self . config . janitors : return self . config . janitors . append ( janitor ) self . __set_config_value ( "janitors" , self . config . janitors )
Add janitor to the room
49,368
def remove_janitor ( self , janitor ) : if not self . owner and not self . admin : raise RuntimeError ( "Not enough street creed to do this" ) janitor = janitor . strip ( ) . lower ( ) if not janitor : raise ValueError ( "Empty strings cannot be janitors" ) if janitor not in self . config . janitors : return self . config . janitors . remove ( janitor ) self . __set_config_value ( "janitors" , self . config . janitors )
Remove janitor from the room
49,369
def _make_env ( resultdir = None ) : env = { "config" : { } , "resultdir" : "" , "config_file" : "" , "nodes" : { } , "phase" : "" , "user" : "" , "cwd" : os . getcwd ( ) } if resultdir : env_path = os . path . join ( resultdir , "env" ) if os . path . isfile ( env_path ) : with open ( env_path , "r" ) as f : env . update ( yaml . load ( f ) ) logger . debug ( "Loaded environment %s" , env_path ) if "config_file" in env and env [ "config_file" ] is not None : if os . path . isfile ( env [ "config_file" ] ) : with open ( env [ "config_file" ] , "r" ) as f : env [ "config" ] . update ( yaml . load ( f ) ) logger . debug ( "Reloaded config %s" , env [ "config" ] ) return env
Loads the env from resultdir if not None or makes a new one .
49,370
def _save_env ( env ) : env_path = os . path . join ( env [ "resultdir" ] , "env" ) if os . path . isdir ( env [ "resultdir" ] ) : with open ( env_path , "w" ) as f : yaml . dump ( env , f )
Saves one environment .
49,371
def _set_resultdir ( name = None ) : resultdir_name = name or "enos_" + datetime . today ( ) . isoformat ( ) resultdir_path = os . path . abspath ( resultdir_name ) if os . path . isfile ( resultdir_path ) : raise EnosFilePathError ( resultdir_path , "Result directory cannot be created due " "to existing file %s" % resultdir_path ) if not os . path . isdir ( resultdir_path ) : os . mkdir ( resultdir_path ) logger . info ( "Generate results directory %s" % resultdir_path ) link_path = SYMLINK_NAME if os . path . lexists ( link_path ) : os . remove ( link_path ) try : os . symlink ( resultdir_path , link_path ) logger . info ( "Symlink %s to %s" % ( resultdir_path , link_path ) ) except OSError : logger . warning ( "Symlink %s to %s failed" % ( resultdir_path , link_path ) ) return resultdir_path
Set or get the directory to store experiment results .
49,372
def _is_cached ( self , ext = '.json' ) : if not ext . startswith ( '.' ) : ext = '.%s' % ext cache_file = '%s%s' % ( self . filename , ext ) return self . filename . endswith ( ext ) or os . path . exists ( cache_file )
Determine if filename is cached using extension ex a string .
49,373
def list_slitlets_from_string ( s , islitlet_min , islitlet_max ) : if not isinstance ( s , str ) : print ( 'type(s): ' , type ( s ) ) print ( 'ERROR: function expected a string parameter' ) set_slitlets = set ( ) s = re . sub ( '^ *' , '' , s ) s = re . sub ( ' *$' , '' , s ) s = re . sub ( ' *,' , ',' , s ) s = re . sub ( ', *' , ',' , s ) s = re . sub ( ' +' , ' ' , s ) stuples = s . split ( ) for item in stuples : subitems = item . split ( ',' ) nsubitems = len ( subitems ) if nsubitems == 1 : n1 = int ( subitems [ 0 ] ) n2 = n1 step = 1 elif nsubitems == 2 : n1 = int ( subitems [ 0 ] ) n2 = int ( subitems [ 1 ] ) step = 1 elif nsubitems == 3 : n1 = int ( subitems [ 0 ] ) n2 = int ( subitems [ 1 ] ) step = int ( subitems [ 2 ] ) else : raise ValueError ( 'Unexpected slitlet range:' , s ) for i in range ( n1 , n2 + 1 , step ) : if islitlet_min <= i <= islitlet_max : set_slitlets . add ( i ) else : print ( 'islitlet_min: ' , islitlet_min ) print ( 'islitlet_max: ' , islitlet_max ) print ( 'i...........: ' , i ) raise ValueError ( "Slitlet number out of range!" ) list_slitlets = list ( set_slitlets ) list_slitlets . sort ( ) return list_slitlets
Return list of slitlets from string specification .
49,374
def ref ( self , orm_classpath , cls_pk = None ) : orm_module , orm_class = get_objects ( orm_classpath ) q = orm_class . query if cls_pk : found = False for fn , f in orm_class . schema . fields . items ( ) : cls_ref_s = f . schema if cls_ref_s and self . schema == cls_ref_s : q . is_field ( fn , cls_pk ) found = True break if not found : raise ValueError ( "Did not find a foreign key field for [{}] in [{}]" . format ( self . orm_class . table_name , orm_class . table_name , ) ) return q
takes a classpath to allow query - ing from another Orm class
49,375
def select_fields ( self , * fields ) : if fields : if not isinstance ( fields [ 0 ] , basestring ) : fields = list ( fields [ 0 ] ) + list ( fields ) [ 1 : ] for field_name in fields : field_name = self . _normalize_field_name ( field_name ) self . select_field ( field_name ) return self
set multiple fields to be selected
49,376
def set_field ( self , field_name , field_val = None ) : field_name = self . _normalize_field_name ( field_name ) self . fields_set . append ( field_name , [ field_name , field_val ] ) return self
set a field into . fields attribute
49,377
def set_fields ( self , fields = None , * fields_args , ** fields_kwargs ) : if fields_args : fields = [ fields ] fields . extend ( fields_args ) for field_name in fields : self . set_field ( field_name ) elif fields_kwargs : fields = make_dict ( fields , fields_kwargs ) for field_name , field_val in fields . items ( ) : self . set_field ( field_name , field_val ) else : if isinstance ( fields , Mapping ) : for field_name , field_val in fields . items ( ) : self . set_field ( field_name , field_val ) else : for field_name in fields : self . set_field ( field_name ) return self
completely replaces the current . fields with fields and fields_kwargs combined
49,378
def nlike_field ( self , field_name , * field_val , ** field_kwargs ) : if not field_val : raise ValueError ( "Cannot NOT LIKE nothing" ) field_name = self . _normalize_field_name ( field_name ) fv = field_val [ 0 ] self . fields_where . append ( field_name , [ "nlike" , field_name , fv , field_kwargs ] ) return self
Perform a field_name NOT LIKE field_val query
49,379
def sort_field ( self , field_name , direction , field_vals = None ) : field_name = self . _normalize_field_name ( field_name ) if direction > 0 : direction = 1 elif direction < 0 : direction = - 1 else : raise ValueError ( "direction {} is undefined" . format ( direction ) ) self . fields_sort . append ( field_name , [ direction , field_name , list ( field_vals ) if field_vals else field_vals ] ) return self
sort this query by field_name in directrion
49,380
def get ( self , limit = None , page = None ) : has_more = False self . bounds . paginate = True limit_paginate , offset = self . bounds . get ( limit , page ) self . default_val = [ ] results = self . _query ( 'get' ) if limit_paginate : self . bounds . paginate = False if len ( results ) == limit_paginate : has_more = True results . pop ( - 1 ) it = ResultsIterator ( results , orm_class = self . orm_class , has_more = has_more , query = self ) return self . iterator_class ( it )
get results from the db
49,381
def get_one ( self ) : self . default_val = None o = self . default_val d = self . _query ( 'get_one' ) if d : o = self . orm_class ( d , hydrate = True ) return o
get one row from the db
49,382
def value ( self ) : field_vals = None field_names = self . fields_select . names ( ) fcount = len ( field_names ) if fcount : d = self . _query ( 'get_one' ) if d : field_vals = [ d . get ( fn , None ) for fn in field_names ] if fcount == 1 : field_vals = field_vals [ 0 ] else : raise ValueError ( "no select fields were set, so cannot return value" ) return field_vals
convenience method to just get one value or tuple of values for the query
49,383
def count ( self ) : fields_sort = self . fields_sort self . fields_sort = self . fields_sort_class ( ) self . default_val = 0 ret = self . _query ( 'count' ) self . fields_sort = fields_sort return ret
return the count of the criteria
49,384
def insert ( self ) : self . default_val = 0 return self . interface . insert ( self . schema , self . fields ) return self . interface . insert ( self . schema , self . fields )
persist the . fields
49,385
def update ( self ) : self . default_val = 0 return self . interface . update ( self . schema , self . fields , self )
persist the . fields using . fields_where
49,386
def cache_key ( self , method_name ) : key = "" method = getattr ( self , "cache_key_{}" . format ( method_name ) , None ) if method : key = method ( ) return key
decides if this query is cacheable returns a key if it is otherwise empty
49,387
def ttl ( self ) : ret = 3600 cn = self . get_process ( ) if "ttl" in cn : ret = cn [ "ttl" ] return ret
how long you should cache results for cacheable queries
49,388
def _read_header ( f , header_param ) : header_delim = str ( header_param . get ( 'delimiter' , ',' ) ) if 'fields' not in header_param : raise UnnamedDataError ( f . name ) header_fields = { field [ 0 ] : field [ 1 : ] for field in header_param [ 'fields' ] } header_names = [ field [ 0 ] for field in header_param [ 'fields' ] ] header_str = StringIO ( f . readline ( ) ) header_reader = csv . DictReader ( header_str , header_names , delimiter = header_delim , skipinitialspace = True ) data = header_reader . next ( ) for k , v in data . iteritems ( ) : header_type = header_fields [ k ] [ 0 ] if isinstance ( header_type , basestring ) : if header_type . lower ( ) . startswith ( 'int' ) : header_type = int elif header_type . lower ( ) . startswith ( 'long' ) : header_type = long elif header_type . lower ( ) . startswith ( 'float' ) : header_type = float elif header_type . lower ( ) . startswith ( 'str' ) : header_type = str elif header_type . lower ( ) . startswith ( 'bool' ) : header_type = bool else : raise TypeError ( '"%s" is not a supported type.' % header_type ) data [ k ] = header_type ( v ) if len ( header_fields [ k ] ) > 1 : units = UREG ( str ( header_fields [ k ] [ 1 ] ) ) data [ k ] = data [ k ] * units return data
Read and parse data from 1st line of a file .
49,389
def _apply_units ( data_data , data_units , fname ) : data_names = data_data . dtype . names if not data_names : raise UnnamedDataError ( fname ) data = dict . fromkeys ( data_names ) for data_name in data_names : if data_name in data_units : units = str ( data_units [ data_name ] ) data [ data_name ] = data_data [ data_name ] * UREG ( units ) elif np . issubdtype ( data_data [ data_name ] . dtype , str ) : data [ data_name ] = data_data [ data_name ] . tolist ( ) else : data [ data_name ] = data_data [ data_name ] return data
Apply units to data .
49,390
def _utf8_list_to_ascii_tuple ( utf8_list ) : for n , utf8 in enumerate ( utf8_list ) : utf8_list [ n ] [ 0 ] = str ( utf8 [ 0 ] ) utf8_list [ n ] [ 1 ] = str ( utf8 [ 1 ] ) utf8_list [ n ] = tuple ( utf8 )
Convert unicode strings in a list of lists to ascii in a list of tuples .
49,391
def load_data ( self , filename , * args , ** kwargs ) : if not filename . endswith ( '.json' ) : filename += '.json' with open ( filename , 'r' ) as fid : json_data = json . load ( fid ) if ( not self . orig_data_reader or isinstance ( self , self . orig_data_reader ) ) : return self . apply_units_to_cache ( json_data [ 'data' ] ) utc_mod_time = json_data . get ( 'utc_mod_time' ) orig_data_reader_obj = self . orig_data_reader ( self . parameters , self . meta ) if utc_mod_time : utc_mod_time = time . struct_time ( utc_mod_time ) orig_filename = filename [ : - 5 ] if utc_mod_time < time . gmtime ( os . path . getmtime ( orig_filename ) ) : os . remove ( filename ) return orig_data_reader_obj . load_data ( orig_filename ) return orig_data_reader_obj . apply_units_to_cache ( json_data [ 'data' ] )
Load JSON data .
49,392
def load_data ( self , filename , * args , ** kwargs ) : workbook = open_workbook ( filename , verbosity = True ) data = { } for param , pval in self . parameters . iteritems ( ) : sheet = pval [ 'extras' ] [ 'sheet' ] worksheet = workbook . sheet_by_name ( sheet ) prng0 , prng1 = pval [ 'extras' ] [ 'range' ] punits = str ( pval . get ( 'units' ) or '' ) if prng0 is None : prng0 = [ ] if prng1 is None : prng1 = [ ] if isinstance ( prng0 , int ) and isinstance ( prng1 , int ) : datum = worksheet . cell_value ( prng0 , prng1 ) elif isinstance ( prng0 , list ) and isinstance ( prng1 , int ) : datum = worksheet . col_values ( prng1 , * prng0 ) elif isinstance ( prng0 , int ) and isinstance ( prng1 , list ) : datum = worksheet . row_values ( prng0 , * prng1 ) else : datum = [ ] for col in xrange ( prng0 [ 1 ] , prng1 [ 1 ] ) : datum . append ( worksheet . col_values ( col , prng0 [ 0 ] , prng1 [ 0 ] ) ) try : npdatum = np . array ( datum , dtype = np . float ) except ValueError as err : if not datum : data [ param ] = None elif all ( isinstance ( _ , basestring ) for _ in datum ) : data [ param ] = datum elif all ( not _ for _ in datum ) : data [ param ] = None else : raise err else : data [ param ] = npdatum * UREG ( punits ) return data
Load parameters from Excel spreadsheet .
49,393
def load_data ( self , filename , * args , ** kwargs ) : data = super ( ParameterizedXLS , self ) . load_data ( filename ) parameter_name = self . parameterization [ 'parameter' ] [ 'name' ] parameter_values = self . parameterization [ 'parameter' ] [ 'values' ] parameter_units = str ( self . parameterization [ 'parameter' ] [ 'units' ] ) data [ parameter_name ] = parameter_values * UREG ( parameter_units ) num_sheets = len ( self . parameterization [ 'parameter' ] [ 'sheets' ] ) for key in self . parameterization [ 'data' ] : units = str ( self . parameterization [ 'data' ] [ key ] . get ( 'units' ) ) or '' datalist = [ ] for n in xrange ( num_sheets ) : k = key + '_' + str ( n ) datalist . append ( data [ k ] . reshape ( ( 1 , - 1 ) ) ) data . pop ( k ) data [ key ] = np . concatenate ( datalist , axis = 0 ) * UREG ( units ) return data
Load parameterized data from different sheets .
49,394
def load_data ( self , filename , * args , ** kwargs ) : data = super ( MixedTextXLS , self ) . load_data ( filename ) for sheet_params in self . parameters . itervalues ( ) : for param , pval in sheet_params . iteritems ( ) : pattern = pval . get ( 'pattern' , EFG_PATTERN ) re_meth = pval . get ( 'method' , 'search' ) if re_meth in RE_METH : re_meth = getattr ( re , pval . get ( 'method' , 'search' ) ) else : msg = 'Only' , '"%s", ' * len ( RE_METH ) % tuple ( RE_METH ) msg += 'regex methods are allowed.' raise AttributeError ( msg ) match = re_meth ( pattern , data [ param ] ) if match : try : match = match . groups ( ) except AttributeError : match = [ m . groups ( ) for m in match ] npdata = np . array ( match , dtype = float ) . squeeze ( ) data [ param ] = npdata * UREG ( str ( pval . get ( 'units' ) or '' ) ) else : raise MixedTextNoMatchError ( re_meth , pattern , data [ param ] ) return data
Load text data from different sheets .
49,395
def mark ( self , n = 1 ) : self . tick_if_necessary ( ) self . count += n self . m1_rate . update ( n ) self . m5_rate . update ( n ) self . m15_rate . update ( n )
Mark the occurrence of a given number of events .
49,396
def _update ( self , layer = None ) : meta = getattr ( self , ModelBase . _meta_attr ) if not layer : layers = self . layers else : layers = _listify ( layer ) for layer in layers : path = os . path . abspath ( os . path . join ( meta . modelpath , layer ) ) getattr ( self , layer ) . load ( path )
Update layers in model .
49,397
def _initialize ( self ) : meta = getattr ( self , ModelBase . _meta_attr ) if self . param_file is not None : self . _load ( ) LOGGER . debug ( 'model:\n%r' , self . model ) mod = importlib . import_module ( meta . layers_mod , meta . layers_pkg ) src_model = { } for layer , value in self . model . iteritems ( ) : layer_cls = getattr ( mod , meta . layer_cls_names [ layer ] ) self . layers [ layer ] = layer_cls src_value = { } for src in value [ 'sources' ] : try : src , kwargs = src except ( TypeError , ValueError ) : kwargs = { } if isinstance ( src , basestring ) : continue src_value [ src . __name__ ] = { 'module' : src . __module__ , 'package' : None } src_value [ src . __name__ ] . update ( kwargs ) if src_value : value = src_model [ layer ] = src_value else : srcmod , srcpkg = value . get ( 'module' ) , value . get ( 'package' ) try : value = dict ( value [ 'sources' ] ) except ValueError : value = dict . fromkeys ( value [ 'sources' ] , { } ) for src in value . viewkeys ( ) : if srcmod is not None : value [ src ] [ 'module' ] = srcmod if srcpkg is not None : value [ src ] [ 'package' ] = srcpkg setattr ( self , layer , layer_cls ( value ) ) if src_model : self . model . update ( src_model ) self . _update ( ) self . _state = 'initialized'
Initialize model and layers .
49,398
def load ( self , modelfile , layer = None ) : self . param_file = modelfile self . _load ( layer ) self . _update ( layer )
Load or update a model or layers in a model .
49,399
def edit ( self , layer , item , delete = False ) : if hasattr ( self , layer ) : layer_obj = getattr ( self , layer ) else : raise AttributeError ( 'missing layer: %s' , layer ) if delete : return layer_obj for k , v in item . iteritems ( ) : if k in layer_obj . layer : layer_obj . edit ( k , v ) else : raise AttributeError ( 'missing layer item: %s' , k ) if k in self . model [ layer ] : self . model [ layer ] [ k ] . update ( v ) else : raise AttributeError ( 'missing model layer item: %s' , k )
Edit model .