idx int64 0 63k | question stringlengths 53 5.28k | target stringlengths 5 805 |
|---|---|---|
13,300 | def make_links ( current_cn ) : if not os . path . isfile ( CURRENT_CERTIFICATE_PATH ) : target = '{}/{}.crt' . format ( CERTIFICATES_PATH , current_cn ) print 'Create symlink {} -> {}' . format ( CURRENT_CERTIFICATE_PATH , target ) os . symlink ( target , CURRENT_CERTIFICATE_PATH ) if not os . path . isfile ( CURRENT_PRIVATE_KEY_PATH ) : target = '{}/{}.key' . format ( CERTIFICATES_PATH , current_cn ) print 'Create symlink {} -> {}' . format ( CURRENT_PRIVATE_KEY_PATH , target ) os . symlink ( target , CURRENT_PRIVATE_KEY_PATH ) | Create symlink for nginx |
13,301 | def create ( self , agent_cls = None , n_agents = 10 , agent_kwargs = { } , env_cls = Environment , env_kwargs = { } , callback = None , conns = 0 , log_folder = None ) : if not issubclass ( env_cls , Environment ) : raise TypeError ( "Environment class must be derived from ({}" . format ( Environment . __class__ . __name__ ) ) if callback is not None and not hasattr ( callback , '__call__' ) : raise TypeError ( "Callback must be callable." ) if hasattr ( agent_cls , '__iter__' ) : for e in agent_cls : if not issubclass ( e , CreativeAgent ) : raise TypeError ( "All agent classes must be derived from {}" . format ( CreativeAgent . __class__ . __name__ ) ) else : if not issubclass ( agent_cls , CreativeAgent ) : raise TypeError ( "Agent class must be derived from {}" . format ( CreativeAgent . __class__ . __name__ ) ) env = env_cls . create ( ** env_kwargs ) agents = [ ] if hasattr ( agent_cls , '__iter__' ) : for i in range ( len ( n_agents ) ) : agent_kwargs [ i ] [ 'environment' ] = env agent_kwargs [ i ] [ 'log_folder' ] = log_folder agents = agents + [ agent_cls [ i ] ( ** agent_kwargs [ i ] ) for e in range ( n_agents [ i ] ) ] else : agent_kwargs [ 'environment' ] = env agent_kwargs [ 'log_folder' ] = log_folder agents = [ agent_cls ( ** agent_kwargs ) for e in range ( n_agents ) ] if conns > 0 : env . create_random_connections ( n = conns ) return Simulation ( env , callback , log_folder ) | A convenience function to create simple simulations . |
13,302 | def _init_step ( self ) : self . _age += 1 self . env . age = self . _age self . _log ( logging . INFO , "" ) self . _log ( logging . INFO , "\t***** Step {:0>4} *****" . format ( self . age ) ) self . _log ( logging . INFO , "" ) self . _agents_to_act = self . _get_order_agents ( ) self . _step_processing_time = 0.0 self . _step_start_time = time . time ( ) | Initialize next step of simulation to be run . |
13,303 | def _finalize_step ( self ) : t = time . time ( ) if self . _callback is not None : self . _callback ( self . age ) t2 = time . time ( ) self . _step_processing_time += t2 - t self . _log ( logging . INFO , "Step {} run in: {:.3f}s ({:.3f}s of " "actual processing time used)" . format ( self . age , self . _step_processing_time , t2 - self . _step_start_time ) ) self . _processing_time += self . _step_processing_time | Finalize simulation step after all agents have acted for the current step . |
13,304 | def async_step ( self ) : assert len ( self . _agents_to_act ) == 0 self . _init_step ( ) t = time . time ( ) aiomas . run ( until = self . env . trigger_all ( ) ) self . _agents_to_act = [ ] self . _step_processing_time = time . time ( ) - t self . _finalize_step ( ) | Progress simulation by running all agents once asynchronously . |
13,305 | def steps ( self , n ) : assert len ( self . _agents_to_act ) == 0 for _ in range ( n ) : self . step ( ) | Progress simulation with given amount of steps . |
13,306 | def step ( self ) : assert len ( self . _agents_to_act ) == 0 self . next ( ) while len ( self . _agents_to_act ) > 0 : self . next ( ) | Progress simulation with a single step . |
13,307 | def end ( self , folder = None ) : ret = self . env . destroy ( folder = folder ) self . _end_time = time . time ( ) self . _log ( logging . DEBUG , "Simulation run with {} steps took {:.3f}s to" " complete, while actual processing time was {:.3f}s." . format ( self . age , self . _end_time - self . _start_time , self . _processing_time ) ) return ret | End the simulation and destroy the current simulation environment . |
13,308 | def get_unique_record ( self , sql , parameters = None , quiet = False , locked = False ) : results = self . execute_select ( sql , parameters = parameters , quiet = quiet , locked = locked ) assert ( len ( results ) == 1 ) return results [ 0 ] | I use this pattern a lot . Return the single record corresponding to the query . |
13,309 | def run_transaction ( self , command_list , do_commit = True ) : pass for c in command_list : if c . find ( ";" ) != - 1 or c . find ( "\\G" ) != - 1 : raise Exception ( "The SQL command '%s' contains a semi-colon or \\G. This is a potential SQL injection." % c ) if do_commit : sql = "START TRANSACTION;\n%s;\nCOMMIT" % "\n" . join ( command_list ) else : sql = "START TRANSACTION;\n%s;" % "\n" . join ( command_list ) return | This can be used to stage multiple commands and roll back the transaction if an error occurs . This is useful if you want to remove multiple records in multiple tables for one entity but do not want the deletion to occur if the entity is tied to table not specified in the list of commands . Performing this as a transaction avoids the situation where the records are partially removed . If do_commit is false the entire transaction is cancelled . |
13,310 | def callproc ( self , procname , parameters = ( ) , quiet = False , expect_return_value = False ) : self . procedures_run += 1 i = 0 errcode = 0 caughte = None out_param_indices = [ ] for j in range ( len ( parameters ) ) : p = parameters [ j ] if type ( p ) == type ( '' ) and p [ 0 ] == '@' : assert ( p . find ( ' ' ) == - 1 ) out_param_indices . append ( j ) if procname not in self . list_stored_procedures ( ) : raise Exception ( "The stored procedure '%s' does not exist." % procname ) if not re . match ( "^\s*\w+\s*$" , procname ) : raise Exception ( "Expected a stored procedure name in callproc but received '%s'." % procname ) while i < self . numTries : i += 1 try : self . _get_connection ( ) cursor = self . connection . cursor ( ) if type ( parameters ) != type ( ( ) ) : parameters = ( parameters , ) errcode = cursor . callproc ( procname , parameters ) self . lastrowid = int ( cursor . lastrowid ) cursor . close ( ) out_param_results = [ ] if out_param_indices : out_param_results = self . execute ( 'SELECT %s' % ", " . join ( [ '@_%s_%d AS %s' % ( procname , pindex , parameters [ pindex ] [ 1 : ] ) for pindex in out_param_indices ] ) ) return out_param_results except MySQLdb . OperationalError , e : self . _close_connection ( ) errcode = e [ 0 ] caughte = e continue except : self . _close_connection ( ) traceback . print_exc ( ) break if not quiet : sys . stderr . write ( "\nSQL execution error call stored procedure %s at %s:" % ( procname , datetime . now ( ) . strftime ( "%Y-%m-%d %H:%M:%S" ) ) ) sys . stderr . write ( "\nErrorcode/Error: %d - '%s'.\n" % ( errcode , str ( caughte ) ) ) sys . stderr . flush ( ) raise MySQLdb . OperationalError ( caughte ) | Calls a MySQL stored procedure procname and returns the return values . This uses DictCursor . To get return values back out of a stored procedure prefix the parameter with a |
13,311 | def t_insert_dict_if_new ( self , tblname , d , PKfields , fields = None ) : SQL , values = self . _insert_dict_if_new_inner ( tblname , d , PKfields , fields = fields ) if SQL != False : self . execute_select ( SQL , parameters = values , locked = True ) return True , d return False , values | A version of insertDictIfNew for transactions . This does not call commit . |
13,312 | def create_insert_dict_string ( self , tblname , d , PKfields = [ ] , fields = None , check_existing = False ) : if type ( PKfields ) == type ( "" ) : PKfields = [ PKfields ] if fields == None : fields = sorted ( d . keys ( ) ) values = None SQL = None try : wherestr = [ ] PKvalues = [ ] for PKfield in PKfields : if d [ PKfield ] == None : wherestr . append ( "%s IS NULL" % PKfield ) else : wherestr . append ( "%s=%%s" % PKfield ) PKvalues . append ( d [ PKfield ] ) PKfields = join ( PKfields , "," ) wherestr = join ( wherestr , " AND " ) record_exists = None if check_existing : record_exists = not ( not ( self . execute_select ( "SELECT %s FROM %s" % ( PKfields , tblname ) + " WHERE %s" % wherestr , parameters = tuple ( PKvalues ) , locked = False ) ) ) SQL = 'INSERT INTO %s (%s) VALUES (%s)' % ( tblname , join ( fields , ", " ) , join ( [ '%s' for x in range ( len ( fields ) ) ] , ',' ) ) values = tuple ( [ d [ k ] for k in fields ] ) return SQL , values , record_exists except Exception , e : raise Exception ( "Error occurred during database insertion: '%s'. %s" % ( str ( e ) , traceback . format_exc ( ) ) ) | The main function of the insert_dict functions . This creates and returns the SQL query and parameters used by the other functions but does not insert any data into the database . |
13,313 | def bytes_to_int ( byte_array , big_endian = True , signed = False ) : if six . PY3 : order = 'little' if big_endian : order = 'big' return int . from_bytes ( byte_array , byteorder = order , signed = signed ) else : length = len ( byte_array ) if length == 1 : code = 'B' elif length == 2 : code = 'H' elif length == 4 : code = 'L' elif length == 8 : code = 'Q' else : raise Exception ( "bytes_to_int : length of byte_array should be 1, 2, 4, or 8" ) if big_endian : code = '>' + code else : code = '<' + code if signed : code = code . lower ( ) return struct . unpack ( code , byte_array ) [ 0 ] | Converts a byte array to an integer . |
13,314 | def ip_to_bytes ( ip_str , big_endian = True ) : if big_endian : code = '>L' else : code = '<L' return bytes ( struct . unpack ( code , socket . inet_aton ( ip_str ) ) [ 0 ] ) | Converts an IP given as a string to a byte sequence |
13,315 | def get_file ( self , attr_name ) : return os . path . abspath ( os . path . join ( self . folder , "{}.log" . format ( attr_name ) ) ) | Return absolute path to logging file for obj s attribute . |
13,316 | def log_attr ( self , level , attr_name ) : msg = self . write ( attr_name ) self . log ( level , msg ) | Log attribute to file and pass the message to underlying logger . |
13,317 | def write ( self , attr_name , prefix = None ) : if self . _folder is None : return separator = "\t" attr = getattr ( self . obj , attr_name ) if hasattr ( attr , '__iter__' ) : msg = separator . join ( [ str ( e ) for e in attr ] ) else : msg = str ( attr ) if prefix is not None : msg = "{}\t{}" . format ( getattr ( self . obj , prefix ) , msg ) path = self . get_file ( attr_name ) with open ( path , 'a' ) as f : f . write ( "{}\n" . format ( msg ) ) return msg | Write attribute s value to a file . |
13,318 | def is_lambda ( fun ) : return isinstance ( fun , type ( LAMBDA ) ) and fun . __name__ == LAMBDA . __name__ | Check whether the given function is a lambda function . |
13,319 | def fixed_point ( is_zero , plus , minus , f , x ) : @ memo_Y def _fixed_point ( fixed_point_fun ) : def __fixed_point ( collected , new ) : diff = minus ( new , collected ) if is_zero ( diff ) : return collected return fixed_point_fun ( plus ( collected , diff ) , f ( diff ) ) return __fixed_point return _fixed_point ( x , f ( x ) ) | Get the least fixed point when it can be computed piecewise . |
13,320 | def memo_Y ( f ) : sub = { } def Yf ( * args ) : hashable_args = tuple ( [ repr ( x ) for x in args ] ) if args : if hashable_args not in sub : ret = sub [ hashable_args ] = f ( Yf ) ( * args ) else : ret = sub [ hashable_args ] return ret return f ( Yf ) ( ) return f ( Yf ) | Memoized Y combinator . |
13,321 | def install ( application , default_content_type , encoding = None ) : try : settings = application . settings [ SETTINGS_KEY ] except KeyError : settings = application . settings [ SETTINGS_KEY ] = ContentSettings ( ) settings . default_content_type = default_content_type settings . default_encoding = encoding return settings | Install the media type management settings . |
13,322 | def get_settings ( application , force_instance = False ) : try : return application . settings [ SETTINGS_KEY ] except KeyError : if not force_instance : return None return install ( application , None ) | Retrieve the media type settings for a application . |
13,323 | def add_binary_content_type ( application , content_type , pack , unpack ) : add_transcoder ( application , handlers . BinaryContentHandler ( content_type , pack , unpack ) ) | Add handler for a binary content type . |
13,324 | def add_text_content_type ( application , content_type , default_encoding , dumps , loads ) : parsed = headers . parse_content_type ( content_type ) parsed . parameters . pop ( 'charset' , None ) normalized = str ( parsed ) add_transcoder ( application , handlers . TextContentHandler ( normalized , dumps , loads , default_encoding ) ) | Add handler for a text content type . |
13,325 | def add_transcoder ( application , transcoder , content_type = None ) : settings = get_settings ( application , force_instance = True ) settings [ content_type or transcoder . content_type ] = transcoder | Register a transcoder for a specific content type . |
13,326 | def set_default_content_type ( application , content_type , encoding = None ) : settings = get_settings ( application , force_instance = True ) settings . default_content_type = content_type settings . default_encoding = encoding | Store the default content type for an application . |
13,327 | def get_response_content_type ( self ) : if self . _best_response_match is None : settings = get_settings ( self . application , force_instance = True ) acceptable = headers . parse_accept ( self . request . headers . get ( 'Accept' , settings . default_content_type if settings . default_content_type else '*/*' ) ) try : selected , _ = algorithms . select_content_type ( acceptable , settings . available_content_types ) self . _best_response_match = '/' . join ( [ selected . content_type , selected . content_subtype ] ) if selected . content_suffix is not None : self . _best_response_match = '+' . join ( [ self . _best_response_match , selected . content_suffix ] ) except errors . NoMatch : self . _best_response_match = settings . default_content_type return self . _best_response_match | Figure out what content type will be used in the response . |
13,328 | def send_response ( self , body , set_content_type = True ) : settings = get_settings ( self . application , force_instance = True ) handler = settings [ self . get_response_content_type ( ) ] content_type , data_bytes = handler . to_bytes ( body ) if set_content_type : self . set_header ( 'Content-Type' , content_type ) self . add_header ( 'Vary' , 'Accept' ) self . write ( data_bytes ) | Serialize and send body in the response . |
13,329 | def connections_from_graph ( env , G , edge_data = False ) : if not issubclass ( G . __class__ , ( Graph , DiGraph ) ) : raise TypeError ( "Graph structure must be derived from Networkx's " "Graph or DiGraph." ) if not hasattr ( env , 'get_agents' ) : raise TypeError ( "Parameter 'env' must have get_agents." ) addrs = env . get_agents ( addr = True ) if len ( addrs ) != len ( G ) : raise ValueError ( "The number of graph nodes and agents in the " "environment (excluding the manager agent) must " "match. Now got {} nodes and {} agents." . format ( len ( G ) , len ( addrs ) ) ) addrs = sort_addrs ( addrs ) _addrs2nodes ( addrs , G ) conn_map = _edges2conns ( G , edge_data ) env . create_connections ( conn_map ) | Create connections for agents in the given environment from the given NetworkX graph structure . |
13,330 | def graph_from_connections ( env , directed = False ) : G = DiGraph ( ) if directed else Graph ( ) conn_list = env . get_connections ( data = True ) for agent , conns in conn_list : G . add_node ( agent ) ebunch = [ ] for nb , data in conns . items ( ) : ebunch . append ( ( agent , nb , data ) ) if len ( ebunch ) > 0 : G . add_edges_from ( ebunch ) return G | Create NetworkX graph from agent connections in a given environment . |
13,331 | def _addrs2nodes ( addrs , G ) : for i , n in enumerate ( G . nodes ( ) ) : G . node [ n ] [ 'addr' ] = addrs [ i ] | Map agent addresses to nodes in the graph . |
13,332 | def _edges2conns ( G , edge_data = False ) : cm = { } for n in G . nodes ( data = True ) : if edge_data : cm [ n [ 1 ] [ 'addr' ] ] = [ ( G . node [ nb ] [ 'addr' ] , G [ n [ 0 ] ] [ nb ] ) for nb in G [ n [ 0 ] ] ] else : cm [ n [ 1 ] [ 'addr' ] ] = [ ( G . node [ nb ] [ 'addr' ] , { } ) for nb in G [ n [ 0 ] ] ] return cm | Create a mapping from graph edges to agent connections to be created . |
13,333 | def profile ( request , status = 200 ) : if request . method == 'GET' : if request . GET . get ( "username" , False ) : try : user_profile = User . objects . get ( username = request . GET . get ( "username" ) , userprofile__public = True ) . userprofile except ObjectDoesNotExist : raise Http404 ( "user not found or have not public profile" ) else : user_id = get_user_id ( request ) if get_config ( 'proso_user' , 'google.openid.migration' , default = True ) and not is_user_id_overridden ( request ) : migrated_user = migrate_google_openid_user ( request . user ) if migrated_user is not None : auth . logout ( request ) migrated_user . backend = 'social.backends.google.GoogleOAuth2' auth . login ( request , migrated_user ) user_profile = get_object_or_404 ( UserProfile , user_id = user_id ) return render_json ( request , user_profile , status = status , template = 'user_profile.html' , help_text = profile . __doc__ ) elif request . method == 'POST' : with transaction . atomic ( ) : to_save = json_body ( request . body . decode ( "utf-8" ) ) user_id = get_user_id ( request ) user_profile = get_object_or_404 ( UserProfile , user_id = user_id ) user = to_save . get ( 'user' , None ) if 'send_emails' in to_save : user_profile . send_emails = bool ( to_save [ 'send_emails' ] ) if 'public' in to_save : user_profile . public = bool ( to_save [ 'public' ] ) if user : error = _save_user ( request , user , new = False ) if error : return render_json ( request , error , template = 'user_json.html' , status = 400 ) if 'properties' in to_save : user_profile . save_properties ( to_save [ 'properties' ] ) user_profile . save ( ) request . method = "GET" return profile ( request , status = 202 ) else : return HttpResponseBadRequest ( "method %s is not allowed" . format ( request . method ) ) | Get the user s profile . If the user has no assigned profile the HTTP 404 is returned . Make a POST request to modify the user s profile . |
13,334 | def signup ( request ) : if request . method == 'GET' : return render ( request , 'user_signup.html' , { } , help_text = signup . __doc__ ) elif request . method == 'POST' : if request . user . is_authenticated ( ) and hasattr ( request . user , "userprofile" ) : return render_json ( request , { 'error' : _ ( 'User already logged in' ) , 'error_type' : 'username_logged' } , template = 'user_json.html' , status = 400 ) credentials = json_body ( request . body . decode ( "utf-8" ) ) error = _save_user ( request , credentials , new = True ) if error is not None : return render_json ( request , error , template = 'user_json.html' , status = 400 ) else : auth . login ( request , request . user ) request . method = "GET" return profile ( request , status = 201 ) else : return HttpResponseBadRequest ( "method %s is not allowed" . format ( request . method ) ) | Create a new user with the given credentials . |
13,335 | def session ( request ) : if request . user . id is None : return render_json ( request , { 'error' : _ ( 'There is no user available to create a session.' ) , 'error_type' : 'user_undefined' } , status = 400 , template = 'user_json.html' ) if request . method == 'GET' : return render_json ( request , Session . objects . get_current_session ( ) , template = 'user_session.html' , help_text = session . __doc__ ) elif request . method == 'POST' : current_session = Session . objects . get_current_session ( ) if current_session is None : return HttpResponseBadRequest ( "there is no current session to modify" ) data = json_body ( request . body . decode ( "utf-8" ) ) locale = data . get ( 'locale' , None ) time_zone = data . get ( 'time_zone' , None ) display_width = data . get ( 'display_width' , None ) display_height = data . get ( 'display_height' , None ) if locale : current_session . locale = locale if time_zone : current_session . time_zone = TimeZone . objects . from_content ( time_zone ) if display_width : current_session . display_width = display_width if display_height : current_session . display_height = display_height current_session . save ( ) return HttpResponse ( 'ok' , status = 202 ) else : return HttpResponseBadRequest ( "method %s is not allowed" . format ( request . method ) ) | Get the information about the current session or modify the current session . |
13,336 | def initmobile_view ( request ) : if 'username' in request . GET and 'password' in request . GET : username = request . GET [ 'username' ] password = request . GET [ 'password' ] user = auth . authenticate ( username = username , password = password ) if user is not None : if user . is_active : login ( request , user ) else : user = request . user response = { 'username' : user . username , 'csrftoken' : get_token ( request ) , } if not user . has_usable_password ( ) : password = User . objects . make_random_password ( ) user . set_password ( password ) user . save ( ) response [ 'password' ] = password return HttpResponse ( json . dumps ( response ) ) | Create lazy user with a password . Used from the Android app . Also returns csrf token . |
13,337 | def parse_device_disk ( token ) : name , token = token . split ( "[" , 1 ) number , flags = token . split ( "]" , 1 ) return name , { "number" : int ( number ) , "write_mostly" : "W" in flags , "faulty" : "F" in flags , "spare" : "S" in flags , "replacement" : "R" in flags , } | Parse a single disk from the header line . |
13,338 | def group_by ( what , by ) : return proso . dict . group_keys_by_values ( { x : by ( x ) for x in what } ) | Take a list and apply the given function on each its value then group the values by the function results . |
13,339 | def copy_resource_dir ( src , dest ) : package_name = "mocha" dest = ( dest + "/" + os . path . basename ( src ) ) . rstrip ( "/" ) if pkg_resources . resource_isdir ( package_name , src ) : if not os . path . isdir ( dest ) : os . makedirs ( dest ) for res in pkg_resources . resource_listdir ( __name__ , src ) : copy_resource_dir ( src + "/" + res , dest ) else : if not os . path . isfile ( dest ) and os . path . splitext ( src ) [ 1 ] not in [ ".pyc" ] : copy_resource_file ( src , dest ) | To copy package data directory to destination |
13,340 | def init ( ) : mochapyfile = os . path . join ( os . path . join ( CWD , "brew.py" ) ) header ( "Initializing Mocha ..." ) if os . path . isfile ( mochapyfile ) : print ( "WARNING: It seems like Mocha is already setup!" ) print ( "*" * 80 ) else : print ( "" ) print ( "Copying files to the current directory..." ) copy_resource_dir ( SKELETON_DIR + "/create/" , CWD ) print ( "" ) _npm_install_static ( ) print ( "" ) print ( "----- Your Mocha is ready! ----" ) print ( "" ) print ( "> What's next?" ) print ( "- Edit the config [ application/config.py ] " ) print ( "- If necessary setup your model database [ mocha :initdb ]" ) print ( "- Launch app on development mode, run [ mocha :serve ]" ) print ( "" ) print ( "*" * 80 ) | Setup Mocha in the current directory |
13,341 | def add_view ( name , no_template ) : app_dest = APPLICATION_DIR viewsrc = "%s/create-view/view.py" % SKELETON_DIR tplsrc = "%s/create-view/template.jade" % SKELETON_DIR viewdest_dir = os . path . join ( app_dest , "views" ) viewdest = os . path . join ( viewdest_dir , "%s.py" % name ) tpldest_dir = os . path . join ( app_dest , "templates/%s/Index" % name ) tpldest = os . path . join ( tpldest_dir , "index.jade" ) header ( "Adding New View" ) print ( "View: %s" % viewdest . replace ( CWD , "" ) ) if not no_template : print ( "Template: %s" % tpldest . replace ( CWD , "" ) ) else : print ( "* Template will not be created because of the flag --no-template| -t" ) if os . path . isfile ( viewdest ) or os . path . isfile ( tpldest ) : print ( "*** ERROR: View or Template file exist already" ) else : if not os . path . isdir ( viewdest_dir ) : utils . make_dirs ( viewdest_dir ) copy_resource_file ( viewsrc , viewdest ) with open ( viewdest , "r+" ) as vd : content = vd . read ( ) . replace ( "%ROUTE%" , name . lower ( ) ) . replace ( "%NAV_TITLE%" , name . capitalize ( ) ) vd . seek ( 0 ) vd . write ( content ) vd . truncate ( ) if not no_template : if not os . path . isdir ( tpldest_dir ) : utils . make_dirs ( tpldest_dir ) copy_resource_file ( tplsrc , tpldest ) print ( "" ) print ( "*" * 80 ) | Create a new view and template page |
13,342 | def initdb ( ) : print ( "Syncing up database..." ) cwd_to_sys_path ( ) if db and hasattr ( db , "Model" ) : db . create_all ( ) for m in db . Model . __subclasses__ ( ) : if hasattr ( m , "initialize__" ) : print ( "Sync up model: %s ..." % m . __name__ ) getattr ( m , "initialize__" ) ( ) print ( "Done" ) | Sync database Create new tables etc ... |
13,343 | def _set_flask_alembic ( ) : from flask_alembic import Alembic application . app . extensions [ "sqlalchemy" ] = type ( '' , ( ) , { "db" : db } ) alembic = Alembic ( ) alembic . init_app ( application . app ) return alembic | Add the SQLAlchemy object in the global extension |
13,344 | def assets2s3 ( ) : import flask_s3 header ( "Assets2S3..." ) print ( "" ) print ( "Building assets files..." ) print ( "" ) build_assets ( application . app ) print ( "" ) print ( "Uploading assets files to S3 ..." ) flask_s3 . create_all ( application . app ) print ( "" ) | Upload assets files to S3 |
13,345 | def launch ( thing , title = False ) : html = htmlFromThing ( thing , title = title ) if not html : print ( "no HTML was generated." ) return fname = "%s/%s.html" % ( tempfile . gettempdir ( ) , str ( time . time ( ) ) ) with open ( fname , 'w' ) as f : f . write ( html ) webbrowser . open ( fname ) | analyze a thing create a nice HTML document and launch it . |
13,346 | def analyzeThing ( originalThing2 ) : originalThing = copy . copy ( originalThing2 ) things = { } for name in sorted ( dir ( originalThing ) ) : print ( "analyzing" , name ) thing = copy . copy ( originalThing ) if name in webinspect . blacklist or name . lower ( ) in webinspect . blacklist : item = "DID NOT EVALUATE (this will appear as a string)" else : item = getattr ( thing , name ) itemType = type ( item ) . __name__ itemStr = thingToString ( item ) itemEval = "" if "method" in itemStr : if name in webinspect . blacklist or name . lower ( ) in webinspect . blacklist : itemEval = "DID NOT EVALUATE" else : print ( "executing %s()" % name ) print ( "I'm about to try..." ) try : itemEval = thingToString ( getattr ( thing , name ) ( ) ) except Exception as e : exceptionToString ( e ) things [ name ] = [ itemType , itemStr , itemEval ] return things | analyze an object and all its attirbutes . Returns a dictionary . |
13,347 | def websafe ( s ) : s = s . replace ( "<" , "<" ) . replace ( ">" , ">" ) s = s . replace ( r'\x' , r' \x' ) s = s . replace ( "\n" , "<br>" ) return s | return a string with HTML - safe text |
13,348 | def slugify ( text , delim = '-' ) : punctuation_re = re . compile ( r'[\t !"#$%&\'()*\-/<=>?@\[\\\]^_`{|},.:]+' ) result = [ ] for word in punctuation_re . split ( text . lower ( ) ) : word = normalize_text ( word ) if word : result . append ( word ) return delim . join ( result ) | Generates an slightly worse ASCII - only slug . |
13,349 | def javascript_escape ( s , quote_double_quotes = True ) : ustring_re = re . compile ( u"([\u0080-\uffff])" ) def fix ( match ) : return r"\u%04x" % ord ( match . group ( 1 ) ) if type ( s ) == str : s = s . decode ( 'utf-8' ) elif type ( s ) != six . text_type : raise TypeError ( s ) s = s . replace ( '\\' , '\\\\' ) s = s . replace ( '\r' , '\\r' ) s = s . replace ( '\n' , '\\n' ) s = s . replace ( '\t' , '\\t' ) s = s . replace ( "'" , "\\'" ) if quote_double_quotes : s = s . replace ( '"' , '"' ) return str ( ustring_re . sub ( fix , s ) ) | Escape characters for javascript strings . |
13,350 | def seconds_to_hms_verbose ( t ) : hours = int ( ( t / 3600 ) ) mins = int ( ( t / 60 ) % 60 ) secs = int ( t % 60 ) return ' ' . join ( [ ( hours + ' hour' + ( 's' if hours > 1 else '' ) ) if hours > 0 else '' , ( mins + ' minute' + ( 's' if mins > 1 else '' ) ) if mins > 0 else '' , ( secs + ' second' + ( 's' if secs > 1 else '' ) ) if secs > 0 else '' ] ) | Converts seconds float to H hours 8 minutes 30 seconds format |
13,351 | def pretty_render ( data , format = 'text' , indent = 0 ) : if format == 'json' : return render_json ( data ) elif format == 'html' : return render_html ( data ) elif format == 'xml' : return render_xml ( data ) else : return dict_to_plaintext ( data , indent = indent ) | Render a dict based on a format |
13,352 | def dict_to_xml ( xml_dict ) : import lxml . etree as etree root_tag = list ( xml_dict . keys ( ) ) [ 0 ] root = etree . Element ( root_tag ) _dict_to_xml_recurse ( root , xml_dict [ root_tag ] ) return root | Converts a dictionary to an XML ElementTree Element |
13,353 | def xml_get_tag ( xml , tag , parent_tag = None , multi_line = False ) : expr_str = '[<:]' + tag + '.*?>(?P<matched_text>.+?)<' if parent_tag : expr_str = '[<:]' + parent_tag + '.*?>.*?' + expr_str expr = re . compile ( expr_str , re . DOTALL | re . IGNORECASE ) if multi_line : return expr . findall ( xml ) else : if expr . search ( xml ) : return expr . search ( xml ) . group ( 'matched_text' ) . strip ( ) else : return None | Returns the tag data for the first instance of the named tag or for all instances if multi is true . If a parent tag is specified then that will be required before the tag . |
13,354 | def _build_table ( self ) -> Dict [ State , Tuple [ Multiplex , ... ] ] : result : Dict [ State , Tuple [ Multiplex , ... ] ] = { } for state in self . influence_graph . all_states ( ) : result [ state ] = tuple ( multiplex for multiplex in self . influence_graph . multiplexes if multiplex . is_active ( state ) ) return result | Private method which build the table which map a State to the active multiplex . |
13,355 | def _to_base36 ( number ) : if number < 0 : raise ValueError ( "Cannot encode negative numbers" ) chars = "" while number != 0 : number , i = divmod ( number , 36 ) chars = _alphabet [ i ] + chars return chars or "0" | Convert a positive integer to a base36 string . |
13,356 | def _pad ( string , size ) : strlen = len ( string ) if strlen == size : return string if strlen < size : return _padding [ 0 : size - strlen ] + string return string [ - size : ] | Pad a string with leading zeroes to fit the given size truncating if necessary . |
13,357 | def _random_block ( ) : random_number = random . randint ( 0 , DISCRETE_VALUES ) random_string = _to_base36 ( random_number ) return _pad ( random_string , BLOCK_SIZE ) | Generate a random string of BLOCK_SIZE length . |
13,358 | def get_process_fingerprint ( ) : pid = os . getpid ( ) hostname = socket . gethostname ( ) padded_pid = _pad ( _to_base36 ( pid ) , 2 ) hostname_hash = sum ( [ ord ( x ) for x in hostname ] ) + len ( hostname ) + 36 padded_hostname = _pad ( _to_base36 ( hostname_hash ) , 2 ) return padded_pid + padded_hostname | Extract a unique fingerprint for the current process using a combination of the process PID and the system s hostname . |
13,359 | def counter ( self ) : self . _counter += 1 if self . _counter >= DISCRETE_VALUES : self . _counter = 0 return self . _counter | Rolling counter that ensures same - machine and same - time cuids don t collide . |
13,360 | def cuid ( self ) : identifier = "c" millis = int ( time . time ( ) * 1000 ) identifier += _to_base36 ( millis ) count = _pad ( _to_base36 ( self . counter ) , BLOCK_SIZE ) identifier += count identifier += self . fingerprint identifier += _random_block ( ) identifier += _random_block ( ) return identifier | Generate a full - length cuid as a string . |
13,361 | def read_excel_file ( inputfile , sheet_name ) : workbook = xlrd . open_workbook ( inputfile ) output = [ ] found = False for sheet in workbook . sheets ( ) : if sheet . name == sheet_name : found = True for row in range ( sheet . nrows ) : values = [ ] for col in range ( sheet . ncols ) : values . append ( sheet . cell ( row , col ) . value ) output . append ( values ) if not found : raise MQ2Exception ( 'Invalid session identifier provided' ) return output | Return a matrix containing all the information present in the excel sheet of the specified excel document . |
13,362 | def get_session_identifiers ( cls , folder = None , inputfile = None ) : sessions = [ ] if inputfile and folder : raise MQ2Exception ( 'You should specify either a folder or a file' ) if folder : if not os . path . isdir ( folder ) : return sessions for root , dirs , files in os . walk ( folder ) : for filename in files : filename = os . path . join ( root , filename ) for ext in SUPPORTED_FILES : if filename . endswith ( ext ) : wbook = xlrd . open_workbook ( filename ) for sheet in wbook . sheets ( ) : if sheet . name not in sessions : sessions . append ( sheet . name ) elif inputfile : if os . path . isdir ( inputfile ) : return sessions for ext in SUPPORTED_FILES : if inputfile . endswith ( ext ) : wbook = xlrd . open_workbook ( inputfile ) for sheet in wbook . sheets ( ) : if sheet . name not in sessions : sessions . append ( sheet . name ) return sessions | Retrieve the list of session identifiers contained in the data on the folder or the inputfile . For this plugin it returns the list of excel sheet available . |
13,363 | def file_rights ( filepath , mode = None , uid = None , gid = None ) : file_handle = os . open ( filepath , os . O_RDONLY ) if mode : os . fchmod ( file_handle , mode ) if uid : if not gid : gid = 0 os . fchown ( file_handle , uid , gid ) os . close ( file_handle ) | Change file rights |
13,364 | def init_from_wave_file ( wavpath ) : try : samplerate , data = SW . read ( wavpath ) nframes = data . shape [ 0 ] except : try : w = wave . open ( wavpath ) samplerate = w . getframerate ( ) nframes = w . getnframes ( ) except : raise Exception ( 'Cannot decode wavefile ' + wavpath ) return SVEnv ( samplerate , nframes , wavpath ) | Init a sonic visualiser environment structure based the analysis of the main audio file . The audio file have to be encoded in wave |
13,365 | def add_continuous_annotations ( self , x , y , colourName = 'Purple' , colour = '#c832ff' , name = '' , view = None , vscale = None , presentationName = None ) : model = self . data . appendChild ( self . doc . createElement ( 'model' ) ) imodel = self . nbdata for atname , atval in [ ( 'id' , imodel + 1 ) , ( 'dataset' , imodel ) , ( 'name' , name ) , ( 'sampleRate' , self . samplerate ) , ( 'start' , int ( min ( x ) * self . samplerate ) ) , ( 'end' , int ( max ( x ) * self . samplerate ) ) , ( 'type' , 'sparse' ) , ( 'dimensions' , '2' ) , ( 'resolution' , '1' ) , ( 'notifyOnAdd' , 'true' ) , ( 'minimum' , min ( y ) ) , ( 'maximum' , max ( y ) ) , ( 'units' , '' ) ] : model . setAttribute ( atname , str ( atval ) ) dataset = self . data . appendChild ( SVDataset2D ( self . doc , str ( imodel ) , self . samplerate ) ) dataset . set_data_from_iterable ( map ( int , np . array ( x ) * self . samplerate ) , y ) self . nbdata += 2 valruler = self . __add_time_ruler ( ) vallayer = self . __add_val_layer ( imodel + 1 ) vallayer . setAttribute ( 'colourName' , colourName ) vallayer . setAttribute ( 'colour' , colour ) if presentationName : vallayer . setAttribute ( 'presentationName' , presentationName ) if vscale is None : vallayer . setAttribute ( 'verticalScale' , '0' ) vallayer . setAttribute ( 'scaleMinimum' , str ( min ( y ) ) ) vallayer . setAttribute ( 'scaleMaximum' , str ( max ( y ) ) ) else : vallayer . setAttribute ( 'verticalScale' , '0' ) vallayer . setAttribute ( 'scaleMinimum' , str ( vscale [ 0 ] ) ) vallayer . setAttribute ( 'scaleMaximum' , str ( vscale [ 1 ] ) ) if view is None : view = self . __add_view ( ) self . __add_layer_reference ( view , valruler ) self . __add_layer_reference ( view , vallayer ) return view | add a continous annotation layer |
13,366 | def add_interval_annotations ( self , temp_idx , durations , labels , values = None , colourName = 'Purple' , colour = '#c832ff' , name = '' , view = None , presentationName = None ) : model = self . data . appendChild ( self . doc . createElement ( 'model' ) ) imodel = self . nbdata for atname , atval in [ ( 'id' , imodel + 1 ) , ( 'dataset' , imodel ) , ( 'name' , name ) , ( 'sampleRate' , self . samplerate ) , ( 'type' , 'sparse' ) , ( 'dimensions' , '3' ) , ( 'subtype' , 'region' ) , ( 'resolution' , '1' ) , ( 'notifyOnAdd' , 'true' ) , ( 'units' , '' ) , ( 'valueQuantization' , '0' ) ] : model . setAttribute ( atname , str ( atval ) ) dataset = self . data . appendChild ( SVDataset3D ( self . doc , str ( imodel ) , self . samplerate ) ) if values is None : values = ( [ 0 ] * len ( temp_idx ) ) dataset . set_data_from_iterable ( map ( int , np . array ( temp_idx ) * self . samplerate ) , values , map ( int , np . array ( durations ) * self . samplerate ) , labels ) self . nbdata += 2 valruler = self . __add_time_ruler ( ) vallayer = self . __add_region_layer ( imodel + 1 , name ) vallayer . setAttribute ( 'colourName' , colourName ) vallayer . setAttribute ( 'colour' , colour ) if presentationName : vallayer . setAttribute ( 'presentationName' , presentationName ) if view is None : view = self . __add_view ( ) self . __add_layer_reference ( view , valruler ) self . __add_layer_reference ( view , vallayer ) return view | add a labelled interval annotation layer |
13,367 | def load_initial ( self , streams ) : d = { } for stream in streams : s = io . load ( stream ) if 'BLOCK' not in s : raise ValueError ( "No BLOCK found" ) d . update ( s [ 'BLOCK' ] ) d = { 'BLOCK' : d } C = io . wc_lha2dict ( d ) sm = io . sm_lha2dict ( d ) C . update ( sm ) C = definitions . symmetrize ( C ) self . C_in = C | Load the initial values for parameters and Wilson coefficients from one or several files . |
13,368 | def load_wcxf ( self , stream , get_smpar = True ) : import wcxf wc = wcxf . WC . load ( stream ) self . set_initial_wcxf ( wc , get_smpar = get_smpar ) | Load the initial values for Wilson coefficients from a file - like object or a string in WCxf format . |
13,369 | def dump_wcxf ( self , C_out , scale_out , fmt = 'yaml' , stream = None , ** kwargs ) : wc = self . get_wcxf ( C_out , scale_out ) return wc . dump ( fmt = fmt , stream = stream , ** kwargs ) | Return a string representation of the Wilson coefficients C_out in WCxf format . If stream is specified export it to a file . fmt defaults to yaml but can also be json . |
13,370 | def rgevolve_leadinglog ( self , scale_out ) : self . _check_initial ( ) return rge . smeft_evolve_leadinglog ( C_in = self . C_in , scale_high = self . scale_high , scale_in = self . scale_in , scale_out = scale_out ) | Compute the leading logarithmix approximation to the solution of the SMEFT RGEs from the initial scale to scale_out . Returns a dictionary with parameters and Wilson coefficients . Much faster but less precise that rgevolve . |
13,371 | def _check_initial ( self ) : if self . C_in is None : raise Exception ( "You have to specify the initial conditions first." ) if self . scale_in is None : raise Exception ( "You have to specify the initial scale first." ) if self . scale_high is None : raise Exception ( "You have to specify the high scale first." ) | Check if initial values and scale as well as the new physics scale have been set . |
13,372 | def rotate_defaultbasis ( self , C ) : v = sqrt ( 2 * C [ 'm2' ] . real / C [ 'Lambda' ] . real ) Mep = v / sqrt ( 2 ) * ( C [ 'Ge' ] - C [ 'ephi' ] * v ** 2 / self . scale_high ** 2 / 2 ) Mup = v / sqrt ( 2 ) * ( C [ 'Gu' ] - C [ 'uphi' ] * v ** 2 / self . scale_high ** 2 / 2 ) Mdp = v / sqrt ( 2 ) * ( C [ 'Gd' ] - C [ 'dphi' ] * v ** 2 / self . scale_high ** 2 / 2 ) Mnup = - v ** 2 * C [ 'llphiphi' ] UeL , Me , UeR = ckmutil . diag . msvd ( Mep ) UuL , Mu , UuR = ckmutil . diag . msvd ( Mup ) UdL , Md , UdR = ckmutil . diag . msvd ( Mdp ) Unu , Mnu = ckmutil . diag . mtakfac ( Mnup ) UuL , UdL , UuR , UdR = ckmutil . phases . rephase_standard ( UuL , UdL , UuR , UdR ) Unu , UeL , UeR = ckmutil . phases . rephase_pmns_standard ( Unu , UeL , UeR ) return definitions . flavor_rotation ( C , Uq = UdL , Uu = UuR , Ud = UdR , Ul = UeL , Ue = UeR ) | Rotate all parameters to the basis where the running down - type quark and charged lepton mass matrices are diagonal and where the running up - type quark mass matrix has the form V . S with V unitary and S real diagonal and where the CKM and PMNS matrices have the standard phase convention . |
13,373 | def set_database ( db_url , proxy , config ) : db_config = config . get ( 'results_database' , { } ) . get ( 'params' , { } ) if 'testing' in config and config [ 'testing' ] is True : database = connect ( 'sqlite:////tmp/results.sqlite' , check_same_thread = False , threadlocals = True ) else : if os . path . isfile ( db_url ) or os . path . isdir ( os . path . dirname ( db_url ) ) : db_url = "sqlite:///" + db_url db_config . update ( check_same_thread = False , threadlocals = True ) database = connect ( db_url , ** db_config ) proxy . initialize ( database ) | Initialize the peewee database with the given configuration |
13,374 | def sh ( cmd ) : import inspect frame = inspect . currentframe ( ) try : locals = frame . f_back . f_locals finally : del frame from subprocess import Popen , PIPE , CalledProcessError process = Popen ( cmd . format ( ** locals ) , shell = True , stdout = PIPE ) stdout , unused_stderr = process . communicate ( ) retcode = process . poll ( ) if retcode : error = subprocess . CalledProcessError ( retcode , cmd ) error . output = stdout raise error return stdout . strip ( ) | Run the given command in a shell . |
13,375 | def get_curricula_by_department ( department , future_terms = 0 , view_unpublished = False ) : if not isinstance ( future_terms , int ) : raise ValueError ( future_terms ) if future_terms < 0 or future_terms > 2 : raise ValueError ( future_terms ) view_unpublished = "true" if view_unpublished else "false" url = "{}?{}" . format ( curriculum_search_url_prefix , urlencode ( [ ( "department_abbreviation" , department . label , ) , ( "future_terms" , future_terms , ) , ( "view_unpublished" , view_unpublished , ) ] ) ) return _json_to_curricula ( get_resource ( url ) ) | Returns a list of restclients . Curriculum models for the passed Department model . |
13,376 | def get_curricula_by_term ( term , view_unpublished = False ) : view_unpublished = "true" if view_unpublished else "false" url = "{}?{}" . format ( curriculum_search_url_prefix , urlencode ( [ ( "quarter" , term . quarter . lower ( ) , ) , ( "year" , term . year , ) , ( "view_unpublished" , view_unpublished , ) ] ) ) return _json_to_curricula ( get_resource ( url ) ) | Returns a list of restclients . Curriculum models for the passed Term model . |
13,377 | def BP ( candidate , references ) : c = len ( candidate ) ref_lens = ( len ( reference ) for reference in references ) r = min ( ref_lens , key = lambda ref_len : ( abs ( ref_len - c ) , ref_len ) ) if c > r : return 1 else : return math . exp ( 1 - r / c ) | calculate brevity penalty |
13,378 | def MP ( candidate , references , n ) : counts = Counter ( ngrams ( candidate , n ) ) if not counts : return 0 max_counts = { } for reference in references : reference_counts = Counter ( ngrams ( reference , n ) ) for ngram in counts : max_counts [ ngram ] = max ( max_counts . get ( ngram , 0 ) , reference_counts [ ngram ] ) clipped_counts = dict ( ( ngram , min ( count , max_counts [ ngram ] ) ) for ngram , count in counts . items ( ) ) return sum ( clipped_counts . values ( ) ) / sum ( counts . values ( ) ) | calculate modified precision |
13,379 | def template2regex ( template , ranges = None ) : if len ( template ) and - 1 < template . find ( '|' ) < len ( template ) - 1 : raise InvalidTemplateError ( "'|' may only appear at the end, found at position %d in %s" % ( template . find ( '|' ) , template ) ) if ranges is None : ranges = DEFAULT_RANGES anchor = True state = S_PATH if len ( template ) and template [ - 1 ] == '|' : anchor = False params = [ ] bracketdepth = 0 result = [ '^' ] name = "" pattern = "[^/]+" rangename = None for c in template_splitter . split ( template ) : if state == S_PATH : if len ( c ) > 1 : result . append ( re . escape ( c ) ) elif c == '[' : result . append ( "(" ) bracketdepth += 1 elif c == ']' : bracketdepth -= 1 if bracketdepth < 0 : raise InvalidTemplateError ( "Mismatched brackets in %s" % template ) result . append ( ")?" ) elif c == '{' : name = "" state = S_TEMPLATE elif c == '}' : raise InvalidTemplateError ( "Mismatched braces in %s" % template ) elif c == '|' : pass else : result . append ( re . escape ( c ) ) else : if c == '}' : if rangename and rangename in ranges : result . append ( "(?P<%s>%s)" % ( name , ranges [ rangename ] ) ) else : result . append ( "(?P<%s>%s)" % ( name , pattern ) ) params . append ( name ) state = S_PATH rangename = None else : name = c if name . find ( ":" ) > - 1 : name , rangename = name . split ( ":" ) if bracketdepth != 0 : raise InvalidTemplateError ( "Mismatched brackets in %s" % template ) if state == S_TEMPLATE : raise InvalidTemplateError ( "Mismatched braces in %s" % template ) if anchor : result . append ( '$' ) return "" . join ( result ) , params | Convert a URL template to a regular expression . |
13,380 | def add_callback ( self , phase , fn ) : try : self . __callbacks [ phase ] . append ( fn ) except KeyError : raise KeyError ( "Invalid callback phase '%s'. Must be one of %s" % ( phase , _callback_phases ) ) | Adds a callback to the context . |
13,381 | def add_property ( self , name , fn , cached = True ) : if name in self . __properties : raise KeyError ( "Trying to add a property '%s' that already exists on this %s object." % ( name , self . __class__ . __name__ ) ) self . __properties [ name ] = ( fn , cached ) | Adds a property to the Context . |
13,382 | def path ( self , args , kw ) : params = self . _pop_params ( args , kw ) if args or kw : raise InvalidArgumentError ( "Extra parameters (%s, %s) when building path for %s" % ( args , kw , self . template ) ) return self . build_url ( ** params ) | Builds the URL path fragment for this route . |
13,383 | def add ( self , template , resource , name = None ) : if hasattr ( resource , '_rhino_meta' ) : route = Route ( template , Resource ( resource ) , name = name , ranges = self . ranges ) else : route = Route ( template , resource , name = name , ranges = self . ranges ) obj_id = id ( resource ) if obj_id not in self . _lookup : self . _lookup [ obj_id ] = route if name is not None : if name in self . named_routes : raise InvalidArgumentError ( "A route named '%s' already exists in this %s object." % ( name , self . __class__ . __name__ ) ) self . named_routes [ name ] = route self . routes . append ( route ) | Add a route to a resource . |
13,384 | def add_ctx_property ( self , name , fn , cached = True ) : if name in [ item [ 0 ] for item in self . _ctx_properties ] : raise InvalidArgumentError ( "A context property name '%s' already exists." % name ) self . _ctx_properties . append ( [ name , ( fn , cached ) ] ) | Install a context property . |
13,385 | def path ( self , target , args , kw ) : if type ( target ) in string_types : if ':' in target : prefix , rest = target . split ( ':' , 1 ) route = self . named_routes [ prefix ] prefix_params = route . _pop_params ( args , kw ) prefix_path = route . path ( [ ] , prefix_params ) next_mapper = route . resource return prefix_path + next_mapper . path ( rest , args , kw ) else : return self . named_routes [ target ] . path ( args , kw ) elif isinstance ( target , Route ) : for route in self . routes : if route is target : return route . path ( args , kw ) raise InvalidArgumentError ( "Route '%s' not found in this %s object." % ( target , self . __class__ . __name__ ) ) else : target_id = id ( target ) if target_id in self . _lookup : return self . _lookup [ target_id ] . path ( args , kw ) raise InvalidArgumentError ( "No Route found for target '%s' in this %s object." % ( target , self . __class__ . __name__ ) ) | Build a URL path fragment for a resource or route . |
13,386 | def wsgi ( self , environ , start_response ) : request = Request ( environ ) ctx = Context ( request ) try : try : response = self ( request , ctx ) ctx . _run_callbacks ( 'finalize' , ( request , response ) ) response = response . conditional_to ( request ) except HTTPException as e : response = e . response except Exception : self . handle_error ( request , ctx ) response = InternalServerError ( ) . response response . add_callback ( lambda : ctx . _run_callbacks ( 'close' ) ) return response ( environ , start_response ) finally : ctx . _run_callbacks ( 'teardown' , log_errors = True ) | Implements the mapper s WSGI interface . |
13,387 | def start_server ( self , host = 'localhost' , port = 9000 , app = None ) : from wsgiref . simple_server import make_server if app is None : app = self . wsgi server = make_server ( host , port , app ) server_addr = "%s:%s" % ( server . server_name , server . server_port ) print "Server listening at http://%s/" % server_addr server . serve_forever ( ) | Start a wsgiref . simple_server based server to run this mapper . |
13,388 | def generate_docs ( app ) : config = app . config config_dir = app . env . srcdir source_root = os . path . join ( config_dir , config . apidoc_source_root ) output_root = os . path . join ( config_dir , config . apidoc_output_root ) execution_dir = os . path . join ( config_dir , '..' ) cleanup ( output_root ) command = [ 'sphinx-apidoc' , '-f' , '-o' , output_root , source_root ] for exclude in config . apidoc_exclude : command . append ( os . path . join ( source_root , exclude ) ) process = Popen ( command , cwd = execution_dir ) process . wait ( ) | Run sphinx - apidoc to generate Python API documentation for the project . |
13,389 | def cleanup ( output_root ) : if os . path . exists ( output_root ) : if os . path . isdir ( output_root ) : rmtree ( output_root ) else : os . remove ( output_root ) | Remove any reST files which were generated by this extension |
13,390 | def build ( cls : Type [ T ] , data : Generic ) -> T : fields = fields_dict ( cls ) kwargs : Dict [ str , Any ] = { } for key , value in data . items ( ) : if key in fields : if isinstance ( value , Mapping ) : t = fields [ key ] . type if issubclass ( t , Auto ) : value = t . build ( value ) else : value = Auto . generate ( value , name = key . title ( ) ) kwargs [ key ] = value else : log . debug ( f"got unknown attribute {key} for {cls.__name__}" ) return cls ( ** kwargs ) | Build objects from dictionaries recursively . |
13,391 | def generate ( cls : Type [ T ] , data : Generic , name : str = None , * , recursive : bool = True ) -> T : if name is None : name = cls . __name__ kls = make_class ( name , { k : ib ( default = None ) for k in data } , bases = ( cls , ) ) data = { k : ( cls . generate ( v , k . title ( ) ) if recursive and isinstance ( v , Mapping ) else v ) for k , v in data . items ( ) } return kls ( ** data ) | Build dataclasses and objects from dictionaries recursively . |
13,392 | def emit_answer_event ( sender , instance , ** kwargs ) : if not issubclass ( sender , Answer ) or not kwargs [ 'created' ] : return logger = get_events_logger ( ) logger . emit ( 'answer' , { "user_id" : instance . user_id , "is_correct" : instance . item_asked_id == instance . item_answered_id , "context_id" : [ instance . context_id ] if instance . context_id else [ ] , "item_id" : instance . item_id , "response_time_ms" : instance . response_time , "params" : { "session_id" : instance . session_id , "guess" : instance . guess , "practice_set_id" : instance . practice_set_id , "config_id" : instance . config_id , } } ) | Save answer event to log file . |
13,393 | def get_all_available_leaves ( self , language = None , forbidden_item_ids = None ) : return self . get_all_leaves ( language = language , forbidden_item_ids = forbidden_item_ids ) | Get all available leaves . |
13,394 | def get_children_graph ( self , item_ids = None , language = None , forbidden_item_ids = None ) : if forbidden_item_ids is None : forbidden_item_ids = set ( ) def _children ( item_ids ) : if item_ids is None : items = Item . objects . filter ( active = True ) . prefetch_related ( 'children' ) else : item_ids = [ ii for iis in item_ids . values ( ) for ii in iis ] items = Item . objects . filter ( id__in = item_ids , active = True ) . prefetch_related ( 'children' ) return { item . id : sorted ( [ _item . id for _item in item . children . all ( ) if _item . active and _item . id not in forbidden_item_ids ] ) for item in items if item . id not in forbidden_item_ids } if item_ids is None : return self . _reachable_graph ( None , _children , language = language ) else : graph = self . get_children_graph ( None , language , forbidden_item_ids = forbidden_item_ids ) return self . _subset_graph ( graph , set ( item_ids ) - set ( forbidden_item_ids ) ) | Get a subgraph of items reachable from the given set of items through the child relation . |
13,395 | def get_parents_graph ( self , item_ids , language = None ) : def _parents ( item_ids ) : if item_ids is None : items = Item . objects . filter ( active = True ) . prefetch_related ( 'parents' ) else : item_ids = [ ii for iis in item_ids . values ( ) for ii in iis ] items = Item . objects . filter ( id__in = item_ids , active = True ) . prefetch_related ( 'parents' ) return { item . id : sorted ( [ _item . id for _item in item . parents . all ( ) ] ) for item in items } return self . _reachable_graph ( item_ids , _parents , language = language ) if item_ids is None : return self . _reachable_graph ( None , _parents , language = language ) else : graph = self . get_parents_graph ( None , language ) return self . _subset_graph ( graph , item_ids ) | Get a subgraph of items reachable from the given set of items through the parent relation . |
13,396 | def get_graph ( self , item_ids , language = None ) : def _related ( item_ids ) : if item_ids is None : items = Item . objects . filter ( active = True ) . prefetch_related ( 'parents' , 'children' ) else : item_ids = [ ii for iis in item_ids . values ( ) for ii in iis ] items = Item . objects . filter ( id__in = item_ids , active = True ) . prefetch_related ( 'parents' , 'children' ) return { item . id : sorted ( [ _item . id for rel in [ item . parents . all ( ) , item . children . all ( ) ] for _item in rel ] ) for item in items } if item_ids is None : return self . _reachable_graph ( None , _related , language = language ) else : graph = self . get_graph ( None , language ) return self . _subset_graph ( graph , item_ids ) | Get a subgraph of items reachable from the given set of items through any relation . |
13,397 | def translate_item_ids ( self , item_ids , language , is_nested = None ) : if is_nested is None : def is_nested_fun ( x ) : return True elif isinstance ( is_nested , bool ) : def is_nested_fun ( x ) : return is_nested else : is_nested_fun = is_nested all_item_type_ids = ItemType . objects . get_all_item_type_ids ( ) groupped = proso . list . group_by ( item_ids , by = lambda item_id : all_item_type_ids [ item_id ] ) result = { } for item_type_id , items in groupped . items ( ) : with timeit ( 'translating item type {}' . format ( item_type_id ) ) : item_type = ItemType . objects . get_all_types ( ) [ item_type_id ] model = ItemType . objects . get_model ( item_type_id ) kwargs = { '{}__in' . format ( item_type [ 'foreign_key' ] ) : items } if 'language' in item_type : kwargs [ item_type [ 'language' ] ] = language if any ( [ not is_nested_fun ( item_id ) for item_id in items ] ) and hasattr ( model . objects , 'prepare_related' ) : objs = model . objects . prepare_related ( ) elif hasattr ( model . objects , 'prepare' ) : objs = model . objects . prepare ( ) else : objs = model . objects for obj in objs . filter ( ** kwargs ) : item_id = getattr ( obj , item_type [ 'foreign_key' ] ) result [ item_id ] = obj . to_json ( nested = is_nested_fun ( item_id ) ) return result | Translate a list of item ids to JSON objects which reference them . |
13,398 | def get_leaves ( self , item_ids = None , language = None , forbidden_item_ids = None ) : forbidden_item_ids = set ( ) if forbidden_item_ids is None else set ( forbidden_item_ids ) children = self . get_children_graph ( item_ids , language = language , forbidden_item_ids = forbidden_item_ids ) counts = self . get_children_counts ( active = None ) if item_ids is None : item_ids = set ( children . keys ( ) ) def _get_leaves ( item_id ) : leaves = set ( ) def __search ( item_ids ) : result = set ( flatten ( [ children . get ( item_id , [ ] ) for item_id in item_ids ] ) ) new_leaves = { item_id for item_id in result if item_id not in children . keys ( ) } leaves . update ( new_leaves ) return result - new_leaves fixed_point ( is_zero = lambda to_visit : len ( to_visit ) == 0 , minus = lambda to_visit , visited : to_visit - visited , plus = lambda visited_x , visited_y : visited_x | visited_y , f = __search , x = { item_id } ) leaves = { leaf for leaf in leaves if counts [ leaf ] == 0 } if len ( leaves ) > 0 : return leaves if counts [ item_id ] == 0 and item_id not in forbidden_item_ids : return { item_id } return set ( ) return { item_id : _get_leaves ( item_id ) for item_id in item_ids } | Get mapping of items to their reachable leaves . Leaves having inactive relations to other items are omitted . |
13,399 | def get_all_leaves ( self , item_ids = None , language = None , forbidden_item_ids = None ) : return sorted ( set ( flatten ( self . get_leaves ( item_ids , language = language , forbidden_item_ids = forbidden_item_ids ) . values ( ) ) ) ) | Get all leaves reachable from the given set of items . Leaves having inactive relations to other items are omitted . |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.