idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
57,100
def _build_url(*args, **kwargs) -> str:
    """Build an absolute API URL.

    Walks the API_RESOURCES_URLS mapping by each positional key, formats
    the resulting template with any keyword arguments, and joins it onto
    the base URL.
    """
    template = API_RESOURCES_URLS
    for part in args:
        template = template[part]
    if kwargs:
        template = template.format(**kwargs)
    return urljoin(URL, template)
Return a valid url .
57,101
def _get(url: str, headers: dict) -> dict:
    """Perform an HTTP GET and return the decoded JSON payload.

    :raises GoogleApiError: when the response status is not 200.
    """
    resp = requests.get(url, headers=headers)
    payload = resp.json()
    if resp.status_code == 200:
        return payload
    raise GoogleApiError({"status_code": resp.status_code,
                          "error": payload.get("error", "")})
Make a GET call .
57,102
def _post(url: str, params: dict, headers: dict) -> dict:
    """Perform an HTTP POST and return the decoded JSON payload.

    :raises GoogleApiError: on a non-200 status or when the payload
        carries an "error" key.
    """
    resp = requests.post(url, params=params, headers=headers)
    payload = resp.json()
    succeeded = resp.status_code == 200 and "error" not in payload
    if not succeeded:
        raise GoogleApiError({"status_code": resp.status_code,
                              "error": payload.get("error", "")})
    return payload
Make a POST call .
57,103
# "A knight on the rim is dim": report whether a knight move lands on the
# board's edge (rank 1/8 or file a/h), using the `chess` package's bitboard
# masks.
# NOTE(review): when the moved piece is not a knight the function falls
# through and implicitly returns None (falsy) instead of False — confirm
# callers only use the result in a boolean context.
def dims_knight ( self , move ) : if self . board . piece_type_at ( move . from_square ) == chess . KNIGHT : rim = SquareSet ( chess . BB_RANK_1 | chess . BB_RANK_8 | chess . BB_FILE_A | chess . BB_FILE_H ) return move . to_square in rim
Knight on the rim is dim
57,104
# Extract the collection a comment is written in: look up the comment's
# record id, then guess that record's primary collection.
# NOTE(review): the SQL text was lost during extraction —
# `query = recid = run_sql ( query , ... )` assigns run_sql's result to both
# names and reads `query` before it is defined (NameError at runtime); the
# original almost certainly had `query = "SELECT ..."` as a separate
# statement.  Restore the SELECT before using this function.
def get_comment_collection ( cmt_id ) : query = recid = run_sql ( query , ( cmt_id , ) ) record_primary_collection = guess_primary_collection_of_a_record ( recid [ 0 ] [ 0 ] ) return record_primary_collection
Extract the collection where the comment is written
57,105
def get_collection_moderators(collection):
    """Return the email addresses of comment moderators for *collection*.

    Falls back to the configured default moderator when nobody is
    explicitly authorized for 'moderatecomments'.
    """
    from invenio_access.engine import acc_get_authorized_emails
    emails = list(acc_get_authorized_emails('moderatecomments',
                                            collection=collection))
    return emails if emails else [CFG_WEBCOMMENT_DEFAULT_MODERATOR]
Return the list of comment moderators for the given collection .
57,106
def get_reply_order_cache_data(comid):
    """Encode *comid* as a 4-character big-endian string.

    The characters are the comment id's bytes from most to least
    significant, suitable for byte ordering in MySQL.
    """
    return "".join(chr((comid >> shift) % 256) for shift in (24, 16, 8, 0))
Prepare a representation of the comment ID given as parameter so that it is suitable for byte ordering in MySQL .
57,107
def move_attached_files_to_storage(attached_files, recID, comid):
    """Move files attached to a new comment to their final location.

    :param attached_files: mapping of filename -> temporary filepath
    :param recID: id of the record the comment belongs to
    :param comid: id of the newly created comment
    """
    # The destination does not depend on the loop variables, so compute it
    # (and create it) once instead of per file.
    dest_dir = os.path.join(CFG_COMMENTSDIR, str(recID), str(comid))
    try:
        os.makedirs(dest_dir)
    except OSError:
        # Directory may already exist; any other OS problem will surface
        # when the move below fails.  (Original used a bare `except:`.)
        pass
    for filename, filepath in iteritems(attached_files):
        shutil.move(filepath, os.path.join(dest_dir, filename))
Move the files that were just attached to a new comment to their final location .
57,108
# Subscribe user `uid` to the discussion on record `recID`, returning 1 on
# success and 0 on failure.
# NOTE(review): the SQL text was lost during extraction —
# `query = params = ( ... )` collapses what were two assignments, so `query`
# is now the parameter tuple.  Restore the INSERT statement before use.
# The bare `except:` silently maps any DB error to 0.
def subscribe_user_to_discussion ( recID , uid ) : query = params = ( recID , uid , convert_datestruct_to_datetext ( time . localtime ( ) ) ) try : run_sql ( query , params ) except : return 0 return 1
Subscribe a user to a discussion so that she receives all new comments on this record by email.
57,109
# Unsubscribe user `uid` from the discussion on record `recID`; returns 1
# when at least one row was affected, else 0.
# NOTE(review): the SQL text was lost during extraction —
# `query = params = ( recID , uid )` collapses two assignments; restore the
# DELETE statement before use.  The bare `except:` maps any DB error to 0.
def unsubscribe_user_from_discussion ( recID , uid ) : query = params = ( recID , uid ) try : res = run_sql ( query , params ) except : return 0 if res > 0 : return 1 return 0
Unsubscribe users from a discussion .
57,110
# Return two lists of subscriber emails for the discussion on `recID`:
# (emails_that_can_unsubscribe, emails_that_cannot).  Explicit subscribers
# who are no longer authorized to view comments are unsubscribed on the
# fly; collections configured in CFG_WEBCOMMENT_EMAIL_REPLIES_TO add
# auto-reply addresses read from record fields (local parts get the
# support-email domain appended).
# NOTE(review): the subscriber SQL was lost during extraction —
# `query = params = ( recID , )` collapses two assignments; restore the
# SELECT before use.  Python 2 code (iteritems).  The flattening also hides
# the original indentation, so the exact nesting of the inner loops is an
# inference — verify against the original source.
def get_users_subscribed_to_discussion ( recID , check_authorizations = True ) : subscribers_emails = { } query = params = ( recID , ) res = run_sql ( query , params ) for row in res : uid = row [ 0 ] if check_authorizations : user_info = UserInfo ( uid ) ( auth_code , auth_msg ) = check_user_can_view_comments ( user_info , recID ) else : auth_code = False if auth_code : unsubscribe_user_from_discussion ( recID , uid ) else : email = User . query . get ( uid ) . email if '@' in email : subscribers_emails [ email ] = True collections_with_auto_replies = CFG_WEBCOMMENT_EMAIL_REPLIES_TO . keys ( ) for collection in collections_with_auto_replies : if recID in get_collection_reclist ( collection ) : fields = CFG_WEBCOMMENT_EMAIL_REPLIES_TO [ collection ] for field in fields : emails = get_fieldvalues ( recID , field ) for email in emails : if not '@' in email : subscribers_emails [ email + '@' + CFG_SITE_SUPPORT_EMAIL . split ( '@' ) [ 1 ] ] = False else : subscribers_emails [ email ] = False return ( [ email for email , can_unsubscribe_p in iteritems ( subscribers_emails ) if can_unsubscribe_p ] , [ email for email , can_unsubscribe_p in iteritems ( subscribers_emails ) if not can_unsubscribe_p ] )
Returns the lists of users subscribed to a given discussion .
57,111
# Return (restriction, commenting_round) for `recid`: for the first
# configured collection containing the record, read the commenting round /
# restriction value from the collection's configured MARC datafield.
# NOTE(review): this line was flattened from indented source, so the exact
# nesting of each `break` (inside `if commenting_rounds:` vs. one level up)
# cannot be recovered here; presumably each loop stops at the first
# collection that contains the record — confirm against the original.
def get_record_status ( recid ) : collections_with_rounds = CFG_WEBCOMMENT_ROUND_DATAFIELD . keys ( ) commenting_round = "" for collection in collections_with_rounds : if recid in get_collection_reclist ( collection ) : commenting_rounds = get_fieldvalues ( recid , CFG_WEBCOMMENT_ROUND_DATAFIELD . get ( collection , "" ) ) if commenting_rounds : commenting_round = commenting_rounds [ 0 ] break collections_with_restrictions = CFG_WEBCOMMENT_RESTRICTION_DATAFIELD . keys ( ) restriction = "" for collection in collections_with_restrictions : if recid in get_collection_reclist ( collection ) : restrictions = get_fieldvalues ( recid , CFG_WEBCOMMENT_RESTRICTION_DATAFIELD . get ( collection , "" ) ) if restrictions : restriction = restrictions [ 0 ] break return ( restriction , commenting_round )
Returns the current status of the record i . e . current restriction to apply for newly submitted comments and current commenting round .
57,112
def group_comments_by_round(comments, ranking=0):
    """Group comments by their commenting round, preserving first-seen order.

    When *ranking* is truthy the round name is read from column 11 of each
    comment row (falling back to column 7 when that value is falsy),
    otherwise from column 7.

    :return: list of (round_name, comments_in_round) tuples
    """
    order = []
    buckets = {}
    for row in comments:
        # Keep the original and/or idiom: an empty column 11 falls back to 7.
        name = ranking and row[11] or row[7]
        if name not in buckets:
            buckets[name] = []
            order.append(name)
        buckets[name].append(row)
    return [(name, buckets[name]) for name in order]
Group comments by the round to which they belong
57,113
def get_mini_reviews(recid, ln=CFG_SITE_LANG):
    """Render the mini review panel for a detailed record page.

    The action is 'SUBMIT' when short reviews are allowed, 'DISPLAY'
    otherwise.
    """
    action = 'SUBMIT' if CFG_WEBCOMMENT_ALLOW_SHORT_REVIEWS else 'DISPLAY'
    reviews = query_retrieve_comments_or_remarks(recid, ranking=1)
    return webcomment_templates.tmpl_mini_review(
        recid, ln, action=action,
        avg_score=calculate_avg_score(reviews),
        nb_comments_total=len(reviews))
Returns the web controls to add reviews to a record from the detailed record pages mini - panel .
57,114
def check_user_can_view_comments(user_info, recid):
    """Check that *user_info* may view the comments attached to *recid*.

    The record itself must be viewable first; then the 'viewcomment'
    action is authorized against the record's primary collection.
    """
    auth_code, auth_msg = check_user_can_view_record(user_info, recid)
    if auth_code:
        return (auth_code, auth_msg)
    collection = guess_primary_collection_of_a_record(recid)
    return acc_authorize_action(user_info, 'viewcomment',
                                authorized_if_no_roles=True,
                                collection=collection)
Check if the user is authorized to view comments for given recid .
57,115
def check_user_can_view_comment(user_info, comid, restriction=None):
    """Check that *user_info* may view comment *comid* given its restriction.

    This does NOT check whether the record itself is restricted.  When
    *restriction* is None it is looked up from the comment row.
    """
    if restriction is None:
        comment = query_get_comment(comid)
        if not comment:
            return (1, 'Comment %i does not exist' % comid)
        restriction = comment[11]
    if restriction == "":
        return (0, '')
    return acc_authorize_action(user_info, 'viewrestrcomment',
                                status=restriction)
Check if the user is authorized to view a particular comment given the comment restriction . Note that this function does not check if the record itself is restricted to the user which would mean that the user should not see the comment .
57,116
def check_user_can_send_comments(user_info, recid):
    """Check that *user_info* may comment on *recid*.

    Does not check that the user can view the record or its comments.
    """
    collection = guess_primary_collection_of_a_record(recid)
    return acc_authorize_action(user_info, 'sendcomment',
                                authorized_if_no_roles=True,
                                collection=collection)
Check if the user is authorized to comment the given recid . This function does not check that user can view the record or view the comments
57,117
def check_user_can_attach_file_to_comments(user_info, recid):
    """Check that *user_info* may attach files to comments on *recid*.

    Does not check that the user can view or send comments.  Unlike the
    other comment permissions this one defaults to denied when no roles
    are configured.
    """
    collection = guess_primary_collection_of_a_record(recid)
    return acc_authorize_action(user_info, 'attachcommentfile',
                                authorized_if_no_roles=False,
                                collection=collection)
Check if the user is authorized to attach a file to comments for given recid . This function does not check that user can view the comments or send comments .
57,118
# Return the ids of comments the given user has collapsed on the given
# record page.
# NOTE(review): the SQL text was lost during extraction —
# `query = params = ( uid , recid )` collapses two assignments; restore the
# SELECT statement before use.
def get_user_collapsed_comments_for_record ( uid , recid ) : query = params = ( uid , recid ) return [ res [ 0 ] for res in run_sql ( query , params ) ]
Get the comments collapsed for given user on given recid page
57,119
# Return True if the comment's status column is anything other than 'ok',
# else False (also False when the comment does not exist).
# NOTE(review): the SQL text was lost during extraction —
# `query = params = ( comid , )` collapses two assignments; restore the
# SELECT statement before use.
def is_comment_deleted ( comid ) : query = params = ( comid , ) res = run_sql ( query , params ) if res and res [ 0 ] [ 0 ] != 'ok' : return True return False
Return True if the comment is deleted. Else False.
57,120
def _fix_time ( self , dt ) : if dt . tzinfo is not None : dt = dt . replace ( tzinfo = None ) return dt
Stackdistiller converts all times to utc .
57,121
# Compute the longest common leading-whitespace margin across all lines
# (textwrap.dedent-style) and strip it from the text.  Whitespace-only
# lines are blanked first via _whitespace_only_re.
# NOTE(review): the `if 0 and margin:` block is dead debugging code carried
# over from textwrap's implementation; it can never execute.  Relies on
# the module-level _whitespace_only_re and _leading_whitespace_re patterns.
def strip_leading_comments ( text ) : margin = None text = _whitespace_only_re . sub ( '' , text ) indents = _leading_whitespace_re . findall ( text ) for indent in indents : if margin is None : margin = indent elif indent . startswith ( margin ) : pass elif margin . startswith ( indent ) : margin = indent else : margin = "" break if 0 and margin : for line in text . split ( "\n" ) : assert not line or line . startswith ( margin ) , "line = %r, margin = %r" % ( line , margin ) if margin : text = re . sub ( r'(?m)^' + margin , '' , text ) return text
Strips the leading whitespaces and % from the given text .
57,122
# Parse `target` into a list of documentation entries:
# - file-like object: read it and recurse on the contents;
# - path to a .ly/.ily file: parse the file (other file paths yield []);
# - directory: walk it and merge the results of parsing every file;
# - anything else: treat the string itself as LilyPond source, run
#   LilyParser over it, and record files/errors in the optional
#   kwargs["metrics"] object.  FailedParse and RuntimeError are logged
#   (and recorded) rather than propagated.
# NOTE(review): logging.info("Parsing directory {}", target) mixes
# str.format-style braces with stdlib %-style logging — the placeholder is
# never interpolated.
def parse ( target , trace = False , ** kwargs ) : if hasattr ( target , 'read' ) : file_content = target . read ( ) return parse ( file_content , trace , ** kwargs ) if os . path . isfile ( target ) : if target . endswith ( ".ily" ) or target . endswith ( ".ly" ) : console . display ( "Parsing" , target ) with io . open ( target , "r" , encoding = "utf-8" ) as fp : return parse ( fp , trace , filename = target , ** kwargs ) else : return [ ] if os . path . isdir ( target ) : docs = [ ] logging . info ( "Parsing directory {}" , target ) for root , _ , files in os . walk ( target ) : for f in files : fname = os . path . join ( root , f ) file_docs = parse ( fname , trace , ** kwargs ) docs . extend ( file_docs ) return docs metrics = kwargs . get ( "metrics" , None ) if metrics is not None : metrics . record_file ( target ) docs = [ ] parser = LilyParser ( parseinfo = True ) try : parser . parse ( target , 'lilypond' , semantics = DocumentationSemantics ( docs ) , filename = kwargs . get ( "filename" , None ) , trace = trace ) except FailedParse as err : logging . warn ( err ) if metrics is not None : metrics . record_error ( err ) except RuntimeError as err : logging . warn ( err ) if metrics is not None : metrics . record_error ( err ) return docs
Parse the given target . If it is a file - like object then parse its contents . If given a string perform one of the following actions
57,123
def find(name, app=None, components=None, raw=False):
    """Discover attribute or submodule *name* in each configured component.

    When *components* is None they are read from the (current) app's
    COMPONENTS config.  Unless *raw* is set, discovered modules are
    flattened into a dict of their __all__ names (or their full vars()).

    :return: list with one entry per component where *name* was found
    """
    if components is None:
        if app is None:
            from flask import current_app as app
        components = app.config.get('COMPONENTS', [])
    found = []
    for component in components:
        module = import_module(component)
        target = getattr(module, name, None)
        if target is None:
            # Fall back to importing "<component>.<name>" as a submodule.
            try:
                target = import_module('.'.join((component, name)))
            except ImportError:
                continue
        if not raw and isinstance(target, types.ModuleType):
            exported = getattr(target, '__all__', None)
            if exported:
                target = {n: getattr(target, n) for n in exported}
            else:
                target = vars(target)
        found.append(target)
    return found
Discover any named attributes modules or packages and coalesces the results .
57,124
def auto_clear_shopping_cart(self, auto_clear_shopping_cart):
    """Set the auto_clear_shopping_cart of this CartSettings.

    :raises ValueError: if the value is not None and not one of the
        allowed choices.
    """
    allowed = ["never", "orderCreated", "orderCompleted"]
    acceptable = auto_clear_shopping_cart is None or auto_clear_shopping_cart in allowed
    if not acceptable:
        raise ValueError(
            "Invalid value for `auto_clear_shopping_cart` ({0}), must be one of {1}"
            .format(auto_clear_shopping_cart, allowed))
    self._auto_clear_shopping_cart = auto_clear_shopping_cart
Sets the auto_clear_shopping_cart of this CartSettings .
57,125
def getView(self, lv):
    """Determine the detector view ('X' or 'Y') from a G4LogicalVolume.

    The view is taken from the last character of the volume's name.

    :raises ValueError: when the name ends in neither 'X' nor 'Y'.
    """
    name = str(lv.GetName())
    if name.endswith('X'):
        return 'X'
    if name.endswith('Y'):
        return 'Y'
    self.log.error('Cannot determine view for %s', lv.GetName())
    # Bug fix: the original executed `raise '...'` — raising a plain string
    # is itself a TypeError at runtime.  Raise a proper exception instead.
    # (endswith also avoids the original's IndexError on an empty name.)
    raise ValueError('Cannot determine view for %s' % lv.GetName())
Determine the detector view starting with a G4LogicalVolume
57,126
def RenderWidget(self):
    """Create and return a QWidget appropriate for self.type.

    int -> QSpinBox, bool -> QCheckBox, float and anything else -> a
    QLineEdit holding str(self.value).  The widget is also stored on
    self.widget, and self.toolTip is applied when set.
    """
    kind = self.type
    if kind == int:
        widget = QSpinBox()
        widget.setMaximum(999999999)
        widget.setValue(self.value)
    elif kind == bool:
        widget = QCheckBox()
        widget.setChecked(self.value)
    else:
        # The float branch and the fallback are identical: a line edit
        # showing the stringified value.
        widget = QLineEdit()
        widget.setText(str(self.value))
    if self.toolTip is not None:
        widget.setToolTip(self.toolTip)
    self.widget = widget
    return widget
Returns a QWidget subclass instance . Exact class depends on self . type
57,127
def ScreenGenerator(nfft, r0, nx, ny):
    """Yield an endless stream of nx-by-ny phase-screen tiles.

    Repeatedly generates a pair of nfft-by-nfft screens with an FFT screen
    generator and yields their non-overlapping ny-by-nx subsections.
    """
    tiles_y = int(nfft / ny)
    tiles_x = int(nfft / nx)
    while True:
        layers = GenerateTwoScreens(nfft, r0)
        for layer in (layers[0], layers[1]):
            for iy in range(tiles_y):
                y0 = iy * ny
                for ix in range(tiles_x):
                    x0 = ix * nx
                    yield layer[y0:y0 + ny, x0:x0 + nx]
Generate an infinite series of rectangular phase screens Uses an FFT screen generator to make a large screen and then returns non - overlapping subsections of it
57,128
def parse_dates(d, default='today'):
    """Parse one or more date-ish values from *d*.

    Accepts already-parsed date types, numeric POSIX timestamps, strings
    (parsed with dateutil), and iterables of any of those (parsed
    recursively).  Anything unparseable yields *default*, which itself
    defaults to today's datetime.
    """
    if default == 'today':
        default = datetime.datetime.today()
    if d is None:
        return default
    if isinstance(d, _parsed_date_types):
        return d
    if is_number(d):
        stamp = d if isinstance(d, float) else float(d)
        return datetime.datetime.utcfromtimestamp(stamp)
    if not isinstance(d, STRING_TYPES):
        # Non-string iterables are parsed element-wise.
        if hasattr(d, '__iter__'):
            return [parse_dates(item, default) for item in d]
        return default
    if len(d) == 0:
        return default
    try:
        return parser.parse(d)
    except (AttributeError, ValueError):
        return default
Parses one or more dates from d
57,129
def load_with_classes(filename, classes):
    """Try to load *filename* with each class in *classes* until one works.

    FileNotFoundError propagates immediately; any other load failure moves
    on to the next candidate class (logged when in DEBUG mode).

    :return: the first successfully loaded object, or None.
    """
    for candidate in classes:
        obj = candidate()
        try:
            obj.load(filename)
            return obj
        except FileNotFoundError:
            raise
        except Exception:
            if a99.logging_level == logging.DEBUG:
                a99.get_python_logger().exception(
                    "Error trying with class \"{0!s}\"".format(candidate.__name__))
    return None
Attempts to load file by trial - and - error using a given list of classes .
57,130
def load_any_file(filename):
    """Attempt to load *filename* by trial and error.

    Text files are tried against the registered text-file classes, all
    others against the binary-file classes.
    """
    import f311
    candidates = f311.classes_txt() if a99.is_text_file(filename) else f311.classes_bin()
    return load_with_classes(filename, candidates)
Attempts to load filename by trial - and - error
57,131
def load_spectrum(filename):
    """Attempt to load *filename* as any supported spectrum type.

    :return: the loaded Spectrum, or None if no class could read the file.
    """
    import f311
    loaded = load_with_classes(filename, f311.classes_sp())
    return loaded.spectrum if loaded else None
Attempts to load spectrum as one of the supported types .
57,132
# Load a FITS spectrum whose headers are incomplete ("messed" x-axis).
# First tries the regular FileSpectrumFits loader; on failure, opens the
# file directly and fills in missing CDELT1/CRVAL1 headers — from sp_ref's
# delta_lambda / first x value when a reference spectrum is given, else
# 1 and 0 — then builds a Spectrum from the primary HDU.  The data is
# squeezed; more than one remaining dimension raises RuntimeError because
# the file is then not a single spectrum.
def load_spectrum_fits_messed_x ( filename , sp_ref = None ) : import f311 . filetypes as ft f = load_with_classes ( filename , ( ft . FileSpectrumFits , ) ) if f is not None : ret = f . spectrum else : hdul = fits . open ( filename ) hdu = hdul [ 0 ] if not hdu . header . get ( "CDELT1" ) : hdu . header [ "CDELT1" ] = 1 if sp_ref is None else sp_ref . delta_lambda if not hdu . header . get ( "CRVAL1" ) : hdu . header [ "CRVAL1" ] = 0 if sp_ref is None else sp_ref . x [ 0 ] ret = ft . Spectrum ( ) ret . from_hdu ( hdu ) ret . filename = filename original_shape = ret . y . shape ret . y = ret . y . squeeze ( ) if len ( ret . y . shape ) > 1 : raise RuntimeError ( "Data contains more than 1 dimension (shape is {0!s}), " "FITS file is not single spectrum" . format ( original_shape ) ) return ret
Loads FITS file spectrum that does not have the proper headers . Returns a Spectrum
57,133
def get_filetypes_info(editor_quote="`", flag_leaf=True):
    """Report the available data file types.

    :param editor_quote: string used to quote editor names in the output
    :param flag_leaf: forwarded to f311.classes_file()
    :return: list of dicts (description, default_filename, classname,
        editors, class, txtbin) sorted by description
    """
    NONE_REPL = ""
    import f311
    rows = []
    for cls in f311.classes_file(flag_leaf):
        editors = cls.editors
        if editors is None:
            editors = NONE_REPL
        else:
            editors = ", ".join("{0}{1}{0}".format(editor_quote, e) for e in editors)
        rows.append({
            "description": a99.get_obj_doc0(cls),
            "default_filename": NONE_REPL if cls.default_filename is None else cls.default_filename,
            "classname": cls.__name__,
            "editors": editors,
            "class": cls,
            "txtbin": "text" if cls.flag_txt else "binary",
        })
    rows.sort(key=lambda row: row["description"])
    return rows
Reports available data types
57,134
def tabulate_filetypes_rest(attrnames=None, header=None,
                            flag_wrap_description=True, description_width=40,
                            flag_leaf=True):
    """Generate a reST multirow table describing the available file types."""
    infos = get_filetypes_info(editor_quote="``", flag_leaf=flag_leaf)
    rows, header = filetypes_info_to_rows_header(
        infos, attrnames, header, flag_wrap_description, description_width)
    return a99.rest_table(rows, header)
Generates a reST multirow table
57,135
def return_action(self, return_action):
    """Set the return_action of this ReturnSettings.

    :raises ValueError: if the value is not None and not an allowed choice.
    """
    allowed = ["refund", "storeCredit"]
    acceptable = return_action is None or return_action in allowed
    if not acceptable:
        raise ValueError(
            "Invalid value for `return_action` ({0}), must be one of {1}"
            .format(return_action, allowed))
    self._return_action = return_action
Sets the return_action of this ReturnSettings .
57,136
def show_config(config):
    """Print the current configuration, one sorted "key: value" per line."""
    print("\nCurrent Configuration:\n")
    for key, value in sorted(config.config.items()):
        print("{0:15}: {1}".format(key, value))
Show the current configuration .
57,137
def create_cloud_user(cfg, args):
    """Attempt to create the user on the cloud node.

    :param cfg: configuration mapping with 'api_server', 'email', 'api_key'
    :param args: object carrying user_email, user_name, user_role
    :return: the decoded JSON response
    :raises Exception: when the cloud node does not answer with a 2xx status
    """
    url = cfg['api_server'] + "admin/add-user"
    params = {
        'user_email': args.user_email,
        'user_name': args.user_name,
        'user_role': args.user_role,
        'email': cfg['email'],
        'api_key': cfg['api_key'],
    }
    headers = {'Content-Type': 'application/json'}
    response = requests.post(url, data=json.dumps(params), headers=headers)
    # Bug fix: `range(200, 299)` silently excluded status 299; accept the
    # whole 2xx family.
    if not 200 <= response.status_code < 300:
        raise Exception("Errors contacting the cloud node: %s" % (response.content))
    return json.loads(response.content)
Attempt to create the user on the cloud node .
57,138
def make_store(name, min_length=4, **kwargs):
    """Create a store with a reasonable keygen.

    :param name: one of the registered store names
    :param min_length: minimum generated key length, forwarded to the store
    :raises ValueError: for unknown store names
    """
    if name not in stores:
        raise ValueError('valid stores are {0}'.format(', '.join(stores)))
    # Dispatch table instead of an if/elif chain.  Unlike the original,
    # this cannot hit an UnboundLocalError should `stores` ever contain a
    # name with no matching class here — it raises ValueError instead.
    classes = {
        'memcache': MemcacheStore,
        'memory': MemoryStore,
        'redis': RedisStore,
    }
    try:
        store_cls = classes[name]
    except KeyError:
        raise ValueError('valid stores are {0}'.format(', '.join(stores)))
    return store_cls(min_length=min_length, **kwargs)
Creates a store with a reasonable keygen.
57,139
# Copy packages between repos.  `line` is "<source> <destination>"; a
# source without ':' is a local file uploaded directly, otherwise it is
# "<repo>:<package-spec>" — the packages are downloaded into a temp dir,
# re-uploaded to the destination repo, and the temp dir is cleared in a
# finally block.  Unknown source repos raise ShellError.
# NOTE(review): flattened source — the exact try/finally nesting shown
# here is an inference from the token order; confirm against the original
# indentation.
def do_copy ( self , line ) : words = line . split ( ) source , destination = words destination_repo = self . _get_destination_repo ( destination ) local_file_source = ':' not in source if local_file_source : destination_repo . upload_packages ( [ source ] ) else : source_repo_name , _ , package_spec = source . partition ( ':' ) try : source_repo = self . network . get_repo ( source_repo_name ) except UnknownRepoError : raise ShellError ( 'Unknown repository {}' . format ( source_repo_name ) ) try : source_repo . download_packages ( package_spec , self . __temp_dir ) destination_repo . upload_packages ( self . __temp_dir . files ) finally : self . __temp_dir . clear ( )
Copy packages between repos
57,140
def do_work_on(self, repo):
    """Make *repo* the active repository.

    Commands operating on a repo will then use it as the default for
    their repo parameter.
    """
    self.abort_on_nonexisting_repo(repo, 'work_on')
    self.network.active_repo = repo
Make repo the active one . Commands working on a repo will use it as default for repo parameter .
57,141
# Show python packaging configuration status: prints the Pyrene version,
# whether ~/.pip/pip.conf exists and which known repo it is configured for
# (or that it is a custom configuration), and whether the .pypirc file
# exists — all colorized with bold/green/red helpers.
def do_status ( self , line ) : print ( '{} {}' . format ( bold ( 'Pyrene version' ) , green ( get_version ( ) ) ) ) pip_conf = os . path . expanduser ( '~/.pip/pip.conf' ) if os . path . exists ( pip_conf ) : conf = read_file ( pip_conf ) repo = self . _get_repo_for_pip_conf ( conf ) if repo : print ( '{} is configured for repository "{}"' . format ( bold ( pip_conf ) , green ( repo . name ) ) ) else : print ( '{} exists, but is a {}' . format ( bold ( pip_conf ) , red ( 'custom configuration' ) ) ) else : print ( '{} {}' . format ( bold ( pip_conf ) , red ( 'does not exists' ) ) ) if os . path . exists ( self . pypirc ) : template = green ( 'exists' ) else : template = red ( 'does not exists' ) template = '{} ' + template print ( template . format ( bold ( self . pypirc ) ) )
Show python packaging configuration status
57,142
def do_forget(self, repo):
    """Drop the definition of *repo* from the network."""
    self.abort_on_nonexisting_repo(repo, 'forget')
    self.network.forget(repo)
Drop definition of a repo .
57,143
def do_set(self, line):
    """Set an attribute on the active repo from a line "attr=value".

    :raises ShellError: when the attribute part is empty or '=' is missing.
    """
    self.abort_on_invalid_active_repo('set')
    repo = self.network.active_repo
    attribute, separator, value = line.partition('=')
    if not attribute:
        raise ShellError('command "set" requires a non-empty attribute')
    if not separator:
        raise ShellError('command "set" requires a value')
    self.network.set(repo, attribute, value)
Set repository attributes on the active repo .
57,144
def do_list(self, line):
    """Print the names of all known repositories."""
    names = self.network.repo_names
    print('Known repos:')
    print(' ' + '\n '.join(names))
List known repos
57,145
def do_show(self, repo):
    """Print the attributes of the given (or effective) repo."""
    self.abort_on_nonexisting_effective_repo(repo, 'show')
    self.network.get_repo(repo).print_attributes()
List repo attributes
57,146
# Export notebook `filename` to a padded per-question PDF bundle for
# Gradescope: verify wkhtmltopdf is installed, save and re-read the
# notebook (solution cells when solution=True), render one PDF per
# question (pages_per_q pages each, scaled by `zoom`) into `folder`,
# merge them into `output`, warn when the number of questions found
# differs from `num_questions`, and finally show an HTML download link
# when running under IPython (plain prints otherwise).
def convert ( filename , num_questions = None , solution = False , pages_per_q = DEFAULT_PAGES_PER_Q , folder = 'question_pdfs' , output = 'gradescope.pdf' , zoom = 1 ) : check_for_wkhtmltohtml ( ) save_notebook ( filename ) nb = read_nb ( filename , solution = solution ) pdf_names = create_question_pdfs ( nb , pages_per_q = pages_per_q , folder = folder , zoom = zoom ) merge_pdfs ( pdf_names , output ) n_questions_found = len ( pdf_names ) - 1 if num_questions is not None and n_questions_found != num_questions : logging . warning ( 'We expected there to be {} questions but there are only {} in ' 'your final PDF. Gradescope will most likely not accept your ' 'submission. Double check that you wrote your answers in the ' 'cells that we provided.' . format ( num_questions , len ( pdf_names ) ) ) try : from IPython . display import display , HTML display ( HTML ( DOWNLOAD_HTML . format ( output ) ) ) except ImportError : print ( 'Done! The resulting PDF is located in this directory and is ' 'called {}. Upload that PDF to Gradescope for grading.' . format ( output ) ) print ( ) print ( 'If the font size of your PDF is too small/large, change the value ' 'of the zoom argument when calling convert. For example, setting ' 'zoom=2 makes everything twice as big.' )
Public method that exports nb to PDF and pads all the questions .
57,147
def check_for_wkhtmltohtml():
    """Check that the wkhtmltopdf binary is installed.

    :raises ValueError: (after logging) when the binary cannot be located
        via `which`/`where`.
    """
    locator = 'where' if sys.platform == 'win32' else 'which'
    proc = subprocess.Popen([locator, 'wkhtmltopdf'], stdout=subprocess.PIPE)
    wkhtmltopdf = proc.communicate()[0].strip()
    if os.path.exists(wkhtmltopdf):
        return
    logging.error('No wkhtmltopdf executable found. Please install '
                  'wkhtmltopdf before trying again - {}'.format(WKHTMLTOPDF_URL))
    raise ValueError('No wkhtmltopdf executable found. Please install '
                     'wkhtmltopdf before trying again - {}'.format(WKHTMLTOPDF_URL))
Checks to see if the wkhtmltohtml binary is installed . Raises error if not .
57,148
def read_nb(filename, solution) -> nbformat.NotebookNode:
    """Read a notebook and keep only the cells destined for export.

    Prepends a markdown cell containing the student's email, then filters
    cells by tag (solution tags when *solution* is true) and strips their
    inputs.
    """
    with open(filename, 'r') as f:
        nb = nbformat.read(f, as_version=4)
    email = find_student_email(nb)
    preamble = nbformat.v4.new_markdown_cell(
        source='# ' + email, metadata={'tags': ['q_email']})
    wanted_tags = SOL_TAGS if solution else TAGS
    kept = [remove_input(cell) for cell in nb['cells']
            if cell_has_tags(cell, wanted_tags)]
    nb['cells'] = [preamble] + kept
    return nb
Takes in a filename of a notebook and returns a notebook object containing only the cell outputs to export .
57,149
def nb_to_html_cells(nb) -> list:
    """Convert *nb* into BS4 nodes, one per notebook cell (images inline)."""
    exporter = HTMLExporter()
    exporter.template_file = 'basic'
    body, _resources = exporter.from_notebook_node(nb)
    soup = BeautifulSoup(body, 'html.parser')
    return soup.findAll('div', class_='cell')
Converts notebook to an iterable of BS4 HTML nodes . Images are inline .
57,150
def nb_to_q_nums(nb) -> list:
    """Return the question tag (first tag containing 'q') of every cell.

    Each cell is expected to carry at least one metadata tag.
    """
    def question_tag(cell):
        assert cell.metadata.tags
        return first(filter(lambda tag: 'q' in tag, cell.metadata.tags))
    return [question_tag(cell) for cell in nb['cells']]
Gets question numbers from each cell in the notebook
57,151
def pad_pdf_pages(pdf_name, pages_per_q) -> None:
    """Force the PDF at *pdf_name* to exactly *pages_per_q* pages.

    Longer PDFs are truncated (with a warning); shorter ones are padded
    with blank pages.  The file is rewritten in place.
    """
    reader = PyPDF2.PdfFileReader(pdf_name)
    writer = PyPDF2.PdfFileWriter()
    num_pages = reader.getNumPages()
    if num_pages > pages_per_q:
        logging.warning('{} has {} pages. Only the first '
                        '{} pages will get output.'
                        .format(pdf_name, num_pages, pages_per_q))
    for index in range(min(num_pages, pages_per_q)):
        writer.addPage(reader.getPage(index))
    for _ in range(max(0, pages_per_q - num_pages)):
        writer.addBlankPage()
    with open(pdf_name, 'wb') as out_file:
        writer.write(out_file)
Checks if PDF has the correct number of pages . If it has too many warns the user . If it has too few adds blank pages until the right length is reached .
57,152
def create_question_pdfs(nb, pages_per_q, folder, zoom) -> list:
    """Render one padded PDF per question cell into *folder*.

    Each PDF is named after its question tag (e.g. q04c.pdf).

    :return: list of the created PDF paths
    """
    cells = nb_to_html_cells(nb)
    questions = nb_to_q_nums(nb)
    os.makedirs(folder, exist_ok=True)
    options = PDF_OPTS.copy()
    options['zoom'] = ZOOM_FACTOR * zoom
    created = []
    for question, cell in zip(questions, cells):
        pdf_name = os.path.join(folder, '{}.pdf'.format(question))
        pdfkit.from_string(cell.prettify(), pdf_name, options=options)
        pad_pdf_pages(pdf_name, pages_per_q)
        print('Created ' + pdf_name)
        created.append(pdf_name)
    return created
Converts each cells in tbe notebook to a PDF named something like q04c . pdf . Places PDFs in the specified folder and returns the list of created PDF locations .
57,153
def merge_pdfs(pdf_names, output) -> None:
    """Merge all the given PDFs, in order, into a single file at *output*."""
    merger = PyPDF2.PdfFileMerger()
    # try/finally so the merger (and its open file handles) is released
    # even when appending or writing fails — the original leaked it.
    try:
        for name in pdf_names:
            merger.append(name)
        merger.write(output)
    finally:
        merger.close()
Merges all pdfs together into a single long PDF .
57,154
# Number of currently open connections to the database.
# Reads the 'connection_counter' row from the master table — `%(master)s`
# is interpolated from this instance's attributes via vars(self) — and
# unwraps the scalar from the first row/column of the uncached, non-recarray
# result.
def connection_count ( self ) : return self . sql ( "SELECT value FROM %(master)s WHERE name = 'connection_counter'" % vars ( self ) , cache = False , asrecarray = False ) [ 0 ] [ 0 ]
Number of currently open connections to the database .
57,155
def sql_select(self, fields, *args, **kwargs):
    """Run a simple SELECT against this table.

    Builds "SELECT <fields> FROM __self__ <extra clauses>" and forwards
    it, together with **kwargs, to self.sql(); '__self__' is substituted
    with the real table name there.
    """
    clauses = " ".join(args)
    statement = "SELECT " + str(fields) + " FROM __self__ " + clauses
    return self.sql(statement, **kwargs)
Execute a simple SQL SELECT statement and returns values as new numpy rec array .
57,156
# Execute an SQL statement with optional result caching and recarray
# conversion.  '__self__' is replaced by this table's name.  Cached results
# are returned for parameterless ('?'-free) statements; any statement that
# modified rows (rowcount > 0) or contains DELETE clears the cache.
# Results are converted to a numpy recarray (named after the cursor
# description) unless asrecarray=False; an empty result yields [].
# NOTE(review): `self . __cache` is the class-private (name-mangled) cache
# of the enclosing class; the broad try/except around rec.fromrecords
# re-raises as TypeError and hides the original error.
def sql ( self , SQL , parameters = None , asrecarray = True , cache = True ) : SQL = SQL . replace ( '__self__' , self . name ) if not '?' in SQL and cache and SQL in self . __cache : return self . __cache [ SQL ] c = self . cursor if parameters is None : c . execute ( SQL ) else : c . execute ( SQL , parameters ) if c . rowcount > 0 or SQL . upper ( ) . find ( 'DELETE' ) > - 1 : self . __cache . clear ( ) result = c . fetchall ( ) if not result : return [ ] if asrecarray : try : names = [ x [ 0 ] for x in c . description ] result = numpy . rec . fromrecords ( result , names = names ) except : raise TypeError ( "SQLArray.sql(): failed to return recarray, try setting asrecarray=False to return tuples instead" ) else : pass if cache : self . __cache . append ( SQL , result ) return result
Execute sql statement .
57,157
# Return (min, max) of `variable` across all rows of the table.
# Builds "min(<variable>), max(<variable>)" via vars() string interpolation
# and unpacks the single row returned by self.SELECT.
def limits ( self , variable ) : ( vmin , vmax ) , = self . SELECT ( 'min(%(variable)s), max(%(variable)s)' % vars ( ) ) return vmin , vmax
Return minimum and maximum of variable across all rows of data .
57,158
# Create a new table from a SELECT and return it wrapped in a new SQLarray.
# Only the part of SQL before any ';' is used; a statement that does not
# already start with SELECT...FROM is treated as a WHERE-style suffix; the
# new table name defaults to 'selection_<md5 of sql>' and may not refer to
# the parent table; force=True drops an existing table of that name first.
# NOTE(review): `_sql = + str ( safe_sql )` applies unary plus to a string
# and would raise TypeError at runtime — a string literal (probably
# "SELECT * FROM __self__ WHERE ") was lost ahead of the '+' during
# extraction; restore it before using this function.
def selection ( self , SQL , parameters = None , ** kwargs ) : force = kwargs . pop ( 'force' , False ) safe_sql = re . match ( r'(?P<SQL>[^;]*)' , SQL ) . group ( 'SQL' ) if re . match ( r'\s*SELECT.*FROM' , safe_sql , flags = re . IGNORECASE ) : _sql = safe_sql else : _sql = + str ( safe_sql ) _sql = _sql . replace ( '__self__' , self . name ) newname = kwargs . pop ( 'name' , 'selection_' + md5 ( _sql ) . hexdigest ( ) ) if newname in ( "__self__" , self . name ) : raise ValueError ( "Table name %(newname)r cannot refer to the parent table itself." % vars ( ) ) has_newname = self . has_table ( newname ) c = self . cursor if has_newname and force : c . execute ( "DROP TABLE %(newname)s" % vars ( ) ) has_newname = False if not has_newname : _sql = "CREATE TABLE %(newname)s AS " % vars ( ) + _sql if parameters is None : c . execute ( _sql ) else : c . execute ( _sql , parameters ) return SQLarray ( newname , None , dbfile = self . dbfile , connection = self . connection )
Return a new SQLarray from a SELECT selection .
57,159
def _init_sqlite_functions(self):
    """Register the additional SQL functions and aggregates on the connection."""
    scalar_functions = (
        ("sqrt", 1, sqlfunctions._sqrt),
        ("sqr", 1, sqlfunctions._sqr),
        ("periodic", 1, sqlfunctions._periodic),
        ("pow", 2, sqlfunctions._pow),
        ("match", 2, sqlfunctions._match),
        ("regexp", 2, sqlfunctions._regexp),
        ("fformat", 2, sqlfunctions._fformat),
    )
    for name, nargs, func in scalar_functions:
        self.connection.create_function(name, nargs, func)
    aggregates = (
        ("std", 1, sqlfunctions._Stdev),
        ("stdN", 1, sqlfunctions._StdevN),
        ("median", 1, sqlfunctions._Median),
        ("array", 1, sqlfunctions._NumpyArray),
        ("histogram", 4, sqlfunctions._NumpyHistogram),
        ("distribution", 4, sqlfunctions._NormedNumpyHistogram),
        ("meanhistogram", 5, sqlfunctions._MeanHistogram),
        ("stdhistogram", 5, sqlfunctions._StdHistogram),
        ("minhistogram", 5, sqlfunctions._MinHistogram),
        ("maxhistogram", 5, sqlfunctions._MaxHistogram),
        ("medianhistogram", 5, sqlfunctions._MedianHistogram),
        ("zscorehistogram", 5, sqlfunctions._ZscoreHistogram),
    )
    for name, nargs, aggregate in aggregates:
        self.connection.create_aggregate(name, nargs, aggregate)
additional SQL functions to the database
57,160
def _prune(self):
    """Drop every dict key no longer tracked by the ring buffer.

    Primitive way to keep the dict in sync with the ring buffer: snapshot
    the stale keys first, then delete them via the parent class.
    """
    stale = [key for key in self.keys() if key not in self.__ringbuffer]
    for key in stale:
        super(KRingbuffer, self).__delitem__(key)
Primitive way to keep dict in sync with RB .
57,161
def chunk_on(pipeline, new_chunk_signal, output_type=tuple):
    """Split *pipeline* into chunks, starting a new chunk whenever
    new_chunk_signal(item) is truthy.

    Each completed chunk is wrapped with *output_type* before being
    yielded; a trailing partial chunk is yielded at the end.
    """
    assert iterable(pipeline), 'chunks needs pipeline to be iterable'
    assert callable(new_chunk_signal), 'chunks needs new_chunk_signal to be callable'
    assert callable(output_type), 'chunks needs output_type to be callable'
    pending = deque()
    for item in pipeline:
        if new_chunk_signal(item) and pending:
            yield output_type(pending)
            pending.clear()
        pending.append(item)
    if pending:
        yield output_type(pending)
split the stream into seperate chunks based on a new chunk signal
57,162
def center_image(self, img):
    """Anchor *img* at its center by mutating its anchor_x/anchor_y."""
    img.anchor_x = img.width // 2
    img.anchor_y = img.height // 2
Sets an image s anchor point to its center
57,163
def get_persons(self):
    """Return the names of the people this conversation is with."""
    return [contact["name"] for contact in self.data["to"]["data"]]
Returns a list of strings representing the persons being chatted with.
57,164
def get_messages(self):
    """Return a list of Message objects for the messages in this conversation."""
    return [Message(comment, self) for comment in self.data["comments"]["data"]]
Returns list of Message objects which represents messages being transported .
57,165
def next(self):
    """Return a Conversation for the next page of comments.

    Raises LimitExceededException when the API reports error code 613
    (rate limit exceeded).
    """
    next_url = self.data["comments"]["paging"]["next"]
    page = Conversation(self.data, requests.get(next_url).json())
    comments = page.data["comments"]
    if "error" in comments and comments["error"]["code"] == 613:
        raise LimitExceededException()
    return page
Returns next paging
57,166
def _subset_table ( full_table , subset ) : if not subset : return full_table conditions = subset . replace ( ' ' , '' ) . split ( ';' ) valid = np . ones ( len ( full_table ) , dtype = bool ) for condition in conditions : col = re . split ( "[<>=!]" , condition ) [ 0 ] comp = condition . replace ( col , "" ) try : this_valid = eval ( "full_table['{0}']{1}" . format ( col , comp ) ) except KeyError as e : raise KeyError ( "Column '%s' not found" % e . message ) valid = np . logical_and ( valid , this_valid ) return full_table [ valid ]
Return subtable matching all conditions in subset
57,167
def _subset_meta ( full_meta , subset , incremented = False ) : if not subset : return full_meta , False meta = { } for key , val in full_meta . iteritems ( ) : meta [ key ] = copy . deepcopy ( dict ( val ) ) conditions = subset . replace ( ' ' , '' ) . split ( ';' ) inc = False for condition in conditions : condition_list = re . split ( '[<>=]' , condition ) col = condition_list [ 0 ] val = condition_list [ - 1 ] try : col_step = meta [ col ] [ 'step' ] except : continue operator = re . sub ( '[^<>=]' , '' , condition ) if operator == '==' : meta [ col ] [ 'min' ] = val meta [ col ] [ 'max' ] = val elif operator == '>=' : meta [ col ] [ 'min' ] = val elif operator == '>' : if incremented : meta [ col ] [ 'min' ] = val else : meta [ col ] [ 'min' ] = str ( eval ( val ) + eval ( col_step ) ) inc = True elif operator == '<=' : meta [ col ] [ 'max' ] = val elif operator == '<' : if incremented : meta [ col ] [ 'max' ] = val else : meta [ col ] [ 'max' ] = str ( eval ( val ) - eval ( col_step ) ) inc = True else : raise ValueError , "Subset %s not valid" % condition return meta , inc
Return metadata reflecting all conditions in subset
57,168
def sad(patch, cols, splits, clean=True):
    """Calculate an empirical species abundance distribution.

    For each subpatch defined by ``splits``, sums the counts of every species
    in the full species list and returns a DataFrame with 'spp' and 'y'
    columns. If ``clean`` is True, zero-abundance species are dropped.
    """
    (spp_col, count_col), patch = _get_cols(['spp_col', 'count_col'], cols, patch)
    full_spp_list = np.unique(patch.table[spp_col])

    result_list = []
    for substring, subpatch in _yield_subpatches(patch, splits):
        abundances = [
            np.sum(subpatch.table[count_col][subpatch.table[spp_col] == spp])
            for spp in full_spp_list
        ]
        subdf = pd.DataFrame({'spp': full_spp_list, 'y': abundances})
        if clean:
            subdf = subdf[subdf['y'] > 0]
        result_list.append((substring, subdf))
    return result_list
Calculates an empirical species abundance distribution
57,169
def ssad(patch, cols, splits):
    """Calculate an empirical intra-specific spatial abundance distribution.

    Runs ``sad`` (without cleaning) across all splits, then pivots so that
    each species yields a DataFrame of its counts across the subpatches.
    """
    sad_results = sad(patch, cols, splits, clean=False)

    # Assemble a wide table: one column of counts per subpatch.
    for i, sad_result in enumerate(sad_results):
        subdf = sad_result[1]
        if i == 0:
            fulldf = subdf
            fulldf.columns = ['spp', '0']
        else:
            fulldf[str(i)] = subdf['y']

    result_list = []
    for _, row in fulldf.iterrows():
        counts = np.array(row[1:], dtype=float)
        result_list.append((row[0], pd.DataFrame({'y': counts})))
    return result_list
Calculates an empirical intra - specific spatial abundance distribution
57,170
def sar(patch, cols, splits, divs, ear=False):
    """Calculate an empirical species-area or endemics-area relationship.

    With ``ear=False`` the y value is the mean number of species per cell;
    with ``ear=True`` it is the fraction of species endemic to a single cell.
    """
    def mean_spp_y(spatial_table, all_spp):
        # SAR: mean species richness across cells.
        return np.mean(spatial_table['n_spp'])

    def endemics_y(spatial_table, all_spp):
        # EAR: count species present in exactly one cell, normalize by cells.
        endemic_counter = 0
        for spp in all_spp:
            presence = [spp in cell_set for cell_set in spatial_table['spp_set']]
            if np.sum(presence) == 1:
                endemic_counter += 1
        return endemic_counter / len(spatial_table)

    y_func = endemics_y if ear else mean_spp_y
    return _sar_ear_inner(patch, cols, splits, divs, y_func)
Calculates an empirical species area or endemics area relationship
57,171
def _sar_ear_inner(patch, cols, splits, divs, y_func):
    """Shared driver for SAR/EAR.

    ``y_func(spatial_table, all_spp)`` computes the mean number of species
    (SAR) or fraction of endemics (EAR) for one division of the patch.
    """
    (spp_col, count_col, x_col, y_col), patch = _get_cols(
        ['spp_col', 'count_col', 'x_col', 'y_col'], cols, patch)

    result_list = []
    for substring, subpatch in _yield_subpatches(patch, splits):
        A0 = _patch_area(subpatch, x_col, y_col)
        all_spp = np.unique(subpatch.table[spp_col])

        xs, ys, mean_spp, mean_individs = [], [], [], []
        subdivlist = _split_divs(divs)
        for subdiv in subdivlist:
            spatial_table = _yield_spatial_table(
                subpatch, subdiv, spp_col, count_col, x_col, y_col)
            ys.append(y_func(spatial_table, all_spp))
            # Cell area = total area / number of cells ('2,2' -> 2*2).
            xs.append(A0 / eval(subdiv.replace(',', '*')))
            mean_spp.append(np.mean(spatial_table['n_spp']))
            mean_individs.append(np.mean(spatial_table['n_individs']))

        subresult = pd.DataFrame({'div': subdivlist, 'x': xs, 'y': ys,
                                  'n_spp': mean_spp,
                                  'n_individs': mean_individs})
        result_list.append((substring, subresult))
    return result_list
y_func is function calculating the mean number of species or endemics respectively for the SAR or EAR
57,172
def comm_grid(patch, cols, splits, divs, metric='Sorensen'):
    """Calculate commonality as a function of distance for a gridded patch.

    Parameters
    ----------
    patch : Patch
        Patch object containing the census table.
    cols : str
        'special_col:col_name' pairs naming spp/count/x/y columns.
    splits : str
        Splits string dividing the patch into subpatches ('' for none).
    divs : str
        Division of the patch into grid cells, e.g. '2,2'.
    metric : str
        'Sorensen' or 'Jaccard' (case-insensitive).

    Returns
    -------
    list of (str, DataFrame)
        Per split, a DataFrame with 'pair', 'x' (distance) and 'y'
        (commonality) columns for every pair of grid cells.

    Notes
    -----
    Fixed the Python 2-only ``raise ValueError, msg`` syntax (SyntaxError
    under Python 3); logic is otherwise unchanged.
    """
    (spp_col, count_col, x_col, y_col), patch = _get_cols(
        ['spp_col', 'count_col', 'x_col', 'y_col'], cols, patch)

    result_list = []
    for substring, subpatch in _yield_subpatches(patch, splits):
        spatial_table = _yield_spatial_table(subpatch, divs, spp_col,
                                             count_col, x_col, y_col)
        spp_set = spatial_table['spp_set']
        cell_loc = spatial_table['cell_loc']
        n_spp = spatial_table['n_spp']

        pair_list = []
        dist_list = []
        comm_list = []
        # All unordered pairs of cells.
        for i in range(len(spatial_table)):
            for j in range(i + 1, len(spatial_table)):
                iloc = np.round(cell_loc[i], 6)
                jloc = np.round(cell_loc[j], 6)
                pair_list.append('(' + str(iloc[0]) + ' ' + str(iloc[1]) +
                                 ') - ' +
                                 '(' + str(jloc[0]) + ' ' + str(jloc[1]) + ')')
                dist_list.append(_distance(cell_loc[i], cell_loc[j]))

                ij_intersect = spp_set[i] & spp_set[j]
                if metric.lower() == 'sorensen':
                    comm = 2 * len(ij_intersect) / (n_spp[i] + n_spp[j])
                elif metric.lower() == 'jaccard':
                    comm = len(ij_intersect) / len(spp_set[i] | spp_set[j])
                else:
                    raise ValueError("Only Sorensen and Jaccard metrics are "
                                     "available for gridded commonality")
                comm_list.append(comm)

        subresult = pd.DataFrame({'pair': pair_list, 'x': dist_list,
                                  'y': comm_list})
        result_list.append((substring, subresult))
    return result_list
Calculates commonality as a function of distance for a gridded patch
57,173
def _yield_spatial_table(patch, div, spp_col, count_col, x_col, y_col):
    """Calculate an empirical spatial table for one grid division.

    Returns a DataFrame with one row per grid cell: cell center location,
    set of species present, species richness and number of individuals.
    """
    try:
        div_split_list = div.replace(';', '').split(',')
    except AttributeError:  # div given as a tuple rather than a string
        div_split_list = str(div).strip("()").split(',')

    div_split = (x_col + ':' + div_split_list[0] + ';' +
                 y_col + ':' + div_split_list[1])

    # Cell centers: start of each cell plus half the cell width/height.
    x_starts, x_ends = _col_starts_ends(patch, x_col, div_split_list[0])
    x_locs = x_starts + (x_ends[0] - x_starts[0]) / 2
    y_starts, y_ends = _col_starts_ends(patch, y_col, div_split_list[1])
    y_locs = y_starts + (y_ends[0] - y_starts[0]) / 2
    cell_locs = _product(x_locs, y_locs)

    spp_set_list = []
    n_spp_list = []
    n_individs_list = []
    for cellstring, cellpatch in _yield_subpatches(patch, div_split, name='div'):
        spp_set = set(np.unique(cellpatch.table[spp_col]))
        spp_set_list.append(spp_set)
        n_spp_list.append(len(spp_set))
        n_individs_list.append(np.sum(cellpatch.table[count_col]))

    return pd.DataFrame({'cell_loc': cell_locs, 'spp_set': spp_set_list,
                         'n_spp': n_spp_list, 'n_individs': n_individs_list})
Calculates an empirical spatial table
57,174
def _get_cols ( special_col_names , cols , patch ) : if not cols : if 'cols' in patch . meta [ 'Description' ] . keys ( ) : cols = patch . meta [ 'Description' ] [ 'cols' ] else : raise NameError , ( "cols argument not given, spp_col at a minimum " "must be specified" ) cols = cols . replace ( ' ' , '' ) col_list = cols . split ( ';' ) col_dict = { x . split ( ':' ) [ 0 ] : x . split ( ':' ) [ 1 ] for x in col_list } result = [ ] for special_col_name in special_col_names : col_name = col_dict . get ( special_col_name , None ) if special_col_name is 'count_col' and col_name is None : col_name = 'count' patch . table [ 'count' ] = np . ones ( len ( patch . table ) ) if col_name is None : raise ValueError , ( "Required column %s not specified" % special_col_name ) result . append ( col_name ) return tuple ( result ) , patch
Retrieve values of special_cols from cols string or patch metadata
57,175
def _yield_subpatches(patch, splits, name='split'):
    """Iterate over subpatches defined by a splits string.

    Yields (subset_string, subpatch) pairs; with no splits, yields the
    original patch once with an empty subset string.
    """
    if not splits:
        yield '', patch
        return

    for subset in _parse_splits(patch, splits):
        logging.info('Analyzing subset %s: %s' % (name, subset))
        # Shallow-copy the patch, then replace its table and metadata so the
        # original patch is left untouched.
        subpatch = copy.copy(patch)
        subpatch.table = _subset_table(patch.table, subset)
        subpatch.meta, subpatch.incremented = _subset_meta(
            patch.meta, subset, incremented=True)
        yield subset, subpatch
Iterator for subtables defined by a splits string
57,176
def _parse_splits(patch, splits):
    """Parse a splits string into the list of all associated subset strings.

    Each 'col:val' piece yields either one subset per unique value of the
    column (val == 'split') or per interval (val = number of divisions); the
    Cartesian product of all pieces gives the final subset strings.
    """
    level_lists = []
    for piece in splits.replace(' ', '').split(';'):
        col, val = piece.split(':')
        if val == 'split':
            # Order-preserving unique values of the column.
            seen = []
            for level in patch.table[col]:
                if level not in seen:
                    seen.append(level)
            levels = [col + '==' + str(x) + '; ' for x in seen]
        else:
            starts, ends = _col_starts_ends(patch, col, val)
            levels = [col + '>=' + str(x) + '; ' + col + '<' + str(y) + '; '
                      for x, y in zip(starts, ends)]
        level_lists.append(levels)
    # Join each combination and strip the trailing '; '.
    return [''.join(combo)[:-2] for combo in _product(*level_lists)]
Parse splits string to get list of all associated subset strings .
57,177
def _product ( * args , ** kwds ) : pools = map ( tuple , args ) * kwds . get ( 'repeat' , 1 ) result = [ [ ] ] for pool in pools : result = [ x + [ y ] for x in result for y in pool ] return result
Generates cartesian product of lists given as arguments
57,178
def empirical_cdf(data):
    """Generate an empirical cdf from data.

    Parameters
    ----------
    data : iterable
        Sequence of (hashable, sortable) observations.

    Returns
    -------
    DataFrame
        Columns 'data' (the observations) and 'ecdf' (cumulative
        probability of each observation's value).

    Notes
    -----
    Replaced ``np.float`` (alias removed in NumPy >= 1.24) with the builtin
    ``float``; results are unchanged.
    """
    vals = pd.Series(data).value_counts()
    ecdf = pd.DataFrame(data).set_index(keys=0)
    # Cumulative counts over sorted values, normalized to probabilities.
    probs = pd.DataFrame(vals.sort_index().cumsum() / float(len(data)))
    ecdf = ecdf.join(probs)
    ecdf = ecdf.reset_index()
    ecdf.columns = ['data', 'ecdf']
    return ecdf
Generates an empirical cdf from data
57,179
def _load_table(self, metadata_path, data_path):
    """Load the data table, taking a subset if self.subset is set.

    data_path is resolved relative to the metadata file's directory; csv
    files are read with pandas, db/sql files are delegated to _get_db_table.
    """
    base_dir = os.path.dirname(os.path.expanduser(metadata_path))
    data_path = os.path.normpath(os.path.join(base_dir, data_path))
    extension = data_path.split('.')[-1]

    if extension == 'csv':
        full_table = pd.read_csv(data_path, index_col=False)
        table = _subset_table(full_table, self.subset)
        self.meta, _ = _subset_meta(self.meta, self.subset)
    elif extension in ['db', 'sql']:
        table = self._get_db_table(data_path, extension)
    else:
        raise TypeError('Cannot process file of type %s' % extension)

    return table
Load data table taking subset if needed
57,180
def _get_db_table ( self , data_path , extension ) : raise NotImplementedError , "SQL and db file formats not yet supported" if extension == 'sql' : con = lite . connect ( ':memory:' ) con . row_factory = lite . Row cur = con . cursor ( ) with open ( data_path , 'r' ) as f : sql = f . read ( ) cur . executescript ( sql ) else : con = lite . connect ( data_path ) con . row_factory = lite . Row cur = con . cursor ( ) cur . execute ( self . subset ) db_info = cur . fetchall ( ) try : col_names = db_info [ 0 ] . keys ( ) except IndexError : raise lite . OperationalError ( "Query %s to database %s is empty" % ( query_str , data_path ) ) converted_info = [ tuple ( x ) for x in db_info ] dtypes = [ type ( x ) if type ( x ) != unicode else 'S150' for x in db_info [ 0 ] ] table = np . array ( converted_info , dtype = zip ( col_names , dtypes ) ) con . commit ( ) con . close ( ) return table . view ( np . recarray )
Query a database and return query result as a recarray
57,181
def doc_sub(*sub):
    """Decorator performing str.format substitutions in an object's docstring.

    Usage: ``@doc_sub(text)`` replaces ``{0}`` (etc.) in the decorated
    object's __doc__ with the given arguments.
    """
    def decorate(obj):
        obj.__doc__ = obj.__doc__.format(*sub)
        return obj
    return decorate
Decorator for performing substitutions in docstrings .
57,182
def log_start_end(f):
    """Decorator that logs the start and end of a function call."""
    def wrapper(f, *args, **kwargs):
        logging.info('Starting %s' % f.__name__)
        result = f(*args, **kwargs)
        logging.info('Finished %s' % f.__name__)
        return result
    # decorator.decorator preserves the wrapped function's signature.
    return decorator.decorator(wrapper, f)
Decorator to log start and end of function
57,183
def check_parameter_file(filename):
    """Rudimentary format check of the cols, splits and divs entries in a
    parameter file.

    For 'cols'/'splits' lines, the number of colons must be one more than the
    number of semicolons; for 'divs' lines, the same must hold for commas.

    Returns
    -------
    (list of str, list of int)
        Names of malformed entries and the 1-based line numbers they occur on.
    """
    with open(filename, "r") as fin:
        content = fin.read()

    bad_names = []
    line_numbers = []
    for keyword in ["cols", "splits", "divs"]:
        start = content.find(keyword)
        while start != -1:
            # Value after '=' on this line, with all spaces removed.
            value = "".join(
                content[start:].split("\n")[0].split("=")[-1].split(" "))
            semis = value.count(";")
            line_end = content.find("\n", start)
            line_number = content[:line_end].count("\n") + 1
            if keyword == "divs":
                separators = value.count(",")
            else:
                separators = value.count(":")
            if separators != (semis + 1):
                bad_names.append(keyword)
                line_numbers.append(line_number)
            start = content.find(keyword, start + 1)

    return bad_names, line_numbers
Function does a rudimentary check whether the cols splits and divs columns in the parameter files are formatted properly .
57,184
def handle_starttag(self, tag, attrs):
    """Handle a new opening tag, appending its text-mode rendering.

    Ordered/unordered lists track nesting depth and numbering; list items
    emit an indented bullet or number; anchors record the URL and append it
    in angle brackets. Non-whitelisted tags are ignored.
    """
    tag_name = tag.lower()
    if tag_name not in self.allowed_tag_whitelist:
        return

    if tag_name == 'ol':
        # Entering an ordered list: stash the current counter and restart.
        self.previous_nbs.append(self.nb)
        self.nb = 0
        self.previous_type_lists.append(tag_name)
        self.result = self.result.rstrip()
    elif tag_name == 'ul':
        self.previous_type_lists.append(tag_name)
        self.result = self.result.rstrip()
    elif tag_name == 'li':
        self.result = self.result.rstrip()
        indent = ' ' * len(self.previous_type_lists)
        if self.previous_type_lists[-1] == 'ol':
            self.nb += 1
            bullet = str(self.nb) + '. '
        else:
            bullet = '* '
        self.result += '\n' + self.line_quotation + indent + bullet
    elif tag_name == 'a':
        for (attr, value) in attrs:
            if attr.lower() == 'href':
                self.url = value
                self.result += '<' + value + '>'
Function called for new opening tags
57,185
def handle_entityref(self, name):
    """Process a general entity reference of the form &name;, appending the
    corresponding character to the result whenever possible."""
    char_code = html_entities.name2codepoint.get(name, None)
    if char_code is None:
        return
    try:
        # NOTE: unichr/encode are Python 2 idioms, preserved as-is.
        self.result += unichr(char_code).encode("utf-8")
    except:
        return
Process a general entity reference of the form &name;. Transform it to text whenever possible.
57,186
def bethe_lattice(energy, hopping):
    """Density of states of the Bethe lattice in infinite dimensions.

    The semicircular DOS rho(e) = sqrt(4t^2 - e^2) / (2 pi t^2); energies
    outside the band [-2t, 2t] are clipped, giving zero density there.
    """
    e = np.asarray(energy).clip(-2 * hopping, 2 * hopping)
    bandwidth_sq = 4 * hopping ** 2
    return np.sqrt(bandwidth_sq - e ** 2) / (2 * np.pi * hopping ** 2)
Bethe lattice in inf dim density of states
57,187
def bethe_fermi(energy, quasipart, shift, hopping, beta):
    """Product of the Bethe lattice DOS and the Fermi distribution."""
    occupation = fermi_dist(quasipart * energy - shift, beta)
    return occupation * bethe_lattice(energy, hopping)
Product of the Bethe lattice density of states and the Fermi distribution.
57,188
def bethe_fermi_ene(energy, quasipart, shift, hopping, beta):
    """Product of the Bethe lattice DOS and the Fermi distribution, weighted
    by energy."""
    weight = bethe_fermi(energy, quasipart, shift, hopping, beta)
    return energy * weight
Product of the Bethe lattice density of states and the Fermi distribution, weighted by energy.
57,189
def bethe_filling_zeroT(fermi_energy, hopping):
    """Average particle count at zero temperature for a given Fermi energy,
    using the semicircular density of states of the Bethe lattice.

    The closed-form integral of the DOS up to the Fermi energy; energies
    outside the band [-2t, 2t] are clipped.
    """
    ef = np.asarray(fermi_energy).clip(-2 * hopping, 2 * hopping)
    band_term = ef / 2 * bethe_lattice(ef, hopping)
    return 1 / 2. + band_term + np.arcsin(ef / 2 / hopping) / np.pi
Returns the average particle count for a given Fermi energy, using the semicircular density of states of the Bethe lattice.
57,190
def bethe_findfill_zeroT(particles, orbital_e, hopping):
    """Return the Fermi energy corresponding to the given particle count for
    semicircular densities of states in a (possibly non-degenerate)
    multi-orbital Bethe lattice."""
    assert 0. <= particles <= len(orbital_e)

    def filling_mismatch(e):
        # Total filling across all orbitals minus the target particle count.
        return np.sum([bethe_filling_zeroT(e - e_m, t)
                       for t, e_m in zip(hopping, orbital_e)]) - particles

    return fsolve(filling_mismatch, 0)
Return the fermi energy that correspond to the given particle quantity in a semicircular density of states of a bethe lattice in a multi orbital case that can be non - degenerate
57,191
def bethe_find_crystalfield(populations, hopping):
    """Return the orbital energies that produce the desired individual
    orbital populations."""
    def population_mismatch(orb):
        # Per-orbital difference between achieved and requested filling.
        return [bethe_filling_zeroT(-em, tz) - pop
                for em, tz, pop in zip(orb, hopping, populations)]

    return fsolve(population_mismatch, np.zeros(len(populations)))
Return the orbital energies to have the system populates as desired by the given individual populations
57,192
def _get_var_from_string(item):
    """Get a resource variable from its (possibly dotted) path string."""
    modname, varname = _split_mod_var_names(item)
    if not modname:
        # Bare name: resolve in this module's globals.
        return globals()[varname]
    # level=-1: Python 2 relative-then-absolute import semantics.
    mod = __import__(modname, globals(), locals(), [varname], -1)
    return getattr(mod, varname)
Get resource variable .
57,193
def _handle_list(reclist):
    """Return the resources from reclist that define an access_controller."""
    matched = []
    for entry in reclist:
        resources = _handle_resource_setting(entry)
        matched += [res for res in resources if res.access_controller]
    return matched
Return list of resources that have access_controller defined .
57,194
def _ensure_content_type():
    """Add the bulldog content type to the database if it's missing and
    return its id."""
    from django.contrib.contenttypes.models import ContentType
    try:
        row = ContentType.objects.get(app_label=PERM_APP_NAME)
    except ContentType.DoesNotExist:
        # Not present yet: create and persist it.
        row = ContentType(name=PERM_APP_NAME, app_label=PERM_APP_NAME,
                          model=PERM_APP_NAME)
        row.save()
    return row.id
Add the bulldog content type to the database if it s missing .
57,195
def _get_permission_description ( permission_name ) : parts = permission_name . split ( '_' ) parts . pop ( 0 ) method = parts . pop ( ) resource = ( '_' . join ( parts ) ) . lower ( ) return 'Can %s %s' % ( method . upper ( ) , resource )
Generate a descriptive string based on the permission name .
57,196
def _populate_permissions(resources, content_type_id):
    """Add all missing permissions to the database."""
    from django.contrib.auth.models import Permission
    existing = [perm.codename for perm in Permission.objects.all()]
    for resource in resources:
        # Save every permission this resource declares that isn't stored yet.
        missing = [name for name
                   in resource.access_controller.get_perm_names(resource)
                   if name not in existing]
        for name in missing:
            _save_new_permission(name, content_type_id)
Add all missing permissions to the database .
57,197
def init_default(self):
    """Overridden to load the default database shipped with the package and
    save a local copy of it."""
    import f311
    cls = self.__class__
    if self.default_filename is None:
        raise RuntimeError("Class '{}' has no default filename".format(cls.__name__))
    fullpath = f311.get_default_data_path(self.default_filename, class_=cls)
    self.load(fullpath)
    # Save a copy in the working directory under a non-clashing name.
    name, ext = os.path.splitext(self.default_filename)
    new = a99.new_filename(os.path.join("./", name), ext)
    self.save_as(new)
Overridden to load the default database and save a local copy.
57,198
def _do_save_as(self, filename):
    """Close the connection, copy the DB file, and reopen pointing at the
    new file. No-op when the target equals the current filename."""
    if filename == self.filename:
        return
    self._ensure_filename()
    self._close_if_open()
    shutil.copyfile(self.filename, filename)
    self.__get_conn(filename=filename)
Closes connection copies DB file and opens again pointing to new file
57,199
def ensure_schema(self):
    """Create the database file and schema if they do not exist yet."""
    self._ensure_filename()
    if os.path.isfile(self.filename):
        return
    self.create_schema()
Create file and schema if it does not exist yet .