idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
54,300
def staticmap(ctx, mapid, output, features, lat, lon, zoom, size):
    """Generate a static map image from an existing Mapbox map id,
    optionally overlaying GeoJSON features, and write the bytes to *output*."""
    access_token = (ctx.obj and ctx.obj.get('access_token')) or None
    if features:
        features = list(
            cligj.normalize_feature_inputs(None, 'features', [features]))
    service = mapbox.Static(access_token=access_token)
    try:
        res = service.image(
            mapid, lon=lon, lat=lat, z=zoom,
            width=size[0], height=size[1],
            features=features, sort_keys=True)
    except mapbox.errors.ValidationError as exc:
        raise click.BadParameter(str(exc))
    if res.status_code == 200:
        output.write(res.content)
    else:
        raise MapboxCLIException(res.text.strip())
Generate static map images from existing Mapbox map ids . Optionally overlay with geojson features .
54,301
def main_group(ctx, verbose, quiet, access_token, config):
    """Command-line entry point: load the config file, configure logging
    verbosity, and resolve the Mapbox access token."""
    ctx.obj = {}
    config = config or os.path.join(click.get_app_dir('mapbox'), 'mapbox.ini')
    cfg = read_config(config)
    if cfg:
        ctx.obj['config_file'] = config
        ctx.obj['cfg'] = cfg
        ctx.default_map = cfg
    # CLI flags win over the environment, which wins over the config file.
    verbosity = (os.environ.get('MAPBOX_VERBOSE')
                 or ctx.lookup_default('mapbox.verbosity')
                 or 0)
    if verbose or quiet:
        verbosity = verbose - quiet
    verbosity = int(verbosity)
    configure_logging(verbosity)
    access_token = (access_token
                    or os.environ.get('MAPBOX_ACCESS_TOKEN')
                    or os.environ.get('MapboxAccessToken')
                    or ctx.lookup_default('mapbox.access-token'))
    ctx.obj['verbosity'] = verbosity
    ctx.obj['access_token'] = access_token
This is the command line interface to Mapbox web services .
54,302
def config(ctx):
    """Show the access token and other configuration settings."""
    ctx.default_map = ctx.obj['cfg']
    click.echo("CLI:")
    click.echo("access-token = {0}".format(ctx.obj['access_token']))
    click.echo("verbosity = {0}".format(ctx.obj['verbosity']))
    click.echo("")
    click.echo("Environment:")
    if 'MAPBOX_ACCESS_TOKEN' in os.environ:
        click.echo("MAPBOX_ACCESS_TOKEN = {0}".format(
            os.environ['MAPBOX_ACCESS_TOKEN']))
    if 'MapboxAccessToken' in os.environ:
        click.echo("MapboxAccessToken = {0}".format(
            os.environ['MapboxAccessToken']))
    if 'MAPBOX_VERBOSE' in os.environ:
        click.echo("MAPBOX_VERBOSE = {0}".format(
            os.environ['MAPBOX_VERBOSE']))
    click.echo("")
    if 'config_file' in ctx.obj:
        click.echo("Config file {0}:".format(ctx.obj['config_file']))
        for key, value in ctx.default_map.items():
            click.echo("{0} = {1}".format(key, value))
        click.echo("")
Show access token and other configuration settings .
54,303
def echo_headers(headers, file=None):
    """Echo the response headers, sorted by name, followed by a blank line."""
    for name, value in sorted(headers.items()):
        click.echo("{0}: {1}".format(name.title(), value), file=file)
    click.echo(file=file)
Echo headers sorted .
54,304
def datasets(ctx):
    """Command group for reading and writing GeoJSON datasets; stashes a
    Datasets service object on the context for subcommands."""
    access_token = (ctx.obj and ctx.obj.get('access_token')) or None
    ctx.obj['service'] = mapbox.Datasets(access_token=access_token)
Read and write GeoJSON from Mapbox-hosted datasets.
54,305
def create(ctx, name, description):
    """Create a new dataset and echo the service response."""
    service = ctx.obj.get('service')
    res = service.create(name, description)
    if res.status_code == 200:
        click.echo(res.text)
    else:
        raise MapboxCLIException(res.text.strip())
Create a new dataset .
54,306
def read_dataset(ctx, dataset, output):
    """Read the attributes of a dataset and write them to *output*."""
    stdout = click.open_file(output, 'w')
    service = ctx.obj.get('service')
    res = service.read_dataset(dataset)
    if res.status_code == 200:
        click.echo(res.text, file=stdout)
    else:
        raise MapboxCLIException(res.text.strip())
Read the attributes of a dataset .
54,307
def list_features(ctx, dataset, reverse, start, limit, output):
    """Get the features of a dataset and write them to *output*."""
    stdout = click.open_file(output, 'w')
    service = ctx.obj.get('service')
    res = service.list_features(dataset, reverse, start, limit)
    if res.status_code == 200:
        click.echo(res.text, file=stdout)
    else:
        raise MapboxCLIException(res.text.strip())
Get features of a dataset .
54,308
def put_feature(ctx, dataset, fid, feature, input):
    """Create or update a dataset feature from an argument or an input file."""
    if feature is None:
        stdin = click.open_file(input, 'r')
        feature = stdin.read()
    feature = json.loads(feature)
    service = ctx.obj.get('service')
    res = service.update_feature(dataset, fid, feature)
    if res.status_code == 200:
        click.echo(res.text)
    else:
        raise MapboxCLIException(res.text.strip())
Create or update a dataset feature .
54,309
def delete_feature(ctx, dataset, fid):
    """Delete a feature from a dataset; raises on any non-204 response."""
    service = ctx.obj.get('service')
    res = service.delete_feature(dataset, fid)
    if res.status_code != 204:
        raise MapboxCLIException(res.text.strip())
Delete a feature .
54,310
def create_tileset(ctx, dataset, tileset, name):
    """Create a vector tileset from a dataset via the Uploader service."""
    access_token = (ctx.obj and ctx.obj.get('access_token')) or None
    service = mapbox.Uploader(access_token=access_token)
    # The username is the first dot-separated component of the tileset id.
    uri = "mapbox://datasets/{username}/{dataset}".format(
        username=tileset.split('.')[0], dataset=dataset)
    res = service.create(uri, tileset, name)
    if res.status_code == 201:
        click.echo(res.text)
    else:
        raise MapboxCLIException(res.text.strip())
Create a vector tileset from a dataset .
54,311
def directions(ctx, features, profile, alternatives, geometries, overview,
               steps, continue_straight, waypoint_snapping, annotations,
               language, output):
    """Query the Mapbox Directions API and write the route to *output*."""
    access_token = (ctx.obj and ctx.obj.get("access_token")) or None
    service = mapbox.Directions(access_token=access_token)
    # The CLI passes overview through as a string; normalize "False".
    if overview == "False":
        overview = False
    if waypoint_snapping is not None:
        features = list(features)
    if annotations:
        annotations = annotations.split(",")
    stdout = click.open_file(output, "w")
    try:
        res = service.directions(
            features, profile=profile, alternatives=alternatives,
            geometries=geometries, overview=overview, steps=steps,
            continue_straight=continue_straight,
            waypoint_snapping=waypoint_snapping,
            annotations=annotations, language=language)
    except mapbox.errors.ValidationError as exc:
        raise click.BadParameter(str(exc))
    if res.status_code == 200:
        if geometries == "geojson":
            click.echo(json.dumps(res.geojson()), file=stdout)
        else:
            click.echo(res.text, file=stdout)
    else:
        raise MapboxCLIException(res.text.strip())
The Mapbox Directions API will show you how to get where you're going.
54,312
def upload(ctx, tileset, datasource, name, patch):
    """Upload a data source (URL or local file) to a Mapbox account,
    showing a progress bar for local uploads."""
    access_token = (ctx.obj and ctx.obj.get('access_token')) or None
    service = mapbox.Uploader(access_token=access_token)
    if name is None:
        name = tileset.split(".")[-1]
    if datasource.startswith('https://'):
        res = service.create(datasource, tileset, name=name, patch=patch)
    else:
        sourcefile = click.File('rb')(datasource)
        # Best-effort size estimate for the progress bar; fall back to 1
        # when the source has no measurable length (e.g. stdin).
        if hasattr(sourcefile, 'name'):
            filelen = (1 if sourcefile.name == '<stdin>'
                       else os.stat(sourcefile.name).st_size)
        else:
            filelen = (len(sourcefile.getbuffer())
                       if hasattr(sourcefile, 'getbuffer') else 1)
        with click.progressbar(length=filelen, label='Uploading data source',
                               fill_char="#", empty_char='-',
                               file=sys.stderr) as bar:
            def callback(num_bytes):
                bar.update(num_bytes)
            res = service.upload(sourcefile, tileset, name, patch=patch,
                                 callback=callback)
    if res.status_code == 201:
        click.echo(res.text)
    else:
        raise MapboxCLIException(res.text.strip())
Upload data to Mapbox accounts .
54,313
def _save_notebook(self, os_path, nb):
    """Save notebook *nb* to *os_path*, converting to markdown when the
    target file's extension says so."""
    filetype = ftdetect(os_path)
    with self.atomic_writing(os_path, encoding='utf-8') as f:
        if filetype == 'notebook':
            nbformat.write(nb, f, version=nbformat.NO_CONVERT)
        elif filetype == 'markdown':
            nbjson = nbformat.writes(nb, version=nbformat.NO_CONVERT)
            markdown = convert(nbjson, informat='notebook',
                               outformat='markdown',
                               strip_outputs=self.strip_outputs)
            f.write(markdown)
Save a notebook to an os_path .
54,314
def ftdetect(filename):
    """Classify *filename* as 'markdown' or 'notebook' by its extension;
    return None for anything else."""
    extension = os.path.splitext(filename)[1]
    if extension in ('.md', '.markdown', '.mkd', '.mdown', '.mkdn', '.Rmd'):
        return 'markdown'
    if extension == '.ipynb':
        return 'notebook'
    return None
Determine if filename is markdown or notebook based on the file extension .
54,315
def strip(notebook):
    """Remove outputs and execution counts from all code cells, in place."""
    for cell in notebook.cells:
        if cell.cell_type != 'code':
            continue
        cell.outputs = []
        cell.execution_count = None
Remove outputs from a notebook .
54,316
def get_caption_comments(content):
    """Extract a figure id and quoted caption from the leading comments of
    a code cell; return (None, None) unless the cell starts with '## fig:'."""
    if not content.startswith('## fig:'):
        return None, None
    lines = content.splitlines()
    id = lines[0].strip('## ')
    parts = []
    for line in lines[1:]:
        # Caption lines are single-# comments; stop at the first other line.
        if line.startswith('# ') and not line.startswith('##'):
            parts.append(line.lstrip('# ').rstrip())
        else:
            break
    caption = '"' + ' '.join(parts) + '"'
    return id, caption
Retrieve an id and a caption from a code cell .
54,317
def new_code_block(self, **kwargs):
    """Return a dict describing a new, empty code block, overridden with
    any given keyword arguments."""
    block = {'content': '', 'type': self.code, 'IO': '', 'attributes': ''}
    block.update(**kwargs)
    return block
Create a new code block .
54,318
def new_text_block(self, **kwargs):
    """Return a dict describing a new, empty text block, overridden with
    any given keyword arguments."""
    block = {'content': '', 'type': self.markdown}
    block.update(**kwargs)
    return block
Create a new text block .
54,319
def pre_process_code_block(block):
    """Strip the block's recorded indent from each line of its indented
    content, storing the result under 'content' (modifies block in place)."""
    if block.get('indent'):
        pattern = r'^' + block['indent']
        block['content'] = re.sub(pattern, '', block['icontent'],
                                  flags=re.MULTILINE)
Preprocess the content of a code block modifying the code block in place .
54,320
def process_code_block(self, block):
    """Classify a code block's IO role, language, and attributes according
    to the matching policy; may turn a code block into a text block."""
    if block['type'] != self.code:
        return block
    attr = PandocAttributes(block['attributes'], 'markdown')
    # Decide whether this code block is kept as code under the policy.
    if self.match == 'all':
        pass
    elif self.match == 'fenced' and block.get('indent'):
        return self.new_text_block(content=('\n' + block['icontent'] + '\n'))
    elif self.match == 'strict' and 'input' not in attr.classes:
        return self.new_text_block(content=block['raw'])
    elif self.match not in list(attr.classes) + ['fenced', 'strict']:
        return self.new_text_block(content=block['raw'])
    # Classify as input or output.
    if 'output' in attr.classes and 'json' in attr.classes:
        block['IO'] = 'output'
    elif 'input' in attr.classes:
        block['IO'] = 'input'
        attr.classes.remove('input')
    else:
        block['IO'] = 'input'
    if self.caption_comments:
        id, caption = get_caption_comments(block['content'])
        if id:
            attr.id = id
        if caption:
            attr['caption'] = caption
    # Pull the language out of the attribute classes, if present.
    try:
        language = set(attr.classes).intersection(languages).pop()
        attr.classes.remove(language)
    except KeyError:
        language = None
    block['language'] = language
    block['attributes'] = attr
    if language in ('python', 'py', '', None):
        block['language'] = self.python
    elif language != self.python and self.magic:
        # Prepend a cell magic so the notebook runs the foreign language.
        block['content'] = CodeMagician.magic(language) + block['content']
        block['language'] = language
    return self.new_code_block(**block)
Parse block attributes
54,321
def parse_blocks(self, text):
    """Split markdown *text* into an alternating list of text and code
    blocks, dropping any block with empty content."""
    code_matches = list(self.code_pattern.finditer(text))
    text_starts = [0] + [m.end() for m in code_matches]
    text_stops = [m.start() for m in code_matches] + [len(text)]
    text_limits = list(zip(text_starts, text_stops))
    code_blocks = [self.new_code_block(**m.groupdict())
                   for m in code_matches]
    text_blocks = [self.new_text_block(content=text[i:j])
                   for i, j in text_limits]
    for block in code_blocks:
        self.pre_process_code_block(block)
    for block in text_blocks:
        self.pre_process_text_block(block)
    # Interleave: text blocks at even positions, code blocks at odd ones.
    all_blocks = list(range(len(text_blocks) + len(code_blocks)))
    all_blocks[::2] = text_blocks
    all_blocks[1::2] = code_blocks
    return [cell for cell in all_blocks if cell['content']]
Extract the code and non - code blocks from given markdown text .
54,322
def create_code_cell(block):
    """Create a notebook code cell from a code block dict, carrying over
    pandoc attributes and the 'n' execution count when present."""
    code_cell = nbbase.new_code_cell(source=block['content'])
    attr = block['attributes']
    if not attr.is_empty:
        code_cell.metadata = nbbase.NotebookNode(
            {'attributes': attr.to_dict()})
        execution_count = attr.kvs.get('n')
        code_cell.execution_count = (int(execution_count)
                                     if execution_count else None)
    return code_cell
Create a notebook code cell from a block .
54,323
def create_markdown_cell(block):
    """Create a notebook markdown cell from a text block dict."""
    return nbbase.new_markdown_cell(
        cell_type=block['type'], source=block['content'])
Create a markdown cell from a block .
54,324
def create_cells(self, blocks):
    """Turn processed blocks into a list of notebook cells; output blocks
    attach to the immediately preceding code cell."""
    cells = []
    for block in blocks:
        is_code = block['type'] == self.code
        if is_code and block['IO'] == 'input':
            cells.append(self.create_code_cell(block))
        elif (is_code and block['IO'] == 'output'
                and cells[-1].cell_type == 'code'):
            cells[-1].outputs = self.create_outputs(block)
        elif block['type'] == self.markdown:
            cells.append(self.create_markdown_cell(block))
        else:
            raise NotImplementedError(
                "{} is not supported as a celltype".format(block['type']))
    return cells
Turn the list of blocks into a list of notebook cells .
54,325
def to_notebook(self, s, **kwargs):
    """Convert the markdown string *s* into a notebook object."""
    all_blocks = self.parse_blocks(s)
    if self.pre_code_block['content']:
        all_blocks.insert(0, self.pre_code_block)
    blocks = [self.process_code_block(block) for block in all_blocks]
    cells = self.create_cells(blocks)
    return nbbase.new_notebook(cells=cells)
Convert the markdown string s to an IPython notebook .
54,326
def write_resources(self, resources):
    """Write each output file in *resources* under self.output_dir,
    creating intermediate directories as needed."""
    outputs = resources.get('outputs', {})
    for filename, data in list(outputs.items()):
        dest = os.path.join(self.output_dir, filename)
        parent = os.path.dirname(dest)
        if parent and not os.path.isdir(parent):
            os.makedirs(parent)
        with open(dest, 'wb') as f:
            f.write(data)
Write the output data in resources returned by exporter to files .
54,327
def string2json(self, string):
    """Serialize a notebook JSON structure to unicode JSON text, used when
    writing outputs to markdown."""
    kwargs = {
        'cls': BytesEncoder,
        'indent': 1,
        'sort_keys': True,
        'separators': (',', ': '),
    }
    return cast_unicode(json.dumps(string, **kwargs), 'utf-8')
Convert json into its string representation . Used for writing outputs to markdown .
54,328
def create_attributes(self, cell, cell_type=None):
    """Turn a cell's attribute metadata into an attribute string for the
    generated code block; plain 'python' when outputs are stripped."""
    if self.strip_outputs or not hasattr(cell, 'execution_count'):
        return 'python'
    attrs = cell.metadata.get('attributes')
    attr = PandocAttributes(attrs, 'dict')
    if 'python' in attr.classes:
        attr.classes.remove('python')
    if 'input' in attr.classes:
        attr.classes.remove('input')
    if cell_type == 'figure':
        attr.kvs.pop('caption', '')
        attr.classes.append('figure')
        attr.classes.append('output')
        return attr.to_html()
    elif cell_type == 'input':
        # Make sure 'python input' leads the class list.
        attr.classes.insert(0, 'python')
        attr.classes.insert(1, 'input')
        if cell.execution_count:
            attr.kvs['n'] = cell.execution_count
        return attr.to_markdown(format='{classes} {id} {kvs}')
    else:
        return attr.to_markdown()
Turn the attribute dict into an attribute string for the code block .
54,329
def dequote(s):
    """Strip one layer of matching single or double quotes from *s*."""
    if len(s) >= 2 and s[0] == s[-1] and s.startswith(('"', "'")):
        return s[1:-1]
    return s
Remove excess quotes from a string .
54,330
def data2uri(data, data_type):
    """Build a base64 data URI of the given short *data_type* ('png',
    'jpeg', ...) from a notebook output data dict keyed by MIME type."""
    MIME_MAP = {
        'image/jpeg': 'jpeg',
        'image/png': 'png',
        'text/plain': 'text',
        'text/html': 'html',
        'text/latex': 'latex',
        'application/javascript': 'html',
        'image/svg+xml': 'svg',
    }
    # NOTE(review): 'html' appears twice as a value, so the inverse lookup
    # keeps only the last entry ('application/javascript').
    inverse_map = {v: k for k, v in list(MIME_MAP.items())}
    mime_type = inverse_map[data_type]
    payload = data[mime_type].replace('\n', '')
    return r"data:{mime};base64,{data}".format(mime=mime_type, data=payload)
Convert base64 data into a data uri with the given data_type .
54,331
def magic(self, alias):
    """Return the IPython cell magic for *alias*, honoring known aliases."""
    try:
        return self.aliases[alias]
    except KeyError:
        return "%%{}\n".format(alias)
Returns the appropriate IPython code magic when called with an alias for a language .
54,332
def knit(self, input_file, opts_chunk='eval=FALSE'):
    """Run Knitr over the r-markdown *input_file* and return a file
    object containing the resulting markdown."""
    tmp_in = tempfile.NamedTemporaryFile(mode='w+')
    tmp_out = tempfile.NamedTemporaryFile(mode='w+')
    tmp_in.file.write(input_file.read())
    tmp_in.file.flush()
    tmp_in.file.seek(0)
    self._knit(tmp_in.name, tmp_out.name, opts_chunk)
    tmp_out.file.flush()
    return tmp_out
Use Knitr to convert the r - markdown input_file into markdown returning a file object .
54,333
def is_path_protected(path):
    """Return True if *path* should be protected by the terms middleware,
    i.e. it matches none of the configured exclusions."""
    for prefix in TERMS_EXCLUDE_URL_PREFIX_LIST:
        if path.startswith(prefix):
            return False
    for fragment in TERMS_EXCLUDE_URL_CONTAINS_LIST:
        if fragment in path:
            return False
    if path in TERMS_EXCLUDE_URL_LIST:
        return False
    if path.startswith(ACCEPT_TERMS_PATH):
        return False
    return True
Returns True if the given path is to be protected, otherwise False.
54,334
def process_request(self, request):
    """Middleware hook: redirect authenticated users on protected paths to
    the accept page for the first set of terms they have not agreed to."""
    LOGGER.debug('termsandconditions.middleware')
    current_path = request.META['PATH_INFO']
    # is_authenticated became a property in Django 1.10+.
    if DJANGO_VERSION <= (2, 0, 0):
        user_authenticated = request.user.is_authenticated()
    else:
        user_authenticated = request.user.is_authenticated
    if user_authenticated and is_path_protected(current_path):
        for term in TermsAndConditions.get_active_terms_not_agreed_to(
                request.user):
            qs = request.META['QUERY_STRING']
            current_path += '?' + qs if qs else ''
            return redirect_to_terms_accept(current_path, term.slug)
    return None
Process each request to app to ensure terms have been accepted
54,335
def get_context_data(self, **kwargs):
    """Add the configurable base template name to the view context."""
    context = super(TermsView, self).get_context_data(**kwargs)
    context['terms_base_template'] = getattr(
        settings, 'TERMS_BASE_TEMPLATE', DEFAULT_TERMS_BASE_TEMPLATE)
    return context
Pass additional context data
54,336
def get_initial(self):
    """Seed the accept form with the terms to accept and the returnTo URL
    taken from the query string."""
    LOGGER.debug('termsandconditions.views.AcceptTermsView.get_initial')
    terms = self.get_terms(self.kwargs)
    return_to = self.request.GET.get('returnTo', '/')
    return {'terms': terms, 'returnTo': return_to}
Override of CreateView method queries for which T&C to accept and catches returnTo from URL
54,337
def post(self, request, *args, **kwargs):
    """Record the user's acceptance of the posted terms, then redirect to
    the returnTo URL."""
    return_url = request.POST.get('returnTo', '/')
    terms_ids = request.POST.getlist('terms')
    if not terms_ids:
        return HttpResponseRedirect(return_url)
    # is_authenticated became a property in Django 1.10+.
    if DJANGO_VERSION <= (2, 0, 0):
        user_authenticated = request.user.is_authenticated()
    else:
        user_authenticated = request.user.is_authenticated
    if user_authenticated:
        user = request.user
    elif 'partial_pipeline' in request.session:
        # Mid social-auth pipeline: the user exists but is not logged in yet.
        user_pk = request.session['partial_pipeline']['kwargs']['user']['pk']
        user = User.objects.get(id=user_pk)
    else:
        return HttpResponseRedirect('/')
    store_ip_address = getattr(settings, 'TERMS_STORE_IP_ADDRESS', True)
    if store_ip_address:
        ip_address = request.META.get(getattr(
            settings, 'TERMS_IP_HEADER_NAME', DEFAULT_TERMS_IP_HEADER_NAME))
    else:
        ip_address = ""
    for terms_id in terms_ids:
        try:
            new_user_terms = UserTermsAndConditions(
                user=user,
                terms=TermsAndConditions.objects.get(pk=int(terms_id)),
                ip_address=ip_address)
            new_user_terms.save()
        except IntegrityError:
            # Already accepted; nothing to record.
            pass
    return HttpResponseRedirect(return_url)
Handles POST request .
54,338
def form_valid(self, form):
    """Render and email the terms, flashing success or failure, then
    redirect to the returnTo URL."""
    LOGGER.debug('termsandconditions.views.EmailTermsView.form_valid')
    template = get_template("termsandconditions/tc_email_terms.html")
    template_rendered = template.render(
        {"terms": form.cleaned_data.get('terms')})
    LOGGER.debug("Email Terms Body:")
    LOGGER.debug(template_rendered)
    try:
        send_mail(form.cleaned_data.get('email_subject', _('Terms')),
                  template_rendered,
                  settings.DEFAULT_FROM_EMAIL,
                  [form.cleaned_data.get('email_address')],
                  fail_silently=False)
        messages.add_message(self.request, messages.INFO,
                             _("Terms and Conditions Sent."))
    except SMTPException:
        messages.add_message(self.request, messages.ERROR,
                             _("An Error Occurred Sending Your Message."))
    self.success_url = form.cleaned_data.get('returnTo', '/') or '/'
    return super(EmailTermsView, self).form_valid(form)
Override of CreateView method sends the email .
54,339
def form_invalid(self, form):
    """Log invalid email form submissions and flash an error message."""
    LOGGER.debug("Invalid Email Form Submitted")
    messages.add_message(self.request, messages.ERROR,
                         _("Invalid Email Address."))
    return super(EmailTermsView, self).form_invalid(form)
Override of CreateView method logs invalid email form submissions .
54,340
def terms_required(view_func):
    """View decorator: redirect to the terms accept page until the logged-in
    user has agreed to all active terms."""
    @wraps(view_func, assigned=available_attrs(view_func))
    def _wrapped_view(request, *args, **kwargs):
        # is_authenticated became a property in Django 1.10+.
        if DJANGO_VERSION <= (2, 0, 0):
            user_authenticated = request.user.is_authenticated()
        else:
            user_authenticated = request.user.is_authenticated
        if (not user_authenticated or
                not TermsAndConditions.get_active_terms_not_agreed_to(
                    request.user)):
            return view_func(request, *args, **kwargs)
        current_path = request.path
        login_url_parts = list(urlparse(ACCEPT_TERMS_PATH))
        querystring = QueryDict(login_url_parts[4], mutable=True)
        querystring['returnTo'] = current_path
        login_url_parts[4] = querystring.urlencode(safe='/')
        return HttpResponseRedirect(urlunparse(login_url_parts))
    return _wrapped_view
This decorator checks to see if the user is logged in and if so if they have accepted the site terms .
54,341
def get_active(slug=DEFAULT_TERMS_SLUG):
    """Return the latest active terms for *slug*, caching the result;
    None (uncached) when no matching terms exist."""
    cache_key = 'tandc.active_terms_' + slug
    active_terms = cache.get(cache_key)
    if active_terms is None:
        try:
            active_terms = TermsAndConditions.objects.filter(
                date_active__isnull=False,
                date_active__lte=timezone.now(),
                slug=slug).latest('date_active')
            cache.set(cache_key, active_terms, TERMS_CACHE_SECONDS)
        except TermsAndConditions.DoesNotExist:
            LOGGER.error(
                "Requested Terms and Conditions that Have Not Been Created.")
            return None
    return active_terms
Finds the latest of a particular terms and conditions
54,342
def get_active_terms_ids():
    """Return the IDs of all active terms, one per slug (latest wins),
    ordered by slug, caching the result."""
    active_terms_ids = cache.get('tandc.active_terms_ids')
    if active_terms_ids is None:
        active_terms_dict = {}
        active_terms_ids = []
        active_terms_set = TermsAndConditions.objects.filter(
            date_active__isnull=False,
            date_active__lte=timezone.now()).order_by('date_active')
        # Later date_active overwrites earlier, keeping the newest per slug.
        for active_terms in active_terms_set:
            active_terms_dict[active_terms.slug] = active_terms.id
        active_terms_dict = OrderedDict(
            sorted(active_terms_dict.items(), key=lambda t: t[0]))
        for terms in active_terms_dict:
            active_terms_ids.append(active_terms_dict[terms])
        cache.set('tandc.active_terms_ids', active_terms_ids,
                  TERMS_CACHE_SECONDS)
    return active_terms_ids
Returns a list of the IDs of all terms and conditions.
54,343
def get_active_terms_list():
    """Return all latest active terms, ordered by slug, caching the result."""
    active_terms_list = cache.get('tandc.active_terms_list')
    if active_terms_list is None:
        active_terms_list = TermsAndConditions.objects.filter(
            id__in=TermsAndConditions.get_active_terms_ids()).order_by('slug')
        cache.set('tandc.active_terms_list', active_terms_list,
                  TERMS_CACHE_SECONDS)
    return active_terms_list
Returns all the latest active terms and conditions
54,344
def get_active_terms_not_agreed_to(user):
    """Return the active terms *user* has not yet agreed to, caching per
    username; users excluded by permission get an empty list."""
    if TERMS_EXCLUDE_USERS_WITH_PERM is not None:
        if (user.has_perm(TERMS_EXCLUDE_USERS_WITH_PERM)
                and not user.is_superuser):
            return []
    cache_key = 'tandc.not_agreed_terms_' + user.get_username()
    not_agreed_terms = cache.get(cache_key)
    if not_agreed_terms is None:
        try:
            LOGGER.debug("Not Agreed Terms")
            not_agreed_terms = TermsAndConditions.get_active_terms_list(
            ).exclude(
                userterms__in=UserTermsAndConditions.objects.filter(user=user)
            ).order_by('slug')
            cache.set(cache_key, not_agreed_terms, TERMS_CACHE_SECONDS)
        except (TypeError, UserTermsAndConditions.DoesNotExist):
            return []
    return not_agreed_terms
Checks to see if a specified user has agreed to all the latest terms and conditions
54,345
def show_terms_if_not_agreed(context, field=TERMS_HTTP_PATH_FIELD):
    """Template tag helper: return context for a terms modal when the
    requesting user has unagreed terms on a protected path."""
    request = context['request']
    url = urlparse(request.META[field])
    not_agreed_terms = TermsAndConditions.get_active_terms_not_agreed_to(
        request.user)
    if not_agreed_terms and is_path_protected(url.path):
        return {'not_agreed_terms': not_agreed_terms, 'returnTo': url.path}
    return {}
Displays a modal on the current page if a user has not yet agreed to the given terms. If terms are not specified, the default slug is used.
54,346
def user_accept_terms(backend, user, uid, social_user=None, *args, **kwargs):
    """Social-auth pipeline step: redirect to the terms accept page if the
    new user still has unagreed terms."""
    LOGGER.debug('user_accept_terms')
    if TermsAndConditions.get_active_terms_not_agreed_to(user):
        return redirect_to_terms_accept('/')
    return {'social_user': social_user, 'user': user}
Check if the user has accepted the terms and conditions after creation .
54,347
def redirect_to_terms_accept(current_path='/', slug='default'):
    """Redirect to the terms accept page, preserving the return path in
    the query string."""
    redirect_url_parts = list(urlparse(ACCEPT_TERMS_PATH))
    if slug != 'default':
        redirect_url_parts[2] += slug
    querystring = QueryDict(redirect_url_parts[4], mutable=True)
    querystring[TERMS_RETURNTO_PARAM] = current_path
    redirect_url_parts[4] = querystring.urlencode(safe='/')
    return HttpResponseRedirect(urlunparse(redirect_url_parts))
Redirect the user to the terms and conditions accept page .
54,348
def user_terms_updated(sender, **kwargs):
    """Signal handler: clear the per-user not-agreed cache when a user's
    terms acceptance changes."""
    LOGGER.debug("User T&C Updated Signal Handler")
    instance = kwargs.get('instance')
    if instance.user:
        cache.delete(
            'tandc.not_agreed_terms_' + instance.user.get_username())
Called when user terms and conditions is changed - to force cache clearing
54,349
def terms_updated(sender, **kwargs):
    """Signal handler: clear all terms-related caches when terms change."""
    LOGGER.debug("T&C Updated Signal Handler")
    cache.delete('tandc.active_terms_ids')
    cache.delete('tandc.active_terms_list')
    instance = kwargs.get('instance')
    if instance.slug:
        cache.delete('tandc.active_terms_' + instance.slug)
    # Every user's not-agreed cache may now be stale.
    for utandc in UserTermsAndConditions.objects.all():
        cache.delete('tandc.not_agreed_terms_' + utandc.user.get_username())
Called when terms and conditions is changed - to force cache clearing
54,350
def paginate(parser, token, paginator_class=None):
    """Parse a ``{% paginate %}`` template tag and return a PaginateNode.

    Raises template.TemplateSyntaxError when arguments are missing or
    invalid, or when a nested context variable is passed without an
    ``as`` variable name.
    """
    try:
        tag_name, tag_args = token.contents.split(None, 1)
    except ValueError:
        msg = '%r tag requires arguments' % token.contents.split()[0]
        raise template.TemplateSyntaxError(msg)
    match = PAGINATE_EXPRESSION.match(tag_args)
    if match is None:
        msg = 'Invalid arguments for %r tag' % tag_name
        raise template.TemplateSyntaxError(msg)
    kwargs = match.groupdict()
    objects = kwargs.pop('objects')
    if '.' in objects and kwargs['var_name'] is None:
        # Fixed typo in the user-facing message: 'argumnent' -> 'argument'.
        msg = (
            '%(tag)r tag requires a variable name `as` argument if the '
            'queryset is provided as a nested context variable (%(objects)s). '
            'You must either pass a direct queryset (e.g. taking advantage '
            'of the `with` template tag) or provide a new variable name to '
            'store the resulting queryset (e.g. `%(tag)s %(objects)s as '
            'objects`).'
        ) % {'tag': tag_name, 'objects': objects}
        raise template.TemplateSyntaxError(msg)
    return PaginateNode(paginator_class, objects, **kwargs)
Paginate objects .
54,351
def get_pages(parser, token):
    """Parse a ``{% get_pages %}`` tag, optionally ``as <var_name>``,
    defaulting the context variable to 'pages'."""
    try:
        tag_name, args = token.contents.split(None, 1)
    except ValueError:
        var_name = 'pages'
    else:
        args = args.split()
        if len(args) == 2 and args[0] == 'as':
            var_name = args[1]
        else:
            msg = 'Invalid arguments for %r tag' % tag_name
            raise template.TemplateSyntaxError(msg)
    return GetPagesNode(var_name)
Add to context the list of page links .
54,352
def show_pages(parser, token):
    """Parse a ``{% show_pages %}`` tag; it takes no arguments."""
    if len(token.contents.split()) != 1:
        msg = '%r tag takes no arguments' % token.contents.split()[0]
        raise template.TemplateSyntaxError(msg)
    return ShowPagesNode()
Show page links .
54,353
def show_current_number(parser, token):
    """Parse a ``{% show_current_number %}`` tag, extracting the optional
    number, querystring key, and ``as`` variable name."""
    try:
        tag_name, args = token.contents.split(None, 1)
    except ValueError:
        # No arguments given.
        key = None
        number = None
        # NOTE(review): this takes the first *character* of the contents,
        # probably intended token.contents.split()[0]; tag_name is unused
        # on this path, so behavior is unaffected.
        tag_name = token.contents[0]
        var_name = None
    else:
        match = SHOW_CURRENT_NUMBER_EXPRESSION.match(args)
        if match is None:
            msg = 'Invalid arguments for %r tag' % tag_name
            raise template.TemplateSyntaxError(msg)
        groupdict = match.groupdict()
        key = groupdict['key']
        number = groupdict['number']
        var_name = groupdict['var_name']
    return ShowCurrentNumberNode(number, key, var_name)
Show the current page number or insert it in the context .
54,354
def page_template(template, key=PAGE_LABEL):
    """Decorator: make a view switch to *template* when the request is
    Ajax and the querystring key matches *key*."""
    def decorator(view):
        @wraps(view)
        def decorated(request, *args, **kwargs):
            extra_context = kwargs.setdefault('extra_context', {})
            extra_context['page_template'] = template
            querystring_key = request.GET.get(
                QS_KEY, request.POST.get(QS_KEY, PAGE_LABEL))
            if request.is_ajax() and querystring_key == key:
                kwargs[TEMPLATE_VARNAME] = template
            return view(request, *args, **kwargs)
        return decorated
    return decorator
Return a view dynamically switching template if the request is Ajax .
54,355
def _get_template ( querystring_key , mapping ) : default = None try : template_and_keys = mapping . items ( ) except AttributeError : template_and_keys = mapping for template , key in template_and_keys : if key is None : key = PAGE_LABEL default = template if key == querystring_key : return template return default
Return the template corresponding to the given querystring_key .
54,356
def get_queryset(self):
    """Return the list of items for this view, cloning self.queryset when
    possible, or falling back to the model's default manager."""
    if self.queryset is not None:
        queryset = self.queryset
        if hasattr(queryset, '_clone'):
            queryset = queryset._clone()
        return queryset
    if self.model is not None:
        return self.model._default_manager.all()
    msg = '{0} must define ``queryset`` or ``model``'
    raise ImproperlyConfigured(msg.format(self.__class__.__name__))
Get the list of items for this view .
54,357
def get_context_object_name(self, object_list):
    """Return the context variable name for *object_list*: the explicit
    name if set, a '<model>_list' name when a model is known, else None."""
    if self.context_object_name:
        return self.context_object_name
    if hasattr(object_list, 'model'):
        object_name = object_list.model._meta.object_name.lower()
        return smart_str('{0}_list'.format(object_name))
    return None
Get the name of the item to be used in the context .
54,358
def get_page_template(self, **kwargs):
    """Build the per-page template path from the model's app label and
    name plus the view's template suffixes."""
    opts = self.object_list.model._meta
    return '{0}/{1}{2}{3}.html'.format(
        opts.app_label,
        opts.object_name.lower(),
        self.template_name_suffix,
        self.page_template_suffix,
    )
Return the template name used for this request .
54,359
def render_link(self):
    """Render this page as an HTML link (or current/previous/next marker).

    Chooses a template based on whether the page is the current one and,
    when ``USE_NEXT_PREVIOUS_LINKS`` is enabled, whether it is the
    previous/next page.  Loaded templates are cached at module level.
    """
    extra_context = {
        'add_nofollow': settings.ADD_NOFOLLOW,
        'page': self,
        'querystring_key': self.querystring_key,
    }
    if self.is_current:
        template_name = 'el_pagination/current_link.html'
    else:
        template_name = 'el_pagination/page_link.html'
    if settings.USE_NEXT_PREVIOUS_LINKS:
        # Previous/next markers override the generic templates.
        if self.is_previous:
            template_name = 'el_pagination/previous_link.html'
        if self.is_next:
            template_name = 'el_pagination/next_link.html'
    if template_name not in _template_cache:
        # Cache loaded templates to avoid repeated template loading.
        _template_cache[template_name] = loader.get_template(template_name)
    template = _template_cache[template_name]
    with self.context.push(**extra_context):
        return template.render(self.context.flatten())
Render the page as a link .
54,360
def previous(self):
    """Return the rendered previous page, or '' when on the first page."""
    page = self._page
    if not page.has_previous():
        return ''
    return self._endless_page(
        page.previous_page_number(), label=settings.PREVIOUS_LABEL)
Return the previous page .
54,361
def next(self):
    """Return the rendered next page, or '' when on the last page."""
    page = self._page
    if not page.has_next():
        return ''
    return self._endless_page(
        page.next_page_number(), label=settings.NEXT_LABEL)
Return the next page .
54,362
def start_index(self):
    """Return the 1-based index of the first item on this page.

    Returns 0 for an empty paginator; accounts for a custom-sized first
    page via ``paginator.first_page``.
    """
    pager = self.paginator
    if pager.count == 0:
        return 0
    if self.number == 1:
        return 1
    # Pages after the first start after the first page plus any full
    # regular pages before this one.
    return (self.number - 2) * pager.per_page + pager.first_page + 1
Return the 1 - based index of the first item on this page .
54,363
def end_index(self):
    """Return the 1-based index of the last item on this page.

    The last page ends at the total item count; earlier pages account
    for a custom-sized first page via ``paginator.first_page``.
    """
    pager = self.paginator
    if self.number == pager.num_pages:
        return pager.count
    return (self.number - 1) * pager.per_page + pager.first_page
Return the 1 - based index of the last item on this page .
54,364
def get_page_numbers(current_page, num_pages,
                     extremes=DEFAULT_CALLABLE_EXTREMES,
                     arounds=DEFAULT_CALLABLE_AROUNDS,
                     arrows=DEFAULT_CALLABLE_ARROWS):
    """Default callable for page listing.

    Produces a list mixing page numbers, ``None`` gap markers, and the
    string tokens 'first'/'previous'/'next'/'last' (arrow tokens only
    when *arrows* is true).
    """
    page_range = range(1, num_pages + 1)
    pages = []
    if current_page != 1:
        if arrows:
            pages.append('first')
        pages.append('previous')
    # Leading extreme pages.
    first = page_range[:extremes]
    pages.extend(first)
    # Trailing extreme pages (used below).
    last = page_range[-extremes:]
    # Window of pages around the current one, clamped to valid bounds.
    current_start = current_page - 1 - arounds
    if current_start < 0:
        current_start = 0
    current_end = current_page + arounds
    if current_end > num_pages:
        current_end = num_pages
    current = page_range[current_start:current_end]
    # Merge the leading extremes with the current window: insert a gap
    # marker when they are disjoint, drop the overlap otherwise.
    to_add = current
    if extremes:
        diff = current[0] - first[-1]
        if diff > 1:
            pages.append(None)
        elif diff < 1:
            to_add = current[abs(diff) + 1:]
    pages.extend(to_add)
    # Merge the current window with the trailing extremes the same way.
    if extremes:
        diff = last[0] - current[-1]
        to_add = last
        if diff > 1:
            pages.append(None)
        elif diff < 1:
            to_add = last[abs(diff) + 1:]
        pages.extend(to_add)
    if current_page != num_pages:
        pages.append('next')
        if arrows:
            pages.append('last')
    return pages
Default callable for page listing .
54,365
def _make_elastic_range(begin, end):
    """Generate an S-curved range of pages between *begin* and *end*.

    Values near the extremes are dense and grow apart toward the middle,
    using increasing factors produced by ``_iter_factors``.
    """
    # The larger the distance between begin and end, the larger the
    # starting factor, which bounds how many pages are generated.
    starting_factor = max(1, (end - begin) // 100)
    factor = _iter_factors(starting_factor)
    left_half, right_half = [], []
    left_val, right_val = begin, end
    # Fix: removed a redundant second ``right_val = end`` assignment that
    # immediately repeated the initialization above.
    while left_val < right_val:
        left_half.append(left_val)
        right_half.append(right_val)
        next_factor = next(factor)
        left_val = begin + next_factor
        right_val = end - next_factor
    if left_val == right_val:
        # The two halves met exactly on one value: include it once.
        left_half.append(left_val)
    right_half.reverse()
    return left_half + right_half
Generate an S - curved range of pages .
54,366
def get_elastic_page_numbers(current_page, num_pages):
    """Alternative callable for page listing.

    For ten or fewer pages, lists them all; otherwise builds two
    S-curved ranges (before and after the current page) via
    ``_make_elastic_range``, bracketed by arrow tokens.
    """
    if num_pages <= 10:
        # Few pages: just list them all, no arrows needed.
        return list(range(1, num_pages + 1))
    if current_page == 1:
        pages = [1]
    else:
        pages = ['first', 'previous']
        pages.extend(_make_elastic_range(1, current_page))
    if current_page != num_pages:
        # Skip the first element: current_page is already included above.
        pages.extend(_make_elastic_range(current_page, num_pages)[1:])
        pages.extend(['next', 'last'])
    return pages
Alternative callable for page listing .
54,367
def get_prepopulated_value(field, instance):
    """Return the preliminary slug value based on ``field.populate_from``.

    ``populate_from`` is either a callable (invoked with the instance)
    or the name of an instance attribute/method.
    """
    if callable(field.populate_from):
        # AutoSlugField(populate_from=lambda instance: ...)
        return field.populate_from(instance)
    # AutoSlugField(populate_from='attr_or_method_name')
    attr = getattr(instance, field.populate_from)
    # Fix: the previous ``callable(attr) and attr() or attr`` idiom
    # returned the *method object* whenever the call returned a falsy
    # value (e.g. '' or 0); the conditional expression below returns the
    # call's result unconditionally when attr is callable.
    return attr() if callable(attr) else attr
Returns preliminary value based on populate_from .
54,368
def get_uniqueness_lookups(field, instance, unique_with):
    """Yield (lookup, value) pairs used to ensure uniqueness of a slug.

    Each entry of *unique_with* names a sibling field (optionally with a
    ``__``-nested lookup).  Date fields expand into year/month/day
    lookups up to the requested granularity; related objects recurse one
    level.  Raises ValueError for misconfigured constraints.
    """
    for original_lookup_name in unique_with:
        if '__' in original_lookup_name:
            # Split off one level of nesting, e.g. 'date__month'.
            field_name, inner_lookup = original_lookup_name.split('__', 1)
        else:
            field_name, inner_lookup = original_lookup_name, None
        try:
            other_field = instance._meta.get_field(field_name)
        except FieldDoesNotExist:
            raise ValueError('Could not find attribute %s.%s referenced'
                             ' by %s.%s (see constraint `unique_with`)'
                             % (instance._meta.object_name, field_name,
                                instance._meta.object_name, field.name))
        if field == other_field:
            # A field cannot constrain uniqueness against itself.
            raise ValueError('Attribute %s.%s references itself in `unique_with`.'
                             ' Please use "unique=True" for this case.'
                             % (instance._meta.object_name, field_name))
        value = getattr(instance, field_name)
        if not value:
            if other_field.blank:
                field_object = instance._meta.get_field(field_name)
                if isinstance(field_object, ForeignKey):
                    # Blank FK: constrain on NULL-ness instead of a value.
                    lookup = '%s__isnull' % field_name
                    yield lookup, True
                break
            raise ValueError('Could not check uniqueness of %s.%s with'
                             ' respect to %s.%s because the latter is empty.'
                             ' Please ensure that "%s" is declared *after*'
                             ' all fields listed in unique_with.'
                             % (instance._meta.object_name, field.name,
                                instance._meta.object_name, field_name,
                                field.name))
        if isinstance(other_field, DateField):
            # Default date granularity is the full day.
            inner_lookup = inner_lookup or 'day'
            if '__' in inner_lookup:
                raise ValueError('The `unique_with` constraint in %s.%s'
                                 ' is set to "%s", but AutoSlugField only'
                                 ' accepts one level of nesting for dates'
                                 ' (e.g. "date__month").'
                                 % (instance._meta.object_name, field.name,
                                    original_lookup_name))
            parts = ['year', 'month', 'day']
            try:
                granularity = parts.index(inner_lookup) + 1
            except ValueError:
                raise ValueError('expected one of %s, got "%s" in "%s"'
                                 % (parts, inner_lookup, original_lookup_name))
            else:
                # Yield one lookup per date component up to the granularity.
                for part in parts[:granularity]:
                    lookup = '%s__%s' % (field_name, part)
                    yield lookup, getattr(value, part)
        else:
            if inner_lookup:
                if not hasattr(value, '_meta'):
                    raise ValueError('Could not resolve lookup "%s" in `unique_with` of %s.%s'
                                     % (original_lookup_name,
                                        instance._meta.object_name, field.name))
                # Recurse one level into the related object.
                for inner_name, inner_value in get_uniqueness_lookups(
                        field, value, [inner_lookup]):
                    yield original_lookup_name, inner_value
            else:
                yield field_name, value
Returns a dict-able iterable of (lookup, value) tuples used to ensure uniqueness of a slug.
54,369
def derivative_colors(colors):
    """Return the names of valid color variants for the given base colors.

    Each base color yields its 'on_', 'bright_' and 'on_bright_'
    variants.
    """
    prefixes = ('on_', 'bright_', 'on_bright_')
    return {prefix + color for prefix in prefixes for color in colors}
Return the names of valid color variants given the base colors .
54,370
def split_into_formatters(compound):
    """Split a possibly compound format string into segments.

    Prefix words ('no', 'on', 'bright', 'on_bright') merge with the word
    that follows them, so e.g. 'on_bright_red' stays a single segment.
    """
    segments = []
    prefixes = ('no', 'on', 'bright', 'on_bright')
    for word in compound.split('_'):
        if segments and segments[-1] in prefixes:
            # Previous word was a bare prefix: fuse this word onto it.
            segments[-1] = segments[-1] + '_' + word
        else:
            segments.append(word)
    return segments
Split a possibly compound format string into segments .
54,371
def location(self, x=None, y=None):
    """Temporarily move the cursor, restoring its position afterwards.

    NOTE(review): written as a generator with try/finally; presumably
    wrapped by ``contextlib.contextmanager`` at the class level (the
    decorator is not visible in this chunk) -- confirm.

    Args:
        x: optional column to move to.
        y: optional row to move to.
    """
    # Save the current cursor position before moving.
    self.stream.write(self.save)
    if x is not None and y is not None:
        self.stream.write(self.move(y, x))
    elif x is not None:
        self.stream.write(self.move_x(x))
    elif y is not None:
        self.stream.write(self.move_y(y))
    try:
        yield
    finally:
        # Always restore the saved position, even if the body raises.
        self.stream.write(self.restore)
Return a context manager for temporarily moving the cursor .
54,372
def fullscreen(self):
    """Enter fullscreen (alternate screen) mode, restoring normal mode on exit.

    NOTE(review): generator-based context manager; presumably decorated
    with ``contextlib.contextmanager`` outside this chunk -- confirm.
    """
    self.stream.write(self.enter_fullscreen)
    try:
        yield
    finally:
        # Leave the alternate screen even if the body raises.
        self.stream.write(self.exit_fullscreen)
Return a context manager that enters fullscreen mode while inside it and restores normal mode on leaving .
54,373
def hidden_cursor(self):
    """Hide the cursor while inside the block, making it visible on exit.

    NOTE(review): generator-based context manager; presumably decorated
    with ``contextlib.contextmanager`` outside this chunk -- confirm.
    """
    self.stream.write(self.hide_cursor)
    try:
        yield
    finally:
        # Re-show the cursor even if the body raises.
        self.stream.write(self.normal_cursor)
Return a context manager that hides the cursor while inside it and makes it visible on leaving .
54,374
def _resolve_formatter(self, attr):
    """Resolve a color, capability, or compound name into a callable.

    Tries, in order: a plain color name, a single compoundable
    capability, a compound of compoundable segments, and finally a
    parametrized terminal capability.
    """
    if attr in COLORS:
        return self._resolve_color(attr)
    elif attr in COMPOUNDABLES:
        # Simple capability like bold or underline.
        return self._formatting_string(self._resolve_capability(attr))
    else:
        formatters = split_into_formatters(attr)
        if all(f in COMPOUNDABLES for f in formatters):
            # Compound like 'bold_on_red': concatenate each segment's
            # escape code into one formatting string.
            return self._formatting_string(
                u''.join(self._resolve_formatter(s) for s in formatters))
        else:
            # Unknown name: treat it as a parametrized capability.
            return ParametrizingString(self._resolve_capability(attr))
Resolve a sugary or plain capability name, color name, or compound formatting function name into a callable capability.
54,375
def _resolve_capability(self, atom):
    """Return the escape code for a capname or sugary name, or u''.

    Sugary names are first translated to terminfo capnames via
    ``self._sugar``.
    """
    code = tigetstr(self._sugar.get(atom, atom))
    if code:
        # tigetstr returns bytes; latin1 maps each byte to the same
        # code point, giving a lossless str representation.
        return code.decode('latin1')
    return u''
Return a terminal code for a capname or a sugary name or an empty Unicode .
54,376
def _resolve_color(self, color):
    """Resolve a color name like 'red' or 'on_bright_green' into a callable.

    'on_' selects the background capability; 'bright_' offsets the base
    curses color index by 8 for the bright variants.
    """
    color_cap = (self._background_color if 'on_' in color else
                 self._foreground_color)
    # Bright colors are the standard 8 colors shifted up by 8.
    offset = 8 if 'bright_' in color else 0
    # The base color is the last underscore-separated word.
    base_color = color.rsplit('_', 1)[-1]
    return self._formatting_string(
        color_cap(getattr(curses, 'COLOR_' + base_color.upper()) + offset))
Resolve a color like red or on_bright_green into a callable capability .
54,377
def send_login_code(self, code, context, **kwargs):
    """Send a login code via SMS using the Twilio client.

    Args:
        code: login-code object; ``code.user.phone_number`` is the
            recipient.
        context: template context used to render the SMS body.
        **kwargs: accepted for interface compatibility; unused here.
    """
    # Prefer an explicitly configured sender number, falling back to
    # settings. NOTE(review): getattr without a default raises
    # AttributeError when DEFAULT_FROM_NUMBER is unset -- confirm intended.
    from_number = self.from_number or getattr(settings, 'DEFAULT_FROM_NUMBER')
    sms_content = render_to_string(self.template_name, context)
    self.twilio_client.messages.create(
        to=code.user.phone_number,
        from_=from_number,
        body=sms_content)
Send a login code via SMS
54,378
def load(filename, **kwargs):
    """Load the given t7 file; kwargs are forwarded to T7Reader.

    The file is opened in binary mode and closed automatically.
    """
    with open(filename, 'rb') as f:
        reader = T7Reader(f, **kwargs)
        return reader.read_obj()
Loads the given t7 file using default settings ; kwargs are forwarded to T7Reader .
54,379
def check(self):
    """Return True if ``interval`` seconds have passed since the last run.

    ``self.lastrun`` is the epoch timestamp of the previous run.
    """
    # Simplified: return the comparison directly instead of an if/else
    # that returns True/False.
    return self.lastrun + self.interval < time.time()
Returns True if interval seconds have passed since it last ran
54,380
def make_union(*transformers, **kwargs):
    """Construct a FeatureUnion from the given transformers.

    Keyword options:
        n_jobs (int): number of parallel jobs (default 1).
        concatenate (bool): whether to stack the results (default True).

    Raises:
        TypeError: when any other keyword argument is passed.
    """
    n_jobs = kwargs.pop('n_jobs', 1)
    concatenate = kwargs.pop('concatenate', True)
    if kwargs:
        unexpected = list(kwargs.keys())[0]
        raise TypeError('Unknown keyword arguments: "{}"'.format(unexpected))
    return FeatureUnion(
        _name_estimators(transformers),
        n_jobs=n_jobs,
        concatenate=concatenate,
    )
Construct a FeatureUnion from the given transformers .
54,381
def get_feature_names(self):
    """Return feature names from all transformers, prefixed by name.

    Raises AttributeError when a transformer does not implement
    ``get_feature_names``.
    """
    names = []
    for name, trans, weight in self._iter():
        if not hasattr(trans, 'get_feature_names'):
            raise AttributeError(
                "Transformer %s (type %s) does not "
                "provide get_feature_names." % (str(name), type(trans).__name__))
        names.extend(name + "__" + f for f in trans.get_feature_names())
    return names
Get feature names from all transformers .
54,382
def fit(self, X, y=None):
    """Fit all transformers on X, in parallel via a process Pool.

    Returns self, with the fitted transformers written back into
    ``self.transformer_list``.
    """
    self.transformer_list = list(self.transformer_list)
    self._validate_transformers()
    with Pool(self.n_jobs) as pool:
        # NOTE(review): the guard checks hasattr(trans, 'col_pick') but
        # then subscripts trans['col_pick'] -- confirm trans supports
        # both attribute and item access for column selection.
        transformers = pool.starmap(
            _fit_one_transformer,
            ((trans,
              X[trans['col_pick']] if hasattr(trans, 'col_pick') else X,
              y)
             for _, trans, _ in self._iter()))
    self._update_transformer_list(transformers)
    return self
Fit all transformers using X .
54,383
def fit_transform(self, X, y=None, **fit_params):
    """Fit all transformers, transform X, and optionally concatenate.

    Returns an empty (n_samples, 0) array when no transformer produced
    output; otherwise the per-transformer results, horizontally stacked
    when ``self.concatenate`` is true (sparse-aware).
    """
    self._validate_transformers()
    with Pool(self.n_jobs) as pool:
        # NOTE(review): hasattr(trans, 'col_pick') vs trans['col_pick']
        # mixes attribute and item access -- confirm trans supports both.
        result = pool.starmap(
            _fit_transform_one,
            ((trans, weight,
              X[trans['col_pick']] if hasattr(trans, 'col_pick') else X,
              y)
             for name, trans, weight in self._iter()))
    if not result:
        # All transformers are None: return an empty feature matrix.
        return np.zeros((X.shape[0], 0))
    Xs, transformers = zip(*result)
    self._update_transformer_list(transformers)
    if self.concatenate:
        if any(sparse.issparse(f) for f in Xs):
            # Any sparse member forces a sparse stack.
            Xs = sparse.hstack(Xs).tocsr()
        else:
            Xs = np.hstack(Xs)
    return Xs
Fit all transformers transform the data and concatenate results .
54,384
def transform(self, X):
    """Transform X separately by each transformer and concatenate results.

    Returns an empty (n_samples, 0) array when there are no results;
    otherwise the stacked (sparse-aware) outputs when
    ``self.concatenate`` is true, or the raw list of outputs.
    """
    with Pool(self.n_jobs) as pool:
        # NOTE(review): hasattr(trans, 'col_pick') vs trans['col_pick']
        # mixes attribute and item access -- confirm trans supports both.
        Xs = pool.starmap(
            _transform_one,
            ((trans, weight,
              X[trans['col_pick']] if hasattr(trans, 'col_pick') else X)
             for name, trans, weight in self._iter()))
    if not Xs:
        # All transformers are None: return an empty feature matrix.
        return np.zeros((X.shape[0], 0))
    if self.concatenate:
        if any(sparse.issparse(f) for f in Xs):
            Xs = sparse.hstack(Xs).tocsr()
        else:
            Xs = np.hstack(Xs)
    return Xs
Transform X separately by each transformer concatenate results .
54,385
def split_batches(self, data, minibatch_size=None):
    """Split *data* into minibatches of at most *minibatch_size* rows.

    Args:
        data: list/tuple, pandas DataFrame, or anything exposing
            ``shape`` and slicing (e.g. numpy arrays, sparse matrices).
        minibatch_size: rows per batch; defaults to
            ``self.minibatch_size`` when None.

    Returns:
        A list of slices of *data*, in order.
    """
    if minibatch_size is None:  # fix: identity check instead of ``== None``
        minibatch_size = self.minibatch_size
    if isinstance(data, (list, tuple)):
        len_data = len(data)
    else:
        len_data = data.shape[0]
    num_batches = int(ceil(len_data / minibatch_size))
    if isinstance(data, pd.DataFrame):
        # DataFrames need positional slicing via .iloc.
        return [data.iloc[x * minibatch_size:(x + 1) * minibatch_size]
                for x in range(num_batches)]
    return [data[x * minibatch_size:min(len_data, (x + 1) * minibatch_size)]
            for x in range(num_batches)]
Split data into minibatches with a specified size
54,386
def merge_batches ( self , data ) : if isinstance ( data [ 0 ] , ssp . csr_matrix ) : return ssp . vstack ( data ) if isinstance ( data [ 0 ] , pd . DataFrame ) or isinstance ( data [ 0 ] , pd . Series ) : return pd . concat ( data ) return [ item for sublist in data for item in sublist ]
Merge a list of data minibatches into one single instance representing the data
54,387
def shuffle_batch(self, texts, labels=None, seed=None):
    """Shuffle *texts* (and *labels*, if given) with one shared permutation.

    Args:
        texts: list of samples.
        labels: optional list of labels, shuffled in lockstep.
        seed: optional seed for a reproducible shuffle.

    Returns:
        The shuffled texts, or a (texts, labels) tuple when labels are
        supplied.
    """
    if seed is not None:  # fix: identity check instead of ``!= None``
        random.seed(seed)
    order = list(range(len(texts)))
    random.shuffle(order)
    texts = [texts[i] for i in order]
    # fix: ``labels == None`` is ambiguous for array-like labels; use
    # an identity check.
    if labels is None:
        return texts
    labels = [labels[i] for i in order]
    return texts, labels
Shuffle a list of samples as well as the labels if specified
54,388
def demeshgrid(arr):
    """Collapse an ndarray produced by meshgrid back into a 1D array.

    Finds the axis along which values vary while being constant along
    every other axis, and returns that 1D slice.  Returns None
    (implicitly) when no such axis exists.
    """
    ndim = len(arr.shape)
    for axis in range(ndim):
        # Two parallel slices along `axis`, taken at index 0 and 1 of
        # every other dimension; equality means the array is constant
        # across those dimensions.
        at_zero = [0] * ndim
        at_one = [1] * ndim
        at_zero[axis] = slice(None)
        at_one[axis] = slice(None)
        if (arr[tuple(at_zero)] == arr[tuple(at_one)]).all():
            return arr[tuple(at_zero)]
Turns an ndarray created by a meshgrid back into a 1D array
54,389
def timeline_slider(self, text='Time', ax=None, valfmt=None, color=None):
    """Create a timeline slider controlling the animation's current frame.

    Args:
        text (str): label displayed next to the slider.
        ax: optional matplotlib axes hosting the slider; a default axes
            below the plot is created when omitted.
        valfmt (str): printf-style format for the slider value; chosen
            automatically for datetime/timedelta timelines when None.
        color: slider color, forwarded to matplotlib's Slider.
    """
    if ax is None:
        # Make room at the bottom of the figure and place the slider there.
        adjust_plot = {'bottom': .2}
        rect = [.18, .05, .5, .03]
        plt.subplots_adjust(**adjust_plot)
        self.slider_ax = plt.axes(rect)
    else:
        self.slider_ax = ax
    if valfmt is None:
        # Datetime-like timelines need plain string formatting.
        if (np.issubdtype(self.timeline.t.dtype, np.datetime64)
                or np.issubdtype(self.timeline.t.dtype, np.timedelta64)):
            valfmt = '%s'
        else:
            valfmt = '%1.2f'
    if self.timeline.log:
        # Display log-scaled timelines as powers of ten.
        valfmt = '$10^{%s}$' % valfmt
    self.slider = Slider(
        self.slider_ax, text, 0, self.timeline._len - 1,
        valinit=0,
        valfmt=(valfmt + self.timeline.units),
        valstep=1,
        color=color)
    self._has_slider = True

    def set_time(t):
        # Slider callback: jump the timeline to the selected frame and,
        # when paused, redraw all blocks immediately.
        self.timeline.index = int(self.slider.val)
        self.slider.valtext.set_text(
            self.slider.valfmt % (self.timeline[self.timeline.index]))
        if self._pause:
            for block in self.blocks:
                block._update(self.timeline.index)
            self.fig.canvas.draw()
    self.slider.on_changed(set_time)
Creates a timeline slider .
54,390
def controls(self, timeline_slider_args=None, toggle_args=None):
    """Create interactive controls (timeline slider and play/pause toggle).

    Args:
        timeline_slider_args (dict): forwarded to ``timeline_slider``.
        toggle_args (dict): forwarded to ``toggle``.
    """
    # Fix: avoid mutable ``{}`` default arguments; None means "no
    # extra options". Passing an explicit dict still works as before.
    self.timeline_slider(**(timeline_slider_args or {}))
    self.toggle(**(toggle_args or {}))
Creates interactive controls for the animation
54,391
def save_gif(self, filename):
    """Save the animation as ``<filename>.gif`` using PillowWriter.

    Args:
        filename (str): output path without the '.gif' extension.
    """
    # Step the timeline back one frame so saving starts at the correct
    # index. NOTE(review): mirrors the same adjustment in ``save`` --
    # presumably compensates for the animation's frame advance; confirm.
    self.timeline.index -= 1
    self.animation.save(
        filename + '.gif',
        writer=PillowWriter(fps=self.timeline.fps))
Saves the animation to a gif
54,392
def save(self, *args, **kwargs):
    """Save the animation; all arguments are forwarded to ``animation.save``."""
    # Step the timeline back one frame so saving starts at the correct
    # index (same adjustment as in ``save_gif``).
    self.timeline.index -= 1
    self.animation.save(*args, **kwargs)
Saves an animation
54,393
def isin_alone(elems, line):
    """Return True if *line* (stripped, lowercased) equals one of *elems*."""
    stripped = line.strip().lower()
    return any(stripped == elem.lower() for elem in elems)
Check if an element from a list is the only element of a string .
54,394
def isin_start(elems, line):
    """Return True if *line* (left-stripped, lowercased) starts with an element.

    *elems* may be a single value or a list. Note: the elements
    themselves are compared as given (not lowercased).
    """
    if type(elems) is not list:
        elems = [elems]
    prefix = line.lstrip().lower()
    return any(prefix.startswith(elem) for elem in elems)
Check if an element from a list starts a string .
54,395
def isin(elems, line):
    """Return True if any element of *elems* occurs in *line* (lowercased)."""
    lowered = line.lower()
    return any(elem in lowered for elem in elems)
Check if an element from a list is in a string .
54,396
def get_leading_spaces(data):
    """Return the leading whitespace of *data* ('' when there is none)."""
    match = re.match(r'^(\s*)', data)
    return match.group(1) if match else ''
Get the leading space of a string if it is not empty
54,397
def get_mandatory_sections(self):
    """Return section keys that are neither optional nor excluded."""
    skip = set(self.optional_sections) | set(self.excluded_sections)
    return [section for section in self.opt if section not in skip]
Get mandatory sections
54,398
def get_raw_not_managed(self, data):
    """Return the raw text of docstring sections that are not managed.

    Scans *data* for sections whose header matches one of the
    not-managed keys (see also/references/notes/...), strips their
    common leading indentation, and concatenates them so they can be
    reused as-is.
    """
    keys = ['also', 'ref', 'note', 'other', 'example', 'method', 'attr']
    elems = [self.opt[k] for k in self.opt if k in keys]
    data = data.splitlines()
    start = 0
    init = 0
    raw = ''
    spaces = None
    while start != -1:
        # start/end are relative to the remaining lines data[init:].
        start, end = self.get_next_section_lines(data[init:])
        if start != -1:
            init += start
            if isin_alone(elems, data[init]) and \
                    not isin_alone([self.opt[e] for e in self.excluded_sections],
                                   data[init]):
                # Matched a not-managed section header: capture the
                # section, removing one copy of the header's indentation
                # from each line.
                spaces = get_leading_spaces(data[init])
                if end != -1:
                    section = [d.replace(spaces, '', 1).rstrip()
                               for d in data[init:init + end]]
                else:
                    section = [d.replace(spaces, '', 1).rstrip()
                               for d in data[init:]]
                raw += '\n'.join(section) + '\n'
            # Skip past the header line (presumably header + underline
            # -- NOTE(review): confirm why exactly 2).
            init += 2
    return raw
Get elements not managed . They can be used as is .
54,399
def get_key_section_header(self, key, spaces):
    """Return the numpydoc section header for *key*, underlined with dashes.

    Args:
        key: section key passed to the base implementation.
        spaces: leading indentation prepended to both lines.
    """
    # Get the bare header title from the base class.
    header = super(NumpydocTools, self).get_key_section_header(key, spaces)
    # Numpydoc headers are underlined with '-' of the same length.
    header = spaces + header + '\n' + spaces + '-' * len(header) + '\n'
    return header
Get the key of the header section