idx int64 0 63k | question stringlengths 61 4.03k | target stringlengths 6 1.23k |
|---|---|---|
58,400 | def to_iso ( dt ) : if isinstance ( dt , datetime ) : return to_iso_datetime ( dt ) elif isinstance ( dt , date ) : return to_iso_date ( dt ) | Format a date or datetime into an ISO - 8601 string |
58,401 | def to_iso_datetime ( dt ) : if dt : date_str = to_iso_date ( dt ) time_str = '{dt.hour:02d}:{dt.minute:02d}:{dt.second:02d}' . format ( dt = dt ) if isinstance ( dt , datetime ) else '00:00:00' return 'T' . join ( ( date_str , time_str ) ) | Format a date or datetime into an ISO - 8601 datetime string . |
58,402 | def recursive_get ( obj , key ) : if not obj or not key : return parts = key . split ( '.' ) if isinstance ( key , basestring ) else key key = parts . pop ( 0 ) if isinstance ( obj , dict ) : value = obj . get ( key , None ) else : value = getattr ( obj , key , None ) return recursive_get ( value , parts ) if parts else value | Get an attribute or a key recursively . |
58,403 | def unique_string ( length = UUID_LENGTH ) : string = str ( uuid4 ( ) ) * int ( math . ceil ( length / float ( UUID_LENGTH ) ) ) return string [ : length ] if length else string | Generate a unique string |
58,404 | def safe_unicode ( string ) : if not isinstance ( string , basestring ) : string = unicode ( string ) if isinstance ( string , unicode ) : string = string . encode ( 'utf8' ) return string | Safely transform any object into utf8 encoded bytes |
58,405 | def redirect_territory ( level , code ) : territory = GeoZone . objects . valid_at ( datetime . now ( ) ) . filter ( code = code , level = 'fr:{level}' . format ( level = level ) ) . first ( ) return redirect ( url_for ( 'territories.territory' , territory = territory ) ) | Implicit redirect given the INSEE code . |
58,406 | def scheduled ( ) : for job in sorted ( schedulables ( ) , key = lambda s : s . name ) : for task in PeriodicTask . objects ( task = job . name ) : label = job_label ( task . task , task . args , task . kwargs ) echo ( SCHEDULE_LINE . format ( name = white ( task . name . encode ( 'utf8' ) ) , label = label , schedule = task . schedule_display ) . encode ( 'utf8' ) ) | List scheduled jobs . |
58,407 | def purge ( datasets , reuses , organizations ) : purge_all = not any ( ( datasets , reuses , organizations ) ) if purge_all or datasets : log . info ( 'Purging datasets' ) purge_datasets ( ) if purge_all or reuses : log . info ( 'Purging reuses' ) purge_reuses ( ) if purge_all or organizations : log . info ( 'Purging organizations' ) purge_organizations ( ) success ( 'Done' ) | Permanently remove data flagged as deleted . |
58,408 | def clean_parameters ( self , params ) : return { k : v for k , v in params . items ( ) if k in self . adapter . facets } | Only keep known parameters |
58,409 | def extract_sort ( self , params ) : sorts = params . pop ( 'sort' , [ ] ) sorts = [ sorts ] if isinstance ( sorts , basestring ) else sorts sorts = [ ( s [ 1 : ] , 'desc' ) if s . startswith ( '-' ) else ( s , 'asc' ) for s in sorts ] self . sorts = [ { self . adapter . sorts [ s ] : d } for s , d in sorts if s in self . adapter . sorts ] | Extract and build sort query from parameters |
58,410 | def extract_pagination ( self , params ) : try : params_page = int ( params . pop ( 'page' , 1 ) or 1 ) self . page = max ( params_page , 1 ) except : self . page = 1 try : params_page_size = params . pop ( 'page_size' , DEFAULT_PAGE_SIZE ) self . page_size = int ( params_page_size or DEFAULT_PAGE_SIZE ) except : self . page_size = DEFAULT_PAGE_SIZE self . page_start = ( self . page - 1 ) * self . page_size self . page_end = self . page_start + self . page_size | Extract and build pagination from parameters |
58,411 | def aggregate ( self , search ) : for f , facet in self . facets . items ( ) : agg = facet . get_aggregation ( ) if isinstance ( agg , Bucket ) : search . aggs . bucket ( f , agg ) elif isinstance ( agg , Pipeline ) : search . aggs . pipeline ( f , agg ) else : search . aggs . metric ( f , agg ) | Add aggregations representing the facets selected |
58,412 | def filter ( self , search ) : if not self . _filters : return search filters = Q ( 'match_all' ) for f in self . _filters . values ( ) : filters &= f return search . filter ( filters ) | Perform filtering instead of default post - filtering . |
58,413 | def query ( self , search , query ) : if not query : return search included , excluded = [ ] , [ ] for term in query . split ( ' ' ) : if not term . strip ( ) : continue if term . startswith ( '-' ) : excluded . append ( term [ 1 : ] ) else : included . append ( term ) if included : search = search . query ( self . multi_match ( included ) ) for term in excluded : search = search . query ( ~ self . multi_match ( [ term ] ) ) return search | Customize the search query if necessary . |
58,414 | def to_url ( self , url = None , replace = False , ** kwargs ) : params = copy . deepcopy ( self . filter_values ) if self . _query : params [ 'q' ] = self . _query if self . page_size != DEFAULT_PAGE_SIZE : params [ 'page_size' ] = self . page_size if kwargs : for key , value in kwargs . items ( ) : if not replace and key in params : if not isinstance ( params [ key ] , ( list , tuple ) ) : params [ key ] = [ params [ key ] , value ] else : params [ key ] . append ( value ) else : params [ key ] = value else : params [ 'page' ] = self . page href = Href ( url or request . base_url ) return href ( params ) | Serialize the query into an URL |
58,415 | def safestr ( value ) : if not value or isinstance ( value , ( int , float , bool , long ) ) : return value elif isinstance ( value , ( date , datetime ) ) : return value . isoformat ( ) else : return unicode ( value ) | Ensure type to string serialization |
58,416 | def yield_rows ( adapter ) : csvfile = StringIO ( ) writer = get_writer ( csvfile ) writer . writerow ( adapter . header ( ) ) yield csvfile . getvalue ( ) del csvfile for row in adapter . rows ( ) : csvfile = StringIO ( ) writer = get_writer ( csvfile ) writer . writerow ( row ) yield csvfile . getvalue ( ) del csvfile | Yield a dataset catalog line by line |
58,417 | def stream ( queryset_or_adapter , basename = None ) : if isinstance ( queryset_or_adapter , Adapter ) : adapter = queryset_or_adapter elif isinstance ( queryset_or_adapter , ( list , tuple ) ) : if not queryset_or_adapter : raise ValueError ( 'Type detection is not possible with an empty list' ) cls = _adapters . get ( queryset_or_adapter [ 0 ] . __class__ ) adapter = cls ( queryset_or_adapter ) elif isinstance ( queryset_or_adapter , db . BaseQuerySet ) : cls = _adapters . get ( queryset_or_adapter . _document ) adapter = cls ( queryset_or_adapter ) else : raise ValueError ( 'Unsupported object type' ) timestamp = datetime . now ( ) . strftime ( '%Y-%m-%d-%H-%M' ) headers = { b'Content-Disposition' : 'attachment; filename={0}-{1}.csv' . format ( basename or 'export' , timestamp ) , } streamer = stream_with_context ( yield_rows ( adapter ) ) return Response ( streamer , mimetype = "text/csv" , headers = headers ) | Stream a csv file from an object list |
58,418 | def header ( self ) : return ( super ( NestedAdapter , self ) . header ( ) + [ name for name , getter in self . get_nested_fields ( ) ] ) | Generate the CSV header row |
58,419 | def rows ( self ) : return ( self . nested_row ( o , n ) for o in self . queryset for n in getattr ( o , self . attribute , [ ] ) ) | Iterate over queryset objects |
58,420 | def nested_row ( self , obj , nested ) : row = self . to_row ( obj ) for name , getter in self . get_nested_fields ( ) : content = '' if getter is not None : try : content = safestr ( getter ( nested ) ) except Exception , e : log . error ( 'Error exporting CSV for {name}: {error}' . format ( name = self . __class__ . __name__ , error = e ) ) row . append ( content ) return row | Convert an object into a flat csv row |
58,421 | def transfer_request_notifications ( user ) : orgs = [ o for o in user . organizations if o . is_member ( user ) ] notifications = [ ] qs = Transfer . objects ( recipient__in = [ user ] + orgs , status = 'pending' ) qs = qs . only ( 'id' , 'created' , 'subject' ) for transfer in qs . no_dereference ( ) : notifications . append ( ( transfer . created , { 'id' : transfer . id , 'subject' : { 'class' : transfer . subject [ '_cls' ] . lower ( ) , 'id' : transfer . subject [ '_ref' ] . id } } ) ) return notifications | Notify user about pending transfer requests |
58,422 | def send ( subject , recipients , template_base , ** kwargs ) : sender = kwargs . pop ( 'sender' , None ) if not isinstance ( recipients , ( list , tuple ) ) : recipients = [ recipients ] debug = current_app . config . get ( 'DEBUG' , False ) send_mail = current_app . config . get ( 'SEND_MAIL' , not debug ) connection = send_mail and mail . connect or dummyconnection with connection ( ) as conn : for recipient in recipients : lang = i18n . _default_lang ( recipient ) with i18n . language ( lang ) : log . debug ( 'Sending mail "%s" to recipient "%s"' , subject , recipient ) msg = Message ( subject , sender = sender , recipients = [ recipient . email ] ) msg . body = theme . render ( 'mail/{0}.txt' . format ( template_base ) , subject = subject , sender = sender , recipient = recipient , ** kwargs ) msg . html = theme . render ( 'mail/{0}.html' . format ( template_base ) , subject = subject , sender = sender , recipient = recipient , ** kwargs ) conn . send ( msg ) | Send a given email to multiple recipients . |
58,423 | def public_dsn ( dsn ) : m = RE_DSN . match ( dsn ) if not m : log . error ( 'Unable to parse Sentry DSN' ) public = '{scheme}://{client_id}@{domain}/{site_id}' . format ( ** m . groupdict ( ) ) return public | Transform a standard Sentry DSN into a public one |
58,424 | def update ( ctx , migrate = False ) : msg = 'Update all dependencies' if migrate : msg += ' and migrate data' header ( msg ) info ( 'Updating Python dependencies' ) lrun ( 'pip install -r requirements/develop.pip' ) lrun ( 'pip install -e .' ) info ( 'Updating JavaScript dependencies' ) lrun ( 'npm install' ) if migrate : info ( 'Migrating database' ) lrun ( 'udata db migrate' ) | Perform a development update |
58,425 | def i18n ( ctx , update = False ) : header ( 'Extract translatable strings' ) info ( 'Extract Python strings' ) lrun ( 'python setup.py extract_messages' ) potfile = join ( ROOT , 'udata' , 'translations' , '{}.pot' . format ( I18N_DOMAIN ) ) with open ( potfile , 'rb' ) as infile : catalog = read_po ( infile , 'en' ) catalog . copyright_holder = 'Open Data Team' catalog . msgid_bugs_address = 'i18n@opendata.team' catalog . language_team = 'Open Data Team <i18n@opendata.team>' catalog . last_translator = 'Open Data Team <i18n@opendata.team>' catalog . revision_date = datetime . now ( LOCALTZ ) with open ( potfile , 'wb' ) as outfile : write_po ( outfile , catalog , width = 80 ) if update : lrun ( 'python setup.py update_catalog' ) info ( 'Extract JavaScript strings' ) keys = set ( ) catalog = { } catalog_filename = join ( ROOT , 'js' , 'locales' , '{}.en.json' . format ( I18N_DOMAIN ) ) if exists ( catalog_filename ) : with codecs . open ( catalog_filename , encoding = 'utf8' ) as f : catalog = json . load ( f ) globs = '*.js' , '*.vue' , '*.hbs' regexps = [ re . compile ( r'(?:|\.|\s|\{)_\(\s*(?:"|\')(.*?)(?:"|\')\s*(?:\)|,)' ) , re . compile ( r'v-i18n="(.*?)"' ) , re . compile ( r'"\{\{\{?\s*\'(.*?)\'\s*\|\s*i18n\}\}\}?"' ) , re . compile ( r'{{_\s*"(.*?)"\s*}}' ) , re . compile ( r'{{_\s*\'(.*?)\'\s*}}' ) , re . compile ( r'\:[a-z0-9_\-]+="\s*_\(\'(.*?)\'\)\s*"' ) , ] for directory , _ , _ in os . walk ( join ( ROOT , 'js' ) ) : glob_patterns = ( iglob ( join ( directory , g ) ) for g in globs ) for filename in itertools . chain ( * glob_patterns ) : print ( 'Extracting messages from {0}' . format ( green ( filename ) ) ) content = codecs . open ( filename , encoding = 'utf8' ) . read ( ) for regexp in regexps : for match in regexp . finditer ( content ) : key = match . group ( 1 ) key = key . replace ( '\\n' , '\n' ) keys . add ( key ) if key not in catalog : catalog [ key ] = key for key in catalog . 
keys ( ) : if key not in keys : del catalog [ key ] with codecs . open ( catalog_filename , 'w' , encoding = 'utf8' ) as f : json . dump ( catalog , f , sort_keys = True , indent = 4 , ensure_ascii = False , encoding = 'utf8' , separators = ( ',' , ': ' ) ) | Extract translatable strings |
58,426 | def output_json ( data , code , headers = None ) : resp = make_response ( json . dumps ( data ) , code ) resp . headers . extend ( headers or { } ) return resp | Use Flask JSON to serialize |
58,427 | def extract_name_from_path ( path ) : base_path , query_string = path . split ( '?' ) infos = base_path . strip ( '/' ) . split ( '/' ) [ 2 : ] if len ( infos ) > 1 : name = '{category} / {name}' . format ( category = infos [ 0 ] . title ( ) , name = infos [ 1 ] . replace ( '-' , ' ' ) . title ( ) ) else : name = '{category}' . format ( category = infos [ 0 ] . title ( ) ) return safe_unicode ( name ) | Return a readable name from a URL path . |
58,428 | def handle_unauthorized_file_type ( error ) : url = url_for ( 'api.allowed_extensions' , _external = True ) msg = ( 'This file type is not allowed.' 'The allowed file type list is available at {url}' ) . format ( url = url ) return { 'message' : msg } , 400 | Error occuring when the user try to upload a non - allowed file type |
58,429 | def authentify ( self , func ) : @ wraps ( func ) def wrapper ( * args , ** kwargs ) : if current_user . is_authenticated : return func ( * args , ** kwargs ) apikey = request . headers . get ( HEADER_API_KEY ) if apikey : try : user = User . objects . get ( apikey = apikey ) except User . DoesNotExist : self . abort ( 401 , 'Invalid API Key' ) if not login_user ( user , False ) : self . abort ( 401 , 'Inactive user' ) else : oauth2 . check_credentials ( ) return func ( * args , ** kwargs ) return wrapper | Authentify the user if credentials are given |
58,430 | def validate ( self , form_cls , obj = None ) : if 'application/json' not in request . headers . get ( 'Content-Type' ) : errors = { 'Content-Type' : 'expecting application/json' } self . abort ( 400 , errors = errors ) form = form_cls . from_json ( request . json , obj = obj , instance = obj , csrf_enabled = False ) if not form . validate ( ) : self . abort ( 400 , errors = form . errors ) return form | Validate a form from the request and handle errors |
58,431 | def unauthorized ( self , response ) : realm = current_app . config . get ( 'HTTP_OAUTH_REALM' , 'uData' ) challenge = 'Bearer realm="{0}"' . format ( realm ) response . headers [ 'WWW-Authenticate' ] = challenge return response | Override to change the WWW - Authenticate challenge |
58,432 | def get ( self , id ) : args = parser . parse_args ( ) model = self . model . objects . only ( 'id' ) . get_or_404 ( id = id ) qs = Follow . objects ( following = model , until = None ) return qs . paginate ( args [ 'page' ] , args [ 'page_size' ] ) | List all followers for a given object |
58,433 | def post ( self , id ) : model = self . model . objects . only ( 'id' ) . get_or_404 ( id = id ) follow , created = Follow . objects . get_or_create ( follower = current_user . id , following = model , until = None ) count = Follow . objects . followers ( model ) . count ( ) if not current_app . config [ 'TESTING' ] : tracking . send_signal ( on_new_follow , request , current_user ) return { 'followers' : count } , 201 if created else 200 | Follow an object given its ID |
58,434 | def delete ( self , id ) : model = self . model . objects . only ( 'id' ) . get_or_404 ( id = id ) follow = Follow . objects . get_or_404 ( follower = current_user . id , following = model , until = None ) follow . until = datetime . now ( ) follow . save ( ) count = Follow . objects . followers ( model ) . count ( ) return { 'followers' : count } , 200 | Unfollow an object given its ID |
58,435 | def error ( msg , details = None ) : msg = '{0} {1}' . format ( red ( KO ) , white ( safe_unicode ( msg ) ) ) msg = safe_unicode ( msg ) if details : msg = b'\n' . join ( ( msg , safe_unicode ( details ) ) ) echo ( format_multiline ( msg ) ) | Display an error message with optional details |
58,436 | def main ( self , * args , ** kwargs ) : obj = kwargs . get ( 'obj' ) if obj is None : obj = ScriptInfo ( create_app = self . create_app ) obj . settings = kwargs . pop ( 'settings' , 'udata.settings.Defaults' ) kwargs [ 'obj' ] = obj return super ( UdataGroup , self ) . main ( * args , ** kwargs ) | Instanciate ScriptInfo before parent does to ensure the settings parameters is available to create_app |
58,437 | def get_plugins_dists ( app , name = None ) : if name : plugins = set ( e . name for e in iter_all ( name ) if e . name in app . config [ 'PLUGINS' ] ) else : plugins = set ( app . config [ 'PLUGINS' ] ) return [ d for d in known_dists ( ) if any ( set ( v . keys ( ) ) & plugins for v in d . get_entry_map ( ) . values ( ) ) ] | Return a list of Distributions with enabled udata plugins |
58,438 | def lazy_raise_or_redirect ( ) : if not request . view_args : return for name , value in request . view_args . items ( ) : if isinstance ( value , NotFound ) : request . routing_exception = value break elif isinstance ( value , LazyRedirect ) : new_args = request . view_args new_args [ name ] = value . arg new_url = url_for ( request . endpoint , ** new_args ) return redirect ( new_url ) | Raise exception lazily to ensure request . endpoint is set Also perform redirect if needed |
58,439 | def to_python ( self , value ) : if '/' not in value : return level , code = value . split ( '/' ) [ : 2 ] geoid = GeoZone . SEPARATOR . join ( [ level , code ] ) zone = GeoZone . objects . resolve ( geoid ) if not zone and GeoZone . SEPARATOR not in level : level = GeoZone . SEPARATOR . join ( [ self . DEFAULT_PREFIX , level ] ) geoid = GeoZone . SEPARATOR . join ( [ level , code ] ) zone = GeoZone . objects . resolve ( geoid ) return zone or NotFound ( ) | value has slashs in it that s why we inherit from PathConverter . |
58,440 | def to_url ( self , obj ) : level_name = getattr ( obj , 'level_name' , None ) if not level_name : raise ValueError ( 'Unable to serialize "%s" to url' % obj ) code = getattr ( obj , 'code' , None ) slug = getattr ( obj , 'slug' , None ) validity = getattr ( obj , 'validity' , None ) if code and slug : return '{level_name}/{code}@{start_date}/{slug}' . format ( level_name = level_name , code = code , start_date = getattr ( validity , 'start' , None ) or 'latest' , slug = slug ) else : raise ValueError ( 'Unable to serialize "%s" to url' % obj ) | Reconstruct the URL from level name code or datagouv id and slug . |
58,441 | def job ( name , ** kwargs ) : return task ( name = name , schedulable = True , base = JobTask , bind = True , ** kwargs ) | A shortcut decorator for declaring jobs |
58,442 | def apply_async ( self , entry , ** kwargs ) : result = super ( Scheduler , self ) . apply_async ( entry , ** kwargs ) entry . _task . last_run_id = result . id return result | A MongoScheduler storing the last task_id |
58,443 | def config ( ) : if hasattr ( current_app , 'settings_file' ) : log . info ( 'Loaded configuration from %s' , current_app . settings_file ) log . info ( white ( 'Current configuration' ) ) for key in sorted ( current_app . config ) : if key . startswith ( '__' ) or not key . isupper ( ) : continue echo ( '{0}: {1}' . format ( white ( key ) , current_app . config [ key ] ) ) | Display some details about the local configuration |
58,444 | def plugins ( ) : plugins = current_app . config [ 'PLUGINS' ] for name , description in entrypoints . ENTRYPOINTS . items ( ) : echo ( '{0} ({1})' . format ( white ( description ) , name ) ) if name == 'udata.themes' : actives = [ current_app . config [ 'THEME' ] ] elif name == 'udata.avatars' : actives = [ avatar_config ( 'provider' ) ] else : actives = plugins for ep in sorted ( entrypoints . iter_all ( name ) , key = by_name ) : echo ( '> {0}: {1}' . format ( ep . name , is_active ( ep , actives ) ) ) | Display some details about the local plugins |
58,445 | def can ( self , * args , ** kwargs ) : if isinstance ( self . require , auth . Permission ) : return self . require . can ( ) elif callable ( self . require ) : return self . require ( ) elif isinstance ( self . require , bool ) : return self . require else : return True | Overwrite this method to implement custom contextual permissions |
58,446 | def extension ( filename ) : filename = os . path . basename ( filename ) extension = None while '.' in filename : filename , ext = os . path . splitext ( filename ) if ext . startswith ( '.' ) : ext = ext [ 1 : ] extension = ext if not extension else ext + '.' + extension return extension | Properly extract the extension from filename |
58,447 | def theme_static_with_version ( ctx , filename , external = False ) : if current_app . theme_manager . static_folder : url = assets . cdn_for ( '_themes.static' , filename = current . identifier + '/' + filename , _external = external ) else : url = assets . cdn_for ( '_themes.static' , themeid = current . identifier , filename = filename , _external = external ) if url . endswith ( '/' ) : return url if current_app . config [ 'DEBUG' ] : burst = time ( ) else : burst = current . entrypoint . dist . version return '{url}?_={burst}' . format ( url = url , burst = burst ) | Override the default theme static to add cache burst |
58,448 | def render ( template , ** context ) : theme = current_app . config [ 'THEME' ] return render_theme_template ( get_theme ( theme ) , template , ** context ) | Render a template with uData frontend specifics |
58,449 | def context ( name ) : def wrapper ( func ) : g . theme . context_processors [ name ] = func return func return wrapper | A decorator for theme context processors |
58,450 | def variant ( self ) : variant = current_app . config [ 'THEME_VARIANT' ] if variant not in self . variants : log . warning ( 'Unkown theme variant: %s' , variant ) return 'default' else : return variant | Get the current theme variant |
58,451 | def resource_redirect ( id ) : resource = get_resource ( id ) return redirect ( resource . url . strip ( ) ) if resource else abort ( 404 ) | Redirect to the latest version of a resource given its identifier . |
58,452 | def group_resources_by_type ( resources ) : groups = defaultdict ( list ) for resource in resources : groups [ getattr ( resource , 'type' ) ] . append ( resource ) ordered = OrderedDict ( ) for rtype , rtype_label in RESOURCE_TYPES . items ( ) : if groups [ rtype ] : ordered [ ( rtype , rtype_label ) ] = groups [ rtype ] return ordered | Group a list of resources by type with order |
58,453 | def aggregate ( self , start , end ) : last = self . objects ( level = 'daily' , date__lte = self . iso ( end ) , date__gte = self . iso ( start ) ) . order_by ( '-date' ) . first ( ) return last . values [ self . name ] | This method encpsualte the metric aggregation logic . Override this method when you inherit this class . By default it takes the last value . |
58,454 | def paginate_sources ( owner = None , page = 1 , page_size = DEFAULT_PAGE_SIZE ) : sources = _sources_queryset ( owner = owner ) page = max ( page or 1 , 1 ) return sources . paginate ( page , page_size ) | Paginate harvest sources |
58,455 | def update_source ( ident , data ) : source = get_source ( ident ) source . modify ( ** data ) signals . harvest_source_updated . send ( source ) return source | Update an harvest source |
58,456 | def validate_source ( ident , comment = None ) : source = get_source ( ident ) source . validation . on = datetime . now ( ) source . validation . comment = comment source . validation . state = VALIDATION_ACCEPTED if current_user . is_authenticated : source . validation . by = current_user . _get_current_object ( ) source . save ( ) schedule ( ident , cron = current_app . config [ 'HARVEST_DEFAULT_SCHEDULE' ] ) launch ( ident ) return source | Validate a source for automatic harvesting |
58,457 | def reject_source ( ident , comment ) : source = get_source ( ident ) source . validation . on = datetime . now ( ) source . validation . comment = comment source . validation . state = VALIDATION_REFUSED if current_user . is_authenticated : source . validation . by = current_user . _get_current_object ( ) source . save ( ) return source | Reject a source for automatic harvesting |
58,458 | def delete_source ( ident ) : source = get_source ( ident ) source . deleted = datetime . now ( ) source . save ( ) signals . harvest_source_deleted . send ( source ) return source | Delete an harvest source |
58,459 | def run ( ident ) : source = get_source ( ident ) cls = backends . get ( current_app , source . backend ) backend = cls ( source ) backend . harvest ( ) | Launch or resume an harvesting for a given source if none is running |
58,460 | def preview ( ident ) : source = get_source ( ident ) cls = backends . get ( current_app , source . backend ) max_items = current_app . config [ 'HARVEST_PREVIEW_MAX_ITEMS' ] backend = cls ( source , dryrun = True , max_items = max_items ) return backend . harvest ( ) | Preview an harvesting for a given source |
58,461 | def preview_from_config ( name , url , backend , description = None , frequency = DEFAULT_HARVEST_FREQUENCY , owner = None , organization = None , config = None , ) : if owner and not isinstance ( owner , User ) : owner = User . get ( owner ) if organization and not isinstance ( organization , Organization ) : organization = Organization . get ( organization ) source = HarvestSource ( name = name , url = url , backend = backend , description = description , frequency = frequency or DEFAULT_HARVEST_FREQUENCY , owner = owner , organization = organization , config = config , ) cls = backends . get ( current_app , source . backend ) max_items = current_app . config [ 'HARVEST_PREVIEW_MAX_ITEMS' ] backend = cls ( source , dryrun = True , max_items = max_items ) return backend . harvest ( ) | Preview an harvesting from a source created with the given parameters |
58,462 | def schedule ( ident , cron = None , minute = '*' , hour = '*' , day_of_week = '*' , day_of_month = '*' , month_of_year = '*' ) : source = get_source ( ident ) if cron : minute , hour , day_of_month , month_of_year , day_of_week = cron . split ( ) crontab = PeriodicTask . Crontab ( minute = str ( minute ) , hour = str ( hour ) , day_of_week = str ( day_of_week ) , day_of_month = str ( day_of_month ) , month_of_year = str ( month_of_year ) ) if source . periodic_task : source . periodic_task . modify ( crontab = crontab ) else : source . modify ( periodic_task = PeriodicTask . objects . create ( task = 'harvest' , name = 'Harvest {0}' . format ( source . name ) , description = 'Periodic Harvesting' , enabled = True , args = [ str ( source . id ) ] , crontab = crontab , ) ) signals . harvest_source_scheduled . send ( source ) return source | Schedule an harvesting on a source given a crontab |
58,463 | def unschedule ( ident ) : source = get_source ( ident ) if not source . periodic_task : msg = 'Harvesting on source {0} is ot scheduled' . format ( source . name ) raise ValueError ( msg ) source . periodic_task . delete ( ) signals . harvest_source_unscheduled . send ( source ) return source | Unschedule an harvesting on a source |
58,464 | def attach ( domain , filename ) : count = 0 errors = 0 with open ( filename ) as csvfile : reader = csv . DictReader ( csvfile , delimiter = b';' , quotechar = b'"' ) for row in reader : try : dataset = Dataset . objects . get ( id = ObjectId ( row [ 'local' ] ) ) except : log . warning ( 'Unable to attach dataset : %s' , row [ 'local' ] ) errors += 1 continue Dataset . objects ( ** { 'extras__harvest:domain' : domain , 'extras__harvest:remote_id' : row [ 'remote' ] } ) . update ( ** { 'unset__extras__harvest:domain' : True , 'unset__extras__harvest:remote_id' : True } ) dataset . extras [ 'harvest:domain' ] = domain dataset . extras [ 'harvest:remote_id' ] = row [ 'remote' ] dataset . last_modified = datetime . now ( ) dataset . save ( ) count += 1 return AttachResult ( count , errors ) | Attach existing dataset to their harvest remote id before harvesting . |
58,465 | def register ( self , key , dbtype ) : if not issubclass ( dbtype , ( BaseField , EmbeddedDocument ) ) : msg = 'ExtrasField can only register MongoEngine fields' raise TypeError ( msg ) self . registered [ key ] = dbtype | Register a DB type to add constraint on a given extra key |
58,466 | def delete ( ) : email = click . prompt ( 'Email' ) user = User . objects ( email = email ) . first ( ) if not user : exit_with_error ( 'Invalid user' ) user . delete ( ) success ( 'User deleted successfully' ) | Delete an existing user |
58,467 | def set_admin ( email ) : user = datastore . get_user ( email ) log . info ( 'Adding admin role to user %s (%s)' , user . fullname , user . email ) role = datastore . find_or_create_role ( 'admin' ) datastore . add_role_to_user ( user , role ) success ( 'User %s (%s) is now administrator' % ( user . fullname , user . email ) ) | Set an user as administrator |
58,468 | def combine_chunks ( storage , args , prefix = None ) : uuid = args [ 'uuid' ] target = utils . normalize ( args [ 'filename' ] ) if prefix : target = os . path . join ( prefix , target ) with storage . open ( target , 'wb' ) as out : for i in xrange ( args [ 'totalparts' ] ) : partname = chunk_filename ( uuid , i ) out . write ( chunks . read ( partname ) ) chunks . delete ( partname ) chunks . delete ( chunk_filename ( uuid , META ) ) return target | Combine a chunked file into a whole file again . Goes through each part in order and appends that part s bytes to another destination file . Chunks are stored in the chunks storage . |
58,469 | def resolve_model ( self , model ) : if not model : raise ValueError ( 'Unsupported model specifications' ) if isinstance ( model , basestring ) : classname = model elif isinstance ( model , dict ) and 'class' in model : classname = model [ 'class' ] else : raise ValueError ( 'Unsupported model specifications' ) try : return get_document ( classname ) except self . NotRegistered : message = 'Model "{0}" does not exist' . format ( classname ) raise ValueError ( message ) | Resolve a model given a name or dict with class entry . |
58,470 | def tooltip_ellipsis ( source , length = 0 ) : try : length = int ( length ) except ValueError : return source ellipsis = '<a href v-tooltip title="{0}">...</a>' . format ( source ) return Markup ( ( source [ : length ] + ellipsis ) if len ( source ) > length and length > 0 else source ) | return the plain text representation of markdown encoded text . That is the texted without any html tags . If length is 0 then it will not be truncated . |
58,471 | def filesize ( value ) : suffix = 'o' for unit in '' , 'K' , 'M' , 'G' , 'T' , 'P' , 'E' , 'Z' : if abs ( value ) < 1024.0 : return "%3.1f%s%s" % ( value , unit , suffix ) value /= 1024.0 return "%.1f%s%s" % ( value , 'Y' , suffix ) | Display a human readable filesize |
58,472 | def negociate_content ( default = 'json-ld' ) : mimetype = request . accept_mimetypes . best_match ( ACCEPTED_MIME_TYPES . keys ( ) ) return ACCEPTED_MIME_TYPES . get ( mimetype , default ) | Perform a content negociation on the format given the Accept header |
58,473 | def url_from_rdf ( rdf , prop ) : value = rdf . value ( prop ) if isinstance ( value , ( URIRef , Literal ) ) : return value . toPython ( ) elif isinstance ( value , RdfResource ) : return value . identifier . toPython ( ) | Try to extract An URL from a resource property . It can be expressed in many forms as a URIRef or a Literal |
58,474 | def graph_response ( graph , format ) : fmt = guess_format ( format ) if not fmt : abort ( 404 ) headers = { 'Content-Type' : RDF_MIME_TYPES [ fmt ] } kwargs = { } if fmt == 'json-ld' : kwargs [ 'context' ] = context if isinstance ( graph , RdfResource ) : graph = graph . graph return graph . serialize ( format = fmt , ** kwargs ) , 200 , headers | Return a proper flask response for a RDF resource given an expected format . |
def valid_at(self, valid_date):
    """Limit the current QuerySet to zones valid at a given date.

    Zones without any validity information are considered always valid.
    """
    no_validity = db.Q(validity=None)
    within_validity = db.Q(validity__start__lte=valid_date,
                           validity__end__gt=valid_date)
    return self(no_validity | within_validity)
def resolve(self, geoid, id_only=False):
    """Resolve a GeoZone given a GeoID.

    With ``id_only=True`` only the zone id is fetched and returned.
    """
    level, code, validity = geoids.parse(geoid)
    qs = self(level=level, code=code)
    if id_only:
        qs = qs.only('id')
    if validity == 'latest':
        result = qs.latest()
    else:
        result = qs.valid_at(validity).first()
    if id_only and result:
        return result.id
    return result
def keys_values(self):
    """Key values might be a list or not; always return a flat list.

    Strings prefixed with '-' and negative ints are treated as
    "unknown/missing" markers and skipped; ints are stringified.
    """
    flattened = []
    for value in self.keys.values():
        if isinstance(value, list):
            flattened.extend(value)
        elif isinstance(value, basestring):
            if not value.startswith('-'):
                flattened.append(value)
        elif isinstance(value, int) and value >= 0:
            flattened.append(str(value))
    return flattened
def level_i18n_name(self):
    """Translated level name; in use within templates for dynamic translations."""
    for granularity, label in spatial_granularities:
        if granularity == self.level:
            return label
    # Unknown granularity: fall back to the raw level name
    return self.level_name
def ancestors_objects(self):
    """Resolve ancestor ids into GeoZone objects, sorted by name.

    Ids that no longer resolve to a zone are silently skipped.
    """
    resolved = []
    for ancestor_id in self.ancestors:
        try:
            resolved.append(GeoZone.objects.get(id=ancestor_id))
        except GeoZone.DoesNotExist:
            continue
    return sorted(resolved, key=lambda zone: zone.name)
def child_level(self):
    """Return the child level given handled levels, or None.

    NOTE(review): when ``self.level`` is the first handled level,
    index - 1 == -1 wraps around to the LAST level instead of raising
    IndexError — confirm this wrap-around is intended.
    """
    levels = current_app.config.get('HANDLED_LEVELS')
    try:
        return levels[levels.index(self.level) - 1]
    except (IndexError, ValueError):
        return None
def harvest(self):
    """Start the harvesting process and return the resulting job.

    Items are only processed when initialization succeeded
    (perform_initialization returns None on failure).
    """
    initialized = self.perform_initialization()
    if initialized is not None:
        self.process_items()
        self.finalize()
    return self.job
def perform_initialization(self):
    """Initialize the harvesting for a given job.

    Creates the HarvestJob (persisted unless in dryrun mode), runs the
    backend-specific ``initialize()`` and queues the items to harvest.

    Returns the number of queued items, or None when initialization
    failed — in which case the job is marked 'failed' and ended.
    """
    log.debug('Initializing backend')
    # Dryrun keeps the job in memory only; otherwise it is persisted on creation
    factory = HarvestJob if self.dryrun else HarvestJob.objects.create
    self.job = factory(status='initializing', started=datetime.now(), source=self.source)
    before_harvest_job.send(self)
    try:
        self.initialize()
        self.job.status = 'initialized'
        if not self.dryrun:
            self.job.save()
    except HarvestValidationError as e:
        # Expected validation failure: log at info level, record the error
        log.info('Initialization failed for "%s" (%s)', safe_unicode(self.source.name), self.source.backend)
        error = HarvestError(message=safe_unicode(e))
        self.job.errors.append(error)
        self.job.status = 'failed'
        self.end()
        return
    except Exception as e:
        # Unexpected failure: record it and log the full traceback
        self.job.status = 'failed'
        error = HarvestError(message=safe_unicode(e))
        self.job.errors.append(error)
        self.end()
        msg = 'Initialization failed for "{0.name}" ({0.backend})'
        log.exception(msg.format(self.source))
        return
    # Optionally cap the number of items (debug/testing knob)
    if self.max_items:
        self.job.items = self.job.items[:self.max_items]
    if self.job.items:
        log.debug('Queued %s items', len(self.job.items))
    return len(self.job.items)
def validate(self, data, schema):
    """Perform a data validation against a given voluptuous schema.

    Returns the validated (possibly coerced) data on success. On failure,
    aggregates every voluptuous error into a single human-readable message
    and raises HarvestValidationError.
    """
    try:
        return schema(data)
    except MultipleInvalid as ie:
        errors = []
        for error in ie.errors:
            if error.path:
                # Dotted path to the offending field, e.g. 'items.0.title'
                field = '.'.join(str(p) for p in error.path)
                path = error.path
                value = data
                # Walk the error path down into the data to recover the
                # actual offending value for the message
                while path:
                    attr = path.pop(0)
                    try:
                        if isinstance(value, (list, tuple)):
                            # List indices arrive as strings; coerce for indexing
                            attr = int(attr)
                        value = value[attr]
                    except Exception:
                        # Path no longer resolvable: report the value as None
                        value = None
                # Strip voluptuous' noisy boilerplate from the message
                txt = safe_unicode(error).replace('for dictionary value', '')
                txt = txt.strip()
                if isinstance(error, RequiredFieldInvalid):
                    # Missing field: the value would be meaningless
                    msg = '[{0}] {1}'
                else:
                    msg = '[{0}] {1}: {2}'
                try:
                    msg = msg.format(field, txt, str(value))
                except Exception:
                    # Value not representable: fall back to field + text only
                    msg = '[{0}] {1}'.format(field, txt)
            else:
                # Top-level error without a path
                msg = str(error)
            errors.append(msg)
        msg = '\n- '.join(['Validation error:'] + errors)
        raise HarvestValidationError(msg)
def add(obj):
    """Handle a badge add API call.

    Returns the existing badge when already present, otherwise the newly
    created badge with a 201 status.
    """
    Form = badge_form(obj.__class__)
    form = api.validate(Form)
    kind = form.kind.data
    existing = obj.get_badge(kind)
    if existing:
        return existing
    return obj.add_badge(kind), 201
def remove(obj, kind):
    """Handle badge removal API: 404 when absent, 204 on success."""
    if not obj.get_badge(kind):
        api.abort(404, 'Badge does not exists')
    obj.remove_badge(kind)
    return '', 204
def check_for_territories(query):
    """Return a GeoZone queryset of territories matching the query.

    Matching rules depend on the query length and handled level:
    - 2 chars: département code (digits, or Corsica's '2a'/'2b')
    - 3 digits: département code (e.g. overseas '971')
    - 5 chars: commune INSEE code or postal code
    - 4+ chars: name prefix or exact name match
    Returns an empty list when territories are deactivated or nothing matches.
    """
    if not query or not current_app.config.get('ACTIVATE_TERRITORIES'):
        return []
    dbqs = db.Q()
    query = query.lower()
    is_digit = query.isdigit()
    query_length = len(query)
    for level in current_app.config.get('HANDLED_LEVELS'):
        if level == 'country':
            # Countries are never matched by code/name here
            continue
        q = db.Q(level=level)
        if (query_length == 2 and level == 'fr:departement'
                and (is_digit or query in ('2a', '2b'))):
            # Two-char département codes, including Corsica
            q &= db.Q(code=query)
        elif query_length == 3 and level == 'fr:departement' and is_digit:
            # Three-digit codes (overseas départements)
            q &= db.Q(code=query)
        elif (query_length == 5 and level == 'fr:commune'
                and (is_digit or query.startswith('2a') or query.startswith('2b'))):
            # Five-char values can be an INSEE code or a postal code
            q &= db.Q(code=query) | db.Q(keys__postal__contains=query)
        elif query_length >= 4:
            # Free-text: name prefix or exact (case-insensitive) name
            q &= db.Q(name__istartswith=query) | db.Q(name__iexact=query)
        else:
            # Query shape matches no rule for this level
            continue
        dbqs |= q
    if dbqs.empty:
        return []
    # Biggest territories first
    return GeoZone.objects(dbqs).order_by('-population', '-area')
def build(level, code, validity=None):
    """Serialize a GeoID from its parts.

    ``validity`` may be a string, a datetime or a date; it is appended
    after an '@' separator. Raises GeoIDError for any other type.
    """
    spatial = ':'.join((level, code))
    if not validity:
        return spatial
    if isinstance(validity, basestring):
        suffix = validity
    elif isinstance(validity, datetime):
        # datetime must be checked before date (datetime is a date subclass)
        suffix = validity.date().isoformat()
    elif isinstance(validity, date):
        suffix = validity.isoformat()
    else:
        msg = 'Unknown GeoID validity type: {0}'
        raise GeoIDError(msg.format(type(validity).__name__))
    return '@'.join((spatial, suffix))
def from_zone(zone):
    """Build a GeoID from a given zone (validity start date when present)."""
    start = zone.validity.start if zone.validity else None
    return build(zone.level, zone.code, start)
def temporal_from_rdf(period_of_time):
    """Failsafe parsing of a temporal coverage.

    Returns None (implicitly) for unknown node types or on parse errors.
    """
    try:
        if isinstance(period_of_time, Literal):
            return temporal_from_literal(str(period_of_time))
        if isinstance(period_of_time, RdfResource):
            return temporal_from_resource(period_of_time)
    except Exception:
        # Deliberate best-effort: a bad coverage must never break harvesting
        log.warning('Unable to parse temporal coverage', exc_info=True)
def title_from_rdf(rdf, url):
    """Try to extract a distribution title from a property.

    As it's not a mandatory property, fall back on building a title from
    the URL's filename, then the format, and in last resort a generic
    resource name (translated in the default language).
    """
    title = rdf_value(rdf, DCT.title)
    if title:
        return title
    if url:
        candidate = url.split('/')[-1]
        # Only keep URL tails that look like filenames (no query string)
        if '.' in candidate and '?' not in candidate:
            return candidate
    fmt = rdf_value(rdf, DCT.term('format'))
    lang = current_app.config['DEFAULT_LANGUAGE']
    with i18n.language(lang):
        if fmt:
            return i18n._('{format} resource').format(format=fmt.lower())
        return i18n._('Nameless resource')
def check_url_does_not_exists(form, field):
    """Ensure a reuse URL is not yet registered.

    Unchanged URLs (equal to the stored object data) are always accepted.
    """
    changed = field.data != field.object_data
    if changed and Reuse.url_exists(field.data):
        raise validators.ValidationError(_('This URL is already registered'))
def obj_to_string(obj):
    """Render an object into a unicode string if possible.

    Falsy inputs yield None; bytes are decoded as UTF-8; lazy strings are
    unwrapped; HTML-capable objects render via __html__.
    """
    if not obj:
        return None
    if isinstance(obj, bytes):
        return obj.decode('utf-8')
    if isinstance(obj, basestring):
        return obj
    if is_lazy_string(obj):
        return obj.value
    if hasattr(obj, '__html__'):
        return obj.__html__()
    return str(obj)
def add_filter(self, filter_values):
    """Improve the original one to deal with OR cases.

    A value containing the OR separator becomes a bool/should of term
    queries; multiple values are ANDed together with bool/must.
    """
    field = self._params['field']
    filters = []
    for value in filter_values:
        if OR_SEPARATOR in value:
            alternatives = [Q('term', **{field: v})
                            for v in value.split(OR_SEPARATOR)]
            filters.append(Q('bool', should=alternatives))
        else:
            filters.append(Q('term', **{field: value}))
    if len(filters) > 1:
        return Q('bool', must=filters)
    return filters[0]
def get_values(self, data, filter_values):
    """Turn the raw bucket data into a list of tuples containing the model
    object, the number of documents and a flag indicating whether this
    value has been selected or not.
    """
    values = super(ModelTermsFacet, self).get_values(data, filter_values)
    ids = [key for (key, doc_count, selected) in values]
    # Convert raw bucket keys to their mongo representation before lookup
    ids = [self.model_field.to_mongo(id) for id in ids]
    # Single bulk query instead of one query per bucket
    objects = self.model.objects.in_bulk(ids)
    return [
        # Re-apply the same key conversion so lookups hit the bulk dict;
        # missing documents yield None via dict.get
        (objects.get(self.model_field.to_mongo(key)), doc_count, selected)
        for (key, doc_count, selected) in values
    ]
def save(self, commit=True, **kwargs):
    """Register the current user as admin on creation."""
    org = super(OrganizationForm, self).save(commit=False, **kwargs)
    if not org.id:
        # New organization: grant its creator the admin role
        user = current_user._get_current_object()
        org.members.append(Member(user=user, role='admin'))
    if commit:
        org.save()
    return org
def get_cache_key(path):
    """Create a cache key by concatenating the prefix with a hash of the path."""
    try:
        digest = hashlib.md5(path).hexdigest()
    except TypeError:
        # Python 3 str must be encoded before hashing
        digest = hashlib.md5(path.encode('utf-8')).hexdigest()
    return settings.cache_key_prefix + digest
def get_remote_etag(storage, prefixed_path):
    """Get etag of path from S3 using boto or boto3.

    Tries the boto (v2) API first, then boto3; returns None when the key
    cannot be found through either.
    """
    normalized_path = safe_join(storage.location, prefixed_path).replace('\\', '/')
    # boto v2: bucket.get_key returns None for a missing key, so accessing
    # .etag raises AttributeError (also raised when bucket has no get_key)
    try:
        return storage.bucket.get_key(normalized_path).etag
    except AttributeError:
        pass
    # boto3: bucket.Object(...).e_tag may raise client errors for missing
    # keys. Kept broad, but `except Exception` instead of a bare `except:`
    # so KeyboardInterrupt/SystemExit are no longer swallowed.
    try:
        return storage.bucket.Object(normalized_path).e_tag
    except Exception:
        pass
    return None
def get_etag(storage, path, prefixed_path):
    """Get etag of path from cache or S3 - in that order.

    Uses False as the cache-miss sentinel so a cached None (missing remote
    key) is still treated as a hit.
    """
    cache_key = get_cache_key(path)
    cached = cache.get(cache_key, False)
    if cached is not False:
        return cached
    etag = get_remote_etag(storage, prefixed_path)
    cache.set(cache_key, etag)
    return etag
def get_file_hash(storage, path):
    """Create a quoted md5 hash (etag-style) from file contents.

    When gzipping applies to the file's content type, the hash of the
    gzipped payload is computed instead (cached, keyed by the plain hash,
    so the compression only runs once per content version).
    """
    contents = storage.open(path).read()
    file_hash = hashlib.md5(contents).hexdigest()
    content_type = mimetypes.guess_type(path)[0] or 'application/octet-stream'
    if settings.is_gzipped and content_type in settings.gzip_content_types:
        # Key the gzip-hash cache on the plain-content hash: same bytes in,
        # same gzipped hash out
        cache_key = get_cache_key('gzip_hash_%s' % file_hash)
        file_hash = cache.get(cache_key, False)
        if file_hash is False:
            buffer = BytesIO()
            # mtime=0.0 makes the gzip output (and thus the hash) deterministic
            zf = gzip.GzipFile(mode='wb', compresslevel=6, fileobj=buffer, mtime=0.0)
            zf.write(force_bytes(contents))
            zf.close()
            file_hash = hashlib.md5(buffer.getvalue()).hexdigest()
            cache.set(cache_key, file_hash)
    # Quoted to match the S3 ETag header format
    return '"%s"' % file_hash
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.