idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
58,200
def loadtxt(fname, dtype="float", delimiter="\t", usecols=None, comments="#"):
    r"""Load unyt_arrays with unit information from a text file.

    Each row in the text file must have the same number of values.  Unit
    names are taken from the comment line following a ``# Units`` marker
    line, one unit name per column; columns without a recognizable header
    fall back to "dimensionless".

    Parameters
    ----------
    fname : str
        Name of the file to read.
    dtype : data-type, optional
        dtype of the resulting arrays.
    delimiter : str, optional
        Column separator; defaults to a tab.
    usecols : sequence of int, optional
        Which columns to read, 0-based.
    comments : str, optional
        Character that marks comment lines.

    Returns
    -------
    tuple of unyt_array (or a single unyt_array when only one column
    is read).
    """
    next_one = False
    units = []
    num_cols = -1
    # The original implementation leaked the file handle when header
    # parsing raised (e.g. float() on a bad field); a context manager
    # guarantees the file is closed.
    with open(fname) as f:
        for line in f.readlines():
            words = line.strip().split()
            if len(words) == 0:
                continue
            if line[0] == comments:
                # The comment line after "# Units" carries the unit names.
                if next_one:
                    units = words[1:]
                if len(words) == 2 and words[1] == "Units":
                    next_one = True
            else:
                # First data line: count columns and verify every field
                # parses as a float before handing off to np.loadtxt.
                col_words = line.strip().split(delimiter)
                for word in col_words:
                    float(word)
                num_cols = len(col_words)
                break
    if len(units) != num_cols:
        units = ["dimensionless"] * num_cols
    arrays = np.loadtxt(
        fname,
        dtype=dtype,
        comments=comments,
        delimiter=delimiter,
        converters=None,
        unpack=True,
        usecols=usecols,
        ndmin=0,
    )
    if len(arrays.shape) < 2:
        arrays = [arrays]
    if usecols is not None:
        units = [units[col] for col in usecols]
    ret = tuple(unyt_array(arr, unit) for arr, unit in zip(arrays, units))
    if len(ret) == 1:
        return ret[0]
    return ret
r Load unyt_arrays with unit information from a text file . Each row in the text file must have the same number of values .
58,201
def savetxt(fname, arrays, fmt="%.18e", delimiter="\t", header="", footer="", comments="#"):
    r"""Write unyt_arrays with unit information to a text file.

    A ``Units`` marker line and a line of per-column unit names are
    appended to the header; arrays lacking a ``units`` attribute are
    recorded as "dimensionless".
    """
    if not isinstance(arrays, list):
        arrays = [arrays]
    units = [
        str(array.units) if hasattr(array, "units") else "dimensionless"
        for array in arrays
    ]
    if header != "" and not header.endswith("\n"):
        header += "\n"
    header += " Units\n " + "\t".join(units)
    np.savetxt(
        fname,
        np.transpose(arrays),
        header=header,
        fmt=fmt,
        delimiter=delimiter,
        footer=footer,
        newline="\n",
        comments=comments,
    )
r Write unyt_arrays with unit information to a text file .
58,202
def convert_to_units(self, units, equivalence=None, **kwargs):
    """Convert the array to the given units in-place.

    With an ``equivalence`` the work is delegated to
    ``convert_to_equivalent``; otherwise the data buffer is rescaled
    (and reinterpreted as floats first when it holds integers).
    """
    units = _sanitize_units_convert(units, self.units.registry)
    if equivalence is not None:
        self.convert_to_equivalent(units, equivalence, **kwargs)
        return
    conv_data = _check_em_conversion(self.units, units, registry=self.units.registry)
    if any(conv_data):
        new_units, (factor, offset) = _em_conversion(self.units, conv_data, units)
    else:
        new_units = units
        factor, offset = self.units.get_conversion_factor(new_units, self.dtype)
    self.units = new_units
    values = self.d
    if self.dtype.kind in ("u", "i"):
        # Integer data cannot hold a scaled result: reinterpret the same
        # buffer as floats of matching item size, in place.
        dsize = values.dtype.itemsize
        new_dtype = "f" + str(dsize)
        large = LARGE_INPUT.get(dsize, 0)
        if large and np.any(np.abs(values) > large):
            warnings.warn(
                "Overflow encountered while converting to units '%s'" % new_units,
                RuntimeWarning,
                stacklevel=2,
            )
        float_values = values.astype(new_dtype)
        values.dtype = new_dtype
        self.dtype = new_dtype
        np.copyto(values, float_values)
    values *= factor
    if offset:
        np.subtract(values, offset, values)
Convert the array to the given units in - place .
58,203
def convert_to_base(self, unit_system=None, equivalence=None, **kwargs):
    """Convert the array in-place to the equivalent base units of the
    specified unit system."""
    base_units = self.units.get_base_equivalent(unit_system)
    self.convert_to_units(base_units, equivalence=equivalence, **kwargs)
Convert the array in - place to the equivalent base units in the specified unit system .
58,204
def convert_to_cgs(self, equivalence=None, **kwargs):
    """Convert the array in-place to the equivalent cgs units."""
    cgs_units = self.units.get_cgs_equivalent()
    self.convert_to_units(cgs_units, equivalence=equivalence, **kwargs)
Convert the array in-place to the equivalent cgs units.
58,205
def convert_to_mks(self, equivalence=None, **kwargs):
    """Convert the array in-place to the equivalent mks units."""
    # Pass equivalence by keyword for consistency with convert_to_cgs
    # and convert_to_base (behavior is unchanged).
    self.convert_to_units(
        self.units.get_mks_equivalent(), equivalence=equivalence, **kwargs
    )
Convert the array and units to the equivalent mks units .
58,206
def to_value(self, units=None, equivalence=None, **kwargs):
    """Return a copy of the data in the supplied units, stripped of unit
    information (a bare NumPy array, or a float for quantities)."""
    if units is None:
        v = self.value
    else:
        v = self.in_units(units, equivalence=equivalence, **kwargs).value
    return float(v) if isinstance(self, unyt_quantity) else v
Creates a copy of this array with the data in the supplied units and returns it without units . Output is therefore a bare NumPy array .
58,207
def in_base(self, unit_system=None):
    """Create a copy of this array expressed in the base units of the
    specified unit system."""
    us = _sanitize_unit_system(unit_system, self)
    try:
        conv_data = _check_em_conversion(
            self.units, unit_system=us, registry=self.units.registry
        )
    except MKSCGSConversionError:
        raise UnitsNotReducible(self.units, us)
    if any(conv_data):
        to_units, (factor, offset) = _em_conversion(
            self.units, conv_data, unit_system=us
        )
    else:
        to_units = self.units.get_base_equivalent(unit_system)
        factor, offset = self.units.get_conversion_factor(to_units, self.dtype)
    # Scale at the float precision matching the array's item size.
    float_dtype = np.dtype("f" + str(self.dtype.itemsize))
    ret = self.v * float_dtype.type(factor)
    if offset:
        ret = ret - offset
    return type(self)(ret, to_units)
Creates a copy of this array with the data in the specified unit system and returns it in that system s base units .
58,208
def argsort(self, axis=-1, kind="quicksort", order=None):
    """Return the indices that would sort the array (as a plain ndarray)."""
    return self.view(np.ndarray).argsort(axis=axis, kind=kind, order=order)
Returns the indices that would sort the array .
58,209
def from_astropy(cls, arr, unit_registry=None):
    """Convert an AstroPy Quantity (or bare unit) to a unyt_array or
    unyt_quantity."""
    try:
        u = arr.unit
        _arr = arr
    except AttributeError:
        # A bare astropy unit was passed rather than a quantity.
        u = arr
        _arr = 1.0 * u
    parts = []
    for base, exponent in zip(u.bases, u.powers):
        unit_str = base.to_string()
        if unit_str == "h":
            # astropy's "h" means hour; unyt spells it "hr".
            unit_str = "hr"
        parts.append("%s**(%s)" % (unit_str, Rational(exponent)))
    unit_expr = "*".join(parts)
    if isinstance(_arr.value, np.ndarray) and _arr.shape != ():
        return unyt_array(_arr.value, unit_expr, registry=unit_registry)
    return unyt_quantity(_arr.value, unit_expr, registry=unit_registry)
Convert an AstroPy Quantity to a unyt_array or unyt_quantity .
58,210
def to_astropy(self, **kwargs):
    """Create a new AstroPy quantity carrying the same unit information."""
    ap_unit = _astropy.units.Unit(str(self.units), **kwargs)
    return self.value * ap_unit
Creates a new AstroPy quantity with the same unit information .
58,211
def from_pint(cls, arr, unit_registry=None):
    """Convert a Pint Quantity to a unyt_array or unyt_quantity."""
    parts = [
        "%s**(%s)" % (convert_pint_units(base), Rational(exponent))
        for base, exponent in arr._units.items()
    ]
    p_units = "*".join(parts)
    if isinstance(arr.magnitude, np.ndarray):
        return unyt_array(arr.magnitude, p_units, registry=unit_registry)
    return unyt_quantity(arr.magnitude, p_units, registry=unit_registry)
Convert a Pint Quantity to a unyt_array or unyt_quantity .
58,212
def to_pint(self, unit_registry=None):
    """Convert a unyt_array or unyt_quantity to a Pint Quantity."""
    if unit_registry is None:
        unit_registry = _pint.UnitRegistry()
    parts = []
    for unit, power in self.units.expr.as_powers_dict().items():
        name = str(unit)
        # Pint spells year-based symbols with "year" instead of "yr".
        if name.endswith("yr") and len(name) in (2, 3):
            name = name.replace("yr", "year")
        parts.append("%s**(%s)" % (name, Rational(power)))
    return unit_registry.Quantity(self.value, "*".join(parts))
Convert a unyt_array or unyt_quantity to a Pint Quantity .
58,213
def write_hdf5(self, filename, dataset_name=None, info=None, group_name=None):
    r"""Write a unyt_array to an hdf5 file.

    The unit string and a pickled unit-registry LUT are stored as
    dataset attributes, alongside any extra ``info`` entries.

    Parameters
    ----------
    filename : str
        hdf5 file to write to (created if needed).
    dataset_name : str, optional
        Dataset name; defaults to "array_data".
    info : dict, optional
        Extra attributes to attach to the dataset.
    group_name : str, optional
        Group to place the dataset under (created if needed).
    """
    from unyt._on_demand_imports import _h5py as h5py
    import pickle

    if info is None:
        info = {}
    info["units"] = str(self.units)
    info["unit_registry"] = np.void(pickle.dumps(self.units.registry.lut))
    if dataset_name is None:
        dataset_name = "array_data"
    # "a" matches the historical h5py default mode; recent h5py versions
    # require the mode to be passed explicitly.
    f = h5py.File(filename, "a")
    try:
        if group_name is not None:
            g = f[group_name] if group_name in f else f.create_group(group_name)
        else:
            g = f
        if dataset_name in g.keys():
            d = g[dataset_name]
            if d.shape == self.shape and d.dtype == self.dtype:
                d[...] = self
                for k in d.attrs.keys():
                    del d.attrs[k]
            else:
                # BUG FIX: the stale dataset must be removed from the
                # group it lives in, not from the file root.
                del g[dataset_name]
                d = g.create_dataset(dataset_name, data=self)
        else:
            d = g.create_dataset(dataset_name, data=self)
        for k, v in info.items():
            d.attrs[k] = v
    finally:
        # Close even if writing raises, so the file is not left open.
        f.close()
r Writes a unyt_array to hdf5 file .
58,214
def from_hdf5(cls, filename, dataset_name=None, group_name=None):
    r"""Read a dataset from an hdf5 file and convert it into a unyt_array.

    The unit string and pickled registry LUT written by ``write_hdf5``
    are restored from the dataset attributes.
    """
    from unyt._on_demand_imports import _h5py as h5py
    import pickle

    if dataset_name is None:
        dataset_name = "array_data"
    # Open read-only: this method never writes.
    f = h5py.File(filename, "r")
    try:
        g = f[group_name] if group_name is not None else f
        dataset = g[dataset_name]
        data = dataset[:]
        units = dataset.attrs.get("units", "")
        # tobytes() replaces the deprecated tostring() alias.
        unit_lut = pickle.loads(dataset.attrs["unit_registry"].tobytes())
    finally:
        f.close()
    registry = UnitRegistry(lut=unit_lut, add_default_symbols=False)
    return cls(data, units, registry=registry)
r Attempts to read in and convert a dataset in an hdf5 file into a unyt_array .
58,215
def copy(self, order="C"):
    """Return a copy of the array with the same units.

    NOTE(review): ``order`` is accepted for ndarray signature
    compatibility but is not forwarded to np.copy — confirm intended.
    """
    data = np.copy(np.asarray(self))
    return type(self)(data, self.units)
Return a copy of the array .
58,216
def dot(self, b, out=None):
    """Dot product of two arrays; result units are the product of the
    operands' units."""
    other_units = getattr(b, "units", NULL_UNIT)
    res_units = self.units * other_units
    raw = self.view(np.ndarray).dot(np.asarray(b), out=out)
    ret = raw * res_units
    if out is not None:
        out.units = res_units
    return ret
dot product of two arrays .
58,217
def import_units(module, namespace):
    """Copy every Unit or unyt_quantity attribute of ``module`` into
    ``namespace``."""
    for name, obj in module.__dict__.items():
        if isinstance(obj, (unyt_quantity, Unit)):
            namespace[name] = obj
Import Unit objects from a module into a namespace
58,218
def _lookup_unit_symbol(symbol_str, unit_symbol_lut):
    """Search for the unit data tuple corresponding to the given symbol.

    Falls back to SI-prefix splitting when the symbol is not in the LUT;
    derived entries are memoized back into the LUT.
    """
    if symbol_str in unit_symbol_lut:
        return unit_symbol_lut[symbol_str]
    prefix, symbol_wo_prefix = _split_prefix(symbol_str, unit_symbol_lut)
    if not prefix:
        raise UnitParseError(
            "Could not find unit symbol '%s' in the provided "
            "symbols." % symbol_str
        )
    unit_data = unit_symbol_lut[symbol_wo_prefix]
    prefix_value = unit_prefixes[prefix][0]
    # NOTE(review): symbols ending in "cm" (other than "cm" itself) are
    # special-cased — presumably comoving units; the trailing "cm" is
    # stripped before substituting into the latex template. Confirm.
    if symbol_wo_prefix != "cm" and symbol_wo_prefix.endswith("cm"):
        sub_symbol_wo_prefix = symbol_wo_prefix[:-2]
        sub_symbol_str = symbol_str[:-2]
    else:
        sub_symbol_wo_prefix = symbol_wo_prefix
        sub_symbol_str = symbol_str
    latex_repr = unit_data[3].replace(
        "{" + sub_symbol_wo_prefix + "}", "{" + sub_symbol_str + "}"
    )
    ret = (
        unit_data[0] * prefix_value,
        unit_data[1],
        unit_data[2],
        latex_repr,
        False,
    )
    unit_symbol_lut[symbol_str] = ret
    return ret
Searches for the unit data tuple corresponding to the given symbol .
58,219
def unit_system_id(self):
    """Unique identifier for this registry, derived from an MD5 digest of
    the sorted lookup table.  Cached after the first computation."""
    if self._unit_system_id is None:
        digest = md5()
        # Feed the digest incrementally; equivalent to hashing the
        # concatenated bytes of all sorted (symbol, data) pairs.
        for symbol, data in sorted(self.lut.items()):
            digest.update(symbol.encode("utf8"))
            digest.update(repr(data).encode("utf8"))
        self._unit_system_id = str(digest.hexdigest())
    return self._unit_system_id
This is a unique identifier for the unit registry created from an MD5 hash of the sorted lookup table . It is needed to register a dataset s code unit system in the unit system registry .
58,220
def add(self, symbol, base_value, dimensions, tex_repr=None, offset=None, prefixable=False):
    """Add a symbol to this registry, invalidating the cached id."""
    from unyt.unit_object import _validate_dimensions

    self._unit_system_id = None
    if not isinstance(base_value, float):
        raise UnitParseError(
            "base_value (%s) must be a float, got a %s."
            % (base_value, type(base_value))
        )
    if offset is None:
        offset = 0.0
    elif not isinstance(offset, float):
        raise UnitParseError(
            "offset value (%s) must be a float, got a %s."
            % (offset, type(offset))
        )
    _validate_dimensions(dimensions)
    if tex_repr is None:
        # Default latex form: \rm{symbol} with underscores escaped.
        tex_repr = r"\rm{" + symbol.replace("_", r"\ ") + "}"
    self.lut[symbol] = (base_value, dimensions, offset, tex_repr, prefixable)
Add a symbol to this registry .
58,221
def remove(self, symbol):
    """Remove the entry for the unit matching ``symbol``, invalidating the
    cached registry id."""
    self._unit_system_id = None
    if symbol not in self.lut:
        raise SymbolNotFoundError(
            "Tried to remove the symbol '%s', but it does not exist "
            "in this registry." % symbol
        )
    self.lut.pop(symbol)
Remove the entry for the unit matching symbol .
58,222
def modify(self, symbol, base_value):
    """Change the base value of a unit symbol.  Useful for adjusting code
    units after parsing parameters."""
    self._unit_system_id = None
    if symbol not in self.lut:
        raise SymbolNotFoundError(
            "Tried to modify the symbol '%s', but it does not exist "
            "in this registry." % symbol
        )
    if hasattr(base_value, "in_base"):
        # A quantity was passed: take its mks base value and dimensions.
        new_dimensions = base_value.units.dimensions
        base_value = base_value.in_base("mks").value
    else:
        new_dimensions = self.lut[symbol][1]
    self.lut[symbol] = (float(base_value), new_dimensions) + self.lut[symbol][2:]
Change the base value of a unit symbol . Useful for adjusting code units after parsing parameters .
58,223
def to_json(self):
    """Return a json-serialized version of the unit registry; the
    dimensions field is stringified so it round-trips through json."""
    sanitized = {
        symbol: (data[0], str(data[1])) + tuple(data[2:])
        for symbol, data in self.lut.items()
    }
    return json.dumps(sanitized)
Returns a json - serialized version of the unit registry
58,224
def from_json(cls, json_text):
    """Build a UnitRegistry from its json-serialized form (inverse of
    to_json)."""
    data = json.loads(json_text)
    lut = {}
    for symbol, entry in data.items():
        restored = list(entry)
        # The dimensions field was serialized as a string expression.
        restored[1] = sympify(entry[1], locals=vars(unyt_dims))
        lut[symbol] = tuple(restored)
    return cls(lut=lut, add_default_symbols=False)
Returns a UnitRegistry object from a json - serialized unit registry
58,225
def _em_conversion(orig_units, conv_data, to_units=None, unit_system=None):
    """Convert between E&M and MKS base units; returns the target units
    and their (factor, offset) conversion pair."""
    conv_unit, canonical_unit, scale = conv_data
    if conv_unit is None:
        conv_unit = canonical_unit
    new_expr = scale * canonical_unit.expr
    if unit_system is not None:
        # When converting into a unit system, target the conversion unit.
        to_units = Unit(conv_unit.expr, registry=orig_units.registry)
    new_units = Unit(new_expr, registry=orig_units.registry)
    conv = new_units.get_conversion_factor(to_units)
    return to_units, conv
Convert between E&M & MKS base units .
58,226
def _check_em_conversion(unit, to_unit=None, unit_system=None, registry=None):
    """Check whether converting ``unit`` involves E&M units.

    Returns an empty tuple when no special E&M handling is needed,
    otherwise a (target, em_unit, scale) triple; raises
    MKSCGSConversionError when the conversion is not reducible.
    """
    em_map = ()
    if unit == to_unit or unit.dimensions not in em_conversion_dims:
        return em_map
    if unit.is_atomic:
        prefix, unit_wo_prefix = _split_prefix(str(unit), unit.registry.lut)
    else:
        prefix, unit_wo_prefix = "", str(unit)
    if (unit_wo_prefix, unit.dimensions) in em_conversions:
        em_info = em_conversions[unit_wo_prefix, unit.dimensions]
        em_unit = Unit(prefix + em_info[1], registry=registry)
        if to_unit is None:
            cmks_in_unit = current_mks in unit.dimensions.atoms()
            cmks_in_unit_system = unit_system.units_map[current_mks] is not None
            if cmks_in_unit and cmks_in_unit_system:
                em_map = (unit_system[unit.dimensions], unit, 1.0)
            else:
                em_map = (None, em_unit, em_info[2])
        elif to_unit.dimensions == em_unit.dimensions:
            em_map = (to_unit, em_unit, em_info[2])
    if em_map:
        return em_map
    if unit_system is None:
        from unyt.unit_systems import unit_system_registry

        unit_system = unit_system_registry["mks"]
    # Probe every atomic symbol: a MissingMKSCurrent lookup failure means
    # this conversion cannot be expressed in the target system.
    for unit_atom in unit.expr.atoms():
        if unit_atom.is_Number:
            continue
        bu = str(unit_atom)
        budims = Unit(bu, registry=registry).dimensions
        try:
            if str(unit_system[budims]) == bu:
                continue
        except MissingMKSCurrent:
            raise MKSCGSConversionError(unit)
    return em_map
Check to see if the units contain E&M units
58,227
def _get_conversion_factor ( old_units , new_units , dtype ) : if old_units . dimensions != new_units . dimensions : raise UnitConversionError ( old_units , old_units . dimensions , new_units , new_units . dimensions ) ratio = old_units . base_value / new_units . base_value if old_units . base_offset == 0 and new_units . base_offset == 0 : return ( ratio , None ) else : return ratio , ratio * old_units . base_offset - new_units . base_offset
Get the conversion factor between two units of equivalent dimensions . This is the number you multiply data by to convert from values in old_units to values in new_units .
58,228
def _get_unit_data_from_expr(unit_expr, unit_symbol_lut):
    """Recursively extract the total base_value and dimensions from a
    valid sympy unit expression (Number, Symbol, Pow, or Mul)."""
    if isinstance(unit_expr, Number):
        if unit_expr is sympy_one:
            return (1.0, sympy_one)
        return (float(unit_expr), sympy_one)
    if isinstance(unit_expr, Symbol):
        return _lookup_unit_symbol(unit_expr.name, unit_symbol_lut)
    if isinstance(unit_expr, Pow):
        base_data = _get_unit_data_from_expr(unit_expr.args[0], unit_symbol_lut)
        power = unit_expr.args[1]
        if isinstance(power, Symbol):
            # Symbolic exponents cannot be reduced to a number.
            raise UnitParseError("Invalid unit expression '%s'." % unit_expr)
        return (float(base_data[0] ** power), base_data[1] ** power)
    if isinstance(unit_expr, Mul):
        base_value = 1.0
        dimensions = 1
        for factor in unit_expr.args:
            factor_data = _get_unit_data_from_expr(factor, unit_symbol_lut)
            base_value *= factor_data[0]
            dimensions *= factor_data[1]
        return (float(base_value), dimensions)
    raise UnitParseError(
        "Cannot parse for unit data from '%s'. Please supply"
        " an expression of only Unit, Symbol, Pow, and Mul"
        "objects." % str(unit_expr)
    )
Grabs the total base_value and dimensions from a valid unit expression .
58,229
def define_unit(symbol, value, tex_repr=None, offset=None, prefixable=False, registry=None):
    """Define a new unit from a quantity (or a (value, unit) pair) and add
    it to the specified unit registry."""
    from unyt.array import unyt_quantity, _iterable
    import unyt

    if registry is None:
        registry = default_unit_registry
    if symbol in registry:
        raise RuntimeError(
            "Unit symbol '%s' already exists in the provided "
            "registry" % symbol
        )
    if not isinstance(value, unyt_quantity):
        if _iterable(value) and len(value) == 2:
            value = unyt_quantity(value[0], value[1], registry=registry)
        else:
            raise RuntimeError(
                '"value" needs to be a quantity or '
                "(value, unit) tuple!"
            )
    base_value = float(value.in_base(unit_system="mks"))
    dimensions = value.units.dimensions
    registry.add(
        symbol,
        base_value,
        dimensions,
        prefixable=prefixable,
        tex_repr=tex_repr,
        offset=offset,
    )
    if registry is default_unit_registry:
        # New default-registry units are also exposed as attributes of
        # the top-level unyt package.
        setattr(unyt, symbol, Unit(symbol, registry=registry))
Define a new unit and add it to the specified unit registry .
58,230
def latex_repr(self):
    """A LaTeX representation for the unit (cached after first use)."""
    if self._latex_repr is not None:
        return self._latex_repr
    expr = self.expr if self.expr.is_Atom else self.expr.copy()
    self._latex_repr = _get_latex_representation(expr, self.registry)
    return self._latex_repr
A LaTeX representation for the unit
58,231
def is_code_unit(self):
    """True when every non-numeric symbol in this unit starts with
    "code"."""
    return all(
        str(atom).startswith("code") or atom.is_Number
        for atom in self.expr.atoms()
    )
Is this a code unit?
58,232
def list_equivalencies(self):
    """Print the equivalencies this unit object participates in."""
    from unyt.equivalencies import equivalence_registry

    for name, equiv_cls in equivalence_registry.items():
        if self.has_equivalent(name):
            print(equiv_cls())
Lists the possible equivalencies associated with this unit object
58,233
def get_base_equivalent(self, unit_system=None):
    """Create and return dimensionally-equivalent units in the specified
    base unit system."""
    from unyt.unit_registry import _sanitize_unit_system

    unit_system = _sanitize_unit_system(unit_system, self)
    try:
        conv_data = _check_em_conversion(
            self.units, registry=self.registry, unit_system=unit_system
        )
    except MKSCGSConversionError:
        raise UnitsNotReducible(self.units, unit_system)
    if any(conv_data):
        new_units, _ = _em_conversion(self, conv_data, unit_system=unit_system)
    else:
        try:
            new_units = unit_system[self.dimensions]
        except MissingMKSCurrent:
            raise UnitsNotReducible(self.units, unit_system)
    return Unit(new_units, registry=self.registry)
Create and return dimensionally - equivalent units in a specified base .
58,234
def as_coeff_unit(self):
    """Factor out the scalar coefficient multiplying this unit; returns
    (coefficient, unit-without-coefficient)."""
    coeff, mul = self.expr.as_coeff_Mul()
    coeff = float(coeff)
    stripped = Unit(
        mul,
        self.base_value / coeff,
        self.base_offset,
        self.dimensions,
        self.registry,
    )
    return coeff, stripped
Factor the coefficient multiplying a unit
58,235
def simplify(self):
    """Cancel multiplied factors in this unit's expression in place and
    return self."""
    self.expr = _cancel_mul(self.expr, self.registry)
    return self
Return a new equivalent unit object with a simplified unit expression
58,236
def _auto_positive_symbol(tokens, local_dict, global_dict):
    """Insert calls to Symbol for undefined variables, passing
    positive=True as a keyword argument.

    Adapted from sympy.parsing.sympy_parser.auto_symbol.
    """
    result = []
    # BUG FIX: the original appended a (None, None) sentinel directly to
    # the caller's token list, mutating the input; pad a copy instead.
    # (The lookahead pair produced by the zip is never actually used.)
    padded = tokens + [(None, None)]
    for (tok_num, tok_val), _next_tok in zip(padded, padded[1:]):
        if tok_num == token.NAME:
            name = tok_val
            if name in global_dict:
                obj = global_dict[name]
                if isinstance(obj, (Basic, type)) or callable(obj):
                    result.append((token.NAME, name))
                    continue
            try:
                used_name = inv_name_alternatives[str(name)]
            except KeyError:
                used_name = str(name)
            result.extend(
                [
                    (token.NAME, "Symbol"),
                    (token.OP, "("),
                    (token.NAME, repr(used_name)),
                    (token.OP, ","),
                    (token.NAME, "positive"),
                    (token.OP, "="),
                    (token.NAME, "True"),
                    (token.OP, ")"),
                ]
            )
        else:
            result.append((tok_num, tok_val))
    return result
Inserts calls to Symbol for undefined variables . Passes in positive = True as a keyword argument . Adapted from sympy . sympy . parsing . sympy_parser . auto_symbol
58,237
def intersection(self, range):
    """Calculate the intersection with another range object, or None when
    the ranges do not overlap."""
    if self.worksheet != range.worksheet:
        # Ranges on different worksheets never intersect.
        return None
    start = (
        max(self._start[0], range._start[0]),
        max(self._start[1], range._start[1]),
    )
    end = (
        min(self._end[0], range._end[0]),
        min(self._end[1], range._end[1]),
    )
    if end[0] < start[0] or end[1] < start[1]:
        return None
    return Range(start, end, self.worksheet, validate=False)
Calculates the intersection with another range object
58,238
def interprocess_locked(path):
    """Decorator factory: acquire & release an interprocess lock at
    ``path`` around each call of the decorated function."""
    lock = InterProcessLock(path)

    def decorator(f):
        @six.wraps(f)
        def wrapper(*args, **kwargs):
            with lock:
                return f(*args, **kwargs)

        return wrapper

    return decorator
Acquires & releases an interprocess lock around call into decorated function .
58,239
def acquire(self, blocking=True, delay=DELAY_INCREMENT, max_delay=MAX_DELAY, timeout=None):
    """Attempt to acquire the lock, retrying with increasing delay up to
    ``max_delay`` and an optional overall ``timeout``.

    Returns True on success and False otherwise; sets ``self.acquired``.
    """
    if delay < 0:
        raise ValueError("Delay must be greater than or equal to zero")
    if timeout is not None and timeout < 0:
        raise ValueError("Timeout must be greater than or equal to zero")
    if delay >= max_delay:
        max_delay = delay
    self._do_open()
    watch = _utils.StopWatch(duration=timeout)
    retry = _utils.Retry(delay, max_delay, sleep_func=self.sleep_func, watch=watch)
    with watch:
        gotten = retry(self._try_acquire, blocking, watch)
    if not gotten:
        self.acquired = False
        return False
    self.acquired = True
    self.logger.log(
        _utils.BLATHER,
        "Acquired file lock `%s` after waiting %0.3fs [%s"
        " attempts were required]",
        self.path,
        watch.elapsed(),
        retry.attempts,
    )
    return True
Attempt to acquire the given lock .
58,240
def release(self):
    """Release the previously acquired lock, then close its file handle;
    raises ThreadError if the lock was never acquired."""
    if not self.acquired:
        raise threading.ThreadError("Unable to release an unacquired lock")
    try:
        self.unlock()
    except IOError:
        self.logger.exception(
            "Could not unlock the acquired lock opened on `%s`", self.path
        )
    else:
        self.acquired = False
        try:
            self._do_close()
        except IOError:
            self.logger.exception(
                "Could not close the file handle opened on `%s`", self.path
            )
        else:
            self.logger.log(
                _utils.BLATHER,
                "Unlocked and closed file lock open on `%s`",
                self.path,
            )
Release the previously acquired lock .
58,241
def canonicalize_path(path):
    """Canonicalize a potential path into filesystem-encoded bytes."""
    if isinstance(path, six.binary_type):
        return path
    if isinstance(path, six.text_type):
        return _fsencode(path)
    # Anything else: stringify and recurse once.
    return canonicalize_path(str(path))
Canonicalizes a potential path .
58,242
def read_locked(*args, **kwargs):
    """Acquire & release a read lock around the decorated method.

    Usable bare (``@read_locked``) or with a lock attribute name
    (``@read_locked(lock='_my_lock')``; default attribute is ``_lock``).
    """
    def decorator(f):
        attr_name = kwargs.get('lock', '_lock')

        @six.wraps(f)
        def wrapper(self, *f_args, **f_kwargs):
            rw_lock = getattr(self, attr_name)
            with rw_lock.read_lock():
                return f(self, *f_args, **f_kwargs)

        return wrapper

    if kwargs or not args:
        return decorator
    if len(args) == 1:
        return decorator(args[0])
    return decorator
Acquires & releases a read lock around call into decorated method .
58,243
def write_locked(*args, **kwargs):
    """Acquire & release a write lock around the decorated method.

    Usable bare (``@write_locked``) or with a lock attribute name
    (``@write_locked(lock='_my_lock')``; default attribute is ``_lock``).
    """
    def decorator(f):
        attr_name = kwargs.get('lock', '_lock')

        @six.wraps(f)
        def wrapper(self, *f_args, **f_kwargs):
            rw_lock = getattr(self, attr_name)
            with rw_lock.write_lock():
                return f(self, *f_args, **f_kwargs)

        return wrapper

    if kwargs or not args:
        return decorator
    if len(args) == 1:
        return decorator(args[0])
    return decorator
Acquires & releases a write lock around call into decorated method .
58,244
def is_writer(self, check_pending=True):
    """Return whether the caller is the active writer (or, when
    ``check_pending``, a pending writer)."""
    me = self._current_thread()
    if self._writer == me:
        return True
    if check_pending:
        return me in self._pending_writers
    return False
Returns if the caller is the active writer or a pending writer .
58,245
def owner(self):
    """Return WRITER/READER depending on who holds the lock, or None when
    it is unlocked."""
    if self._writer is not None:
        return self.WRITER
    if self._readers:
        return self.READER
    return None
Returns whether the lock is locked by a writer or reader .
58,246
def read_lock(self):
    """Context manager that grants a (reentrant) read lock.

    Raises RuntimeError if the caller is a pending writer; a thread may
    hold several read locks at once.
    """
    me = self._current_thread()
    if me in self._pending_writers:
        raise RuntimeError("Writer %s can not acquire a read lock"
                           " while waiting for the write lock" % me)
    with self._cond:
        # Wait until there is no other active writer.
        while self._writer is not None and self._writer != me:
            self._cond.wait()
        self._readers[me] = self._readers.get(me, 0) + 1
    try:
        yield self
    finally:
        with self._cond:
            try:
                remaining = self._readers[me] - 1
                if remaining:
                    self._readers[me] = remaining
                else:
                    self._readers.pop(me)
            except KeyError:
                pass
            self._cond.notify_all()
Context manager that grants a read lock .
58,247
def write_lock(self):
    """Context manager that grants a write lock.

    Reentrant for the current writer; readers may not escalate to
    writers.  Pending writers are served in FIFO order.
    """
    me = self._current_thread()
    i_am_writer = self.is_writer(check_pending=False)
    if self.is_reader() and not i_am_writer:
        raise RuntimeError("Reader %s to writer privilege"
                           " escalation not allowed" % me)
    if i_am_writer:
        # Already the writer: reentrant grant, nothing to release.
        yield self
        return
    with self._cond:
        self._pending_writers.append(me)
        # Wait until there are no readers, no writer, and we are first
        # in the pending queue.
        while True:
            if (len(self._readers) == 0 and self._writer is None
                    and self._pending_writers[0] == me):
                self._writer = self._pending_writers.popleft()
                break
            self._cond.wait()
    try:
        yield self
    finally:
        with self._cond:
            self._writer = None
            self._cond.notify_all()
Context manager that grants a write lock .
58,248
def send_sms(self, text, **kw):
    """Send an SMS.  Since Free only allows sending SMSes to yourself, no
    destination phone number is needed.

    NOTE(review): TLS certificate verification defaults to OFF
    (verify=False) and the resulting warnings are silenced — consider
    making verification the default.
    """
    params = {'user': self._user, 'pass': self._passwd, 'msg': text}
    kw.setdefault("verify", False)
    if not kw["verify"]:
        requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
    response = requests.get(FreeClient.BASE_URL, params=params, **kw)
    return FreeResponse(response.status_code)
Send an SMS . Since Free only allows us to send SMSes to ourselves you don t have to provide your phone number .
58,249
def create_filebase_name(self, group_info, extension='gz', file_name=None):
    """Return a (destination folder, file name) tuple; when no file name
    is given one is derived from the filebase prefix template."""
    dirname = self.filebase.formatted_dirname(groups=group_info)
    if not file_name:
        file_name = '{}.{}'.format(self.filebase.prefix_template, extension)
    return dirname, file_name
Return tuple of resolved destination folder name and file name
58,250
def write_batch(self, batch):
    """Receive a batch of items and write it, updating per-key occurrence
    counters; raises ItemsLimitReached once items_limit is hit.

    Usually called from a manager.
    """
    for item in batch:
        for key in item:
            self.aggregated_info['occurrences'][key] += 1
        self.increment_written_items()
        if self.items_limit and self.items_limit == self.get_metadata('items_count'):
            raise ItemsLimitReached(
                'Finishing job after items_limit reached:'
                ' {} items written.'.format(self.get_metadata('items_count')))
    self.logger.debug('Wrote items')
Receives the batch and writes it . This method is usually called from a manager .
58,251
def _get_aggregated_info ( self ) : agg_results = { } for key in self . aggregated_info [ 'occurrences' ] : agg_results [ key ] = { 'occurrences' : self . aggregated_info [ 'occurrences' ] . get ( key ) , 'coverage' : ( float ( self . aggregated_info [ 'occurrences' ] . get ( key ) ) / float ( self . get_metadata ( 'items_count' ) ) ) * 100 } return agg_results
Keeps track of aggregated info in a dictionary called self . aggregated_info
58,252
def create_document_batches(jsonlines, id_field, max_batch_size=CLOUDSEARCH_MAX_BATCH_SIZE):
    """Create batches in the AWS Cloudsearch "add" document format,
    limiting the byte size per batch to ``max_batch_size``.

    Yields JSON-array strings of the form
    ``[{"type":"add","id":...,"fields":...}, ...]``.
    """
    batch = []
    fixed_initial_size = 2  # the surrounding '[' and ']'

    def create_entry(line):
        try:
            record = json.loads(line)
        except (ValueError, TypeError):
            # Narrowed from a bare except, which also swallowed
            # KeyboardInterrupt/SystemExit. JSONDecodeError is a
            # ValueError subclass; TypeError covers non-string input.
            raise ValueError('Could not parse JSON from: %s' % line)
        key = record[id_field]
        return '{"type":"add","id":%s,"fields":%s}' % (json.dumps(key), line)

    current_size = fixed_initial_size
    for line in jsonlines:
        entry = create_entry(line)
        entry_size = len(entry) + 1  # +1 for the joining comma
        if max_batch_size > (current_size + entry_size):
            current_size += entry_size
            batch.append(entry)
        else:
            yield '[' + ','.join(batch) + ']'
            batch = [entry]
            current_size = fixed_initial_size + entry_size
    if batch:
        yield '[' + ','.join(batch) + ']'
Create batches in expected AWS Cloudsearch format limiting the byte size per batch according to given max_batch_size
58,253
def _post_document_batch(self, batch):
    """POST a batch to the Cloudsearch document-batch endpoint."""
    url = self.endpoint_url + '/2013-01-01/documents/batch'
    headers = {'Content-type': 'application/json'}
    return requests.post(url, data=batch, headers=headers)
Send a batch to Cloudsearch endpoint
58,254
def _create_path_if_not_exist ( self , path ) : if path and not os . path . exists ( path ) : os . makedirs ( path )
Creates a folders path if it doesn t exist
58,255
def close(self):
    """Clean up all temporary files created during the process, updating
    the saved pointer first when configured to do so."""
    if self.read_option('save_pointer'):
        self._update_last_pointer()
    super(S3Writer, self).close()
Called to clean all possible tmp files created during the process .
58,256
def get_boto_connection(aws_access_key_id, aws_secret_access_key, region=None, bucketname=None, host=None):
    """Build a boto S3 connection.

    Connection parameters must differ only when the bucket name contains
    a period (which requires OrdinaryCallingFormat).
    """
    m = _AWS_ACCESS_KEY_ID_RE.match(aws_access_key_id)
    if m is None or m.group() != aws_access_key_id:
        logging.error(
            'The provided aws_access_key_id is not in the correct format. It must \
be alphanumeric and contain between 16 and 32 characters.')
    if len(aws_access_key_id) > len(aws_secret_access_key):
        # logging.warn is a deprecated alias; use logging.warning.
        logging.warning(
            "The AWS credential keys aren't in the usual size,"
            " are you using the correct ones?")
    import boto
    from boto.s3.connection import OrdinaryCallingFormat

    extra_args = {}
    if host is not None:
        extra_args['host'] = host
    if bucketname is not None and '.' in bucketname:
        # Dotted bucket names break SSL cert validation with the default
        # subdomain calling format.
        extra_args['calling_format'] = OrdinaryCallingFormat()
    if region is None:
        return boto.connect_s3(aws_access_key_id, aws_secret_access_key, **extra_args)
    return boto.s3.connect_to_region(
        region,
        aws_access_key_id=aws_access_key_id,
        aws_secret_access_key=aws_secret_access_key,
        **extra_args
    )
Conection parameters must be different only if bucket name has a period
58,257
def maybe_cast_list(value, types):
    """Try to coerce a list value into the first constructible list
    subclass found in ``types``; non-lists pass through unchanged."""
    if not isinstance(value, list):
        return value
    if type(types) not in (list, tuple):
        types = (types,)
    for candidate in types:
        if issubclass(candidate, list):
            try:
                return candidate(value)
            except (TypeError, ValueError):
                pass
    return value
Try to coerce list values into more specific list subclasses in types .
58,258
def iterate_chunks(file, chunk_size):
    """Yield chunks of up to ``chunk_size`` from a file-like object until
    a falsy (empty) read."""
    while True:
        chunk = file.read(chunk_size)
        if not chunk:
            break
        yield chunk
Iterate chunks of size chunk_size from a file - like object
58,259
def unshift(self, chunk):
    """Push a chunk of data back into the internal buffer.

    Useful when a consumer optimistically pulled more data out of the
    source than it needed and wants to pass the rest on.
    """
    if not chunk:
        return
    self._pos -= len(chunk)
    self._unconsumed.append(chunk)
Pushes a chunk of data back into the internal buffer . This is useful in certain situations where a stream is being consumed by code that needs to un - consume some amount of data that it has optimistically pulled out of the source so that the data can be passed on to some other party .
58,260
def readline(self):
    """Read until a newline character is encountered.

    Accumulates chunks until a ``'\\n'`` shows up; anything pulled past the
    newline is handed back via ``unshift``. If the source is exhausted
    before a newline appears, whatever was accumulated is returned.
    """
    buffered = ""
    newline_at = -1
    try:
        while newline_at < 0:
            buffered += self.next_chunk()
            newline_at = buffered.find('\n')
    except StopIteration:
        pass  # source exhausted: fall through with a partial (or empty) line
    if newline_at >= 0:
        cut = newline_at + 1
        self.unshift(buffered[cut:])
        buffered = buffered[:cut]
    return buffered
Read until a new - line character is encountered
58,261
def close(self):
    """Disable all operations and close the underlying file-like object, if any."""
    # Only attempt a close when the wrapped object actually exposes a
    # callable ``close`` (bare iterables/generators are skipped).
    if callable(getattr(self._file, 'close', None)):
        # NOTE(review): the presence check is on ``self._file`` but the close
        # is issued on ``self._iterator`` -- presumably the iterator wraps the
        # file and closing it releases both; confirm against the constructor.
        self._iterator.close()
    # Drop references and mark the stream unusable for further reads.
    self._iterator = None
    self._unconsumed = None
    self.closed = True
Disable all operations and close the underlying file-like object, if any.
58,262
def configuration_from_uri(uri, uri_regex):
    """Return a resumable configuration object loaded from a pickled file.

    The file path is extracted from *uri* using *uri_regex* (first group).
    The file stores a pickled dict whose 'configuration' entry is a YAML
    string; the parsed configuration is flagged for resume and tagged with
    the persistence state id (the file's basename).
    """
    file_path = re.match(uri_regex, uri).groups()[0]
    # Pickle streams are binary: open in 'rb' so unpickling does not break
    # on text-mode newline translation.
    # WARNING: pickle.load executes arbitrary code; only load trusted files.
    with open(file_path, 'rb') as f:
        configuration = pickle.load(f)['configuration']
    configuration = yaml.safe_load(configuration)
    configuration['exporter_options']['resume'] = True
    persistence_state_id = file_path.split(os.path.sep)[-1]
    configuration['exporter_options']['persistence_state_id'] = persistence_state_id
    return configuration
returns a configuration object .
58,263
def buffer(self, item):
    """Receive an item and append it to the in-progress file for its group.

    The grouping key is derived from the item itself; a separator is
    written before every item except the first one of the file.
    """
    key = self.get_key_from_item(item)
    # Only items after the first need a leading separator.
    if not self.grouping_info.is_first_file_item(key):
        self.items_group_files.add_item_separator_to_file(key)
    # Make sure bookkeeping for this group exists before writing.
    self.grouping_info.ensure_group_info(key)
    self.items_group_files.add_item_to_file(item, key)
Receive an item and write it .
58,264
def parse_persistence_uri(cls, persistence_uri):
    """Parse a database URI and the persistence state ID from *persistence_uri*.

    Raises ValueError when the URI does not match ``cls.persistence_uri_re``
    or when required components (proto, job_id, database) are missing.
    Returns a ``(db_uri, persistence_state_id)`` tuple.
    """
    pattern = cls.persistence_uri_re
    matched = re.match(pattern, persistence_uri)
    if matched is None:
        raise ValueError("Couldn't parse persistence URI: %s -- regex: %s)"
                         % (persistence_uri, pattern))
    params = matched.groupdict()
    absent = {'proto', 'job_id', 'database'} - set(params)
    if absent:
        raise ValueError('Missing required parameters: %s (given params: %s)'
                         % (tuple(absent), params))
    state_id = int(params.pop('job_id'))
    return cls.build_db_conn_uri(**params), state_id
Parse a database URI and the persistence state ID from the given persistence URI
58,265
def configuration_from_uri(cls, persistence_uri):
    """Return a configuration object restored from a persisted DB job row.

    The persistence URI yields a DB connection URI plus the job id; the
    stored YAML configuration is loaded and flagged for resume.
    """
    db_uri, persistence_state_id = cls.parse_persistence_uri(persistence_uri)
    engine = create_engine(db_uri)
    # Ensure the schema exists before querying (idempotent on reruns).
    Base.metadata.create_all(engine)
    Base.metadata.bind = engine
    DBSession = sessionmaker(bind=engine)
    session = DBSession()
    # NOTE(review): ``job`` is None when the id is unknown -- the attribute
    # access below would then raise AttributeError; confirm callers validate.
    job = session.query(Job).filter(Job.id == persistence_state_id).first()
    configuration = job.configuration
    configuration = yaml.safe_load(configuration)
    # Mark the restored configuration so the exporter resumes this job.
    configuration['exporter_options']['resume'] = True
    configuration['exporter_options']['persistence_state_id'] = persistence_state_id
    return configuration
Return a configuration object .
58,266
def _get_input_files(cls, input_specification):
    """Get the list of input files according to the input definition.

    Accepts a single string/dict or a list of them. Strings are taken as
    file paths verbatim; dicts describe a directory (either ``dir`` or,
    exclusively, ``dir_pointer``) whose matching files are expanded.

    Raises ConfigurationError for malformed specifications.
    """
    # Normalize a bare string/dict into a one-element list.
    if isinstance(input_specification, (basestring, dict)):
        input_specification = [input_specification]
    elif not isinstance(input_specification, list):
        raise ConfigurationError("Input specification must be string, list or dict.")
    out = []
    for input_unit in input_specification:
        if isinstance(input_unit, basestring):
            out.append(input_unit)
        elif isinstance(input_unit, dict):
            # Sentinel distinguishes "key absent" from an explicit None value.
            missing = object()
            directory = input_unit.get('dir', missing)
            dir_pointer = input_unit.get('dir_pointer', missing)
            if directory is missing and dir_pointer is missing:
                raise ConfigurationError('Input directory dict must contain'
                                         ' "dir" or "dir_pointer" element (but not both)')
            if directory is not missing and dir_pointer is not missing:
                raise ConfigurationError('Input directory dict must not contain'
                                         ' both "dir" and "dir_pointer" elements')
            if dir_pointer is not missing:
                # Resolve the pointer file to the actual directory path.
                directory = cls._get_pointer(dir_pointer)
            out.extend(cls._get_directory_files(
                directory=directory,
                pattern=input_unit.get('pattern'),
                include_dot_files=input_unit.get('include_dot_files', False)))
        else:
            raise ConfigurationError('Input must only contain strings or dicts')
    return out
Get list of input files according to input definition .
58,267
def consume_messages(self, batchsize):
    """Yield up to *batchsize* messages from the reservoir.

    When the reservoir is already empty, ``self.finished`` is flagged and
    nothing is yielded. Consumed messages are dropped from the reservoir
    only after the whole batch has been yielded.
    """
    if not self._reservoir:
        # Nothing left to consume: flag completion and stop.
        self.finished = True
        return
    for message in self._reservoir[:batchsize]:
        yield message
    self._reservoir = self._reservoir[batchsize:]
Get messages batch from the reservoir
58,268
def decompress_messages(self, offmsgs):
    """Yield ``(key, decompressed_value)`` pairs for each offset message.

    Messages should be unpacked before this step; the configured
    ``decompress_fun`` is applied to every message value.
    """
    for offmsg in offmsgs:
        inner = offmsg.message
        yield inner.key, self.decompress_fun(inner.value)
Decompress pre-defined compressed fields for each message. Messages should be unpacked before this step.
58,269
def filter_batch(self, batch):
    """Receive a batch, filter it, and yield the items that pass.

    Items rejected by ``self.filter`` increment the ``filtered_out``
    metadata counter; ``self.total`` counts every item seen.
    """
    for item in batch:
        if self.filter(item):
            yield item
        else:
            self.set_metadata('filtered_out',
                              self.get_metadata('filtered_out') + 1)
        # Progress accounting covers kept and rejected items alike.
        self.total += 1
        self._log_progress()
Receives the batch, filters it, and returns it.
58,270
def write_batch(self, batch):
    """Buffer a batch of items to be written and update internal counters.

    Each item is buffered under its group key; when a group's buffer is
    deemed ready it is flushed immediately. The written-items counter and
    the items-limit check run once per item.
    """
    write_buffer = self.write_buffer
    for element in batch:
        write_buffer.buffer(element)
        group_key = write_buffer.get_key_from_item(element)
        if write_buffer.should_write_buffer(group_key):
            self._write_current_buffer_for_group_key(group_key)
        self.increment_written_items()
        self._check_items_limit()
Buffer a batch of items to be written and update internal counters .
58,271
def _check_items_limit ( self ) : if self . items_limit and self . items_limit == self . get_metadata ( 'items_count' ) : raise ItemsLimitReached ( 'Finishing job after items_limit reached:' ' {} items written.' . format ( self . get_metadata ( 'items_count' ) ) )
Raise ItemsLimitReached if the writer reached the configured items limit .
58,272
def flush(self):
    """Ensure all remaining group buffers with pending data are written."""
    for group_key in self.grouping_info.keys():
        if not self._should_flush(group_key):
            continue
        self._write_current_buffer_for_group_key(group_key)
Ensure all remaining buffers are written .
58,273
def has_manifest(app, filename='manifest.json'):
    """Verify the existence of a JSON assets manifest for *app*."""
    try:
        return pkg_resources.resource_exists(app, filename)
    except ImportError:
        # *app* is not an importable package: fall back to a plain
        # filesystem check, which only makes sense for absolute paths.
        return os.path.isabs(filename) and os.path.exists(filename)
Verify the existence of a JSON assets manifest.
58,274
def register_manifest(app, filename='manifest.json'):
    """Register an assets JSON manifest for *app*.

    Skipped entirely under TESTING. Raises ValueError when the manifest
    file cannot be found. Manifests registered for the same app are merged.
    """
    if current_app.config.get('TESTING'):
        return
    if not has_manifest(app, filename):
        # Name the missing file in the error (the previous message printed
        # a literal "(unknown)" placeholder instead of the filename).
        msg = '{filename} not found for {app}'.format(**locals())
        raise ValueError(msg)
    manifest = _manifests.get(app, {})
    manifest.update(load_manifest(app, filename))
    _manifests[app] = manifest
Register an assets json manifest
58,275
def load_manifest(app, filename='manifest.json'):
    """Load an assets JSON manifest and remember where it came from.

    Absolute filenames are read directly; relative ones are resolved
    inside the *app* package. The resolved path is recorded in
    ``_registered_manifests`` for later lookups.
    """
    if os.path.isabs(filename):
        path = filename
    else:
        path = pkg_resources.resource_filename(app, filename)
    with io.open(path, mode='r', encoding='utf8') as stream:
        data = json.load(stream)
    _registered_manifests[app] = path
    return data
Load an assets json manifest
58,276
def from_manifest(app, filename, raw=False, **kwargs):
    """Get the path (or full URL) to a static file from *app*'s manifest.

    Under TESTING nothing is returned. With a CDN configured (and CDN
    debugging off), an absolute CDN URL is built; with ``external=True``
    an absolute URL on the current host is built; otherwise the raw
    manifest path is returned as-is.
    """
    cfg = current_app.config
    if current_app.config.get('TESTING'):
        return
    path = _manifests[app][filename]
    if not raw and cfg.get('CDN_DOMAIN') and not cfg.get('CDN_DEBUG'):
        # Mirror the current request scheme unless HTTPS is forced.
        scheme = 'https' if cfg.get('CDN_HTTPS') else request.scheme
        prefix = '{}://'.format(scheme)
        if not path.startswith('/'):
            path = '/' + path
        return ''.join((prefix, cfg['CDN_DOMAIN'], path))
    elif not raw and kwargs.get('external', False):
        # host_url already ends with '/', so strip the leading one here.
        if path.startswith('/'):
            path = path[1:]
        return ''.join((request.host_url, path))
    return path
Get the path to a static file for a given app entry of a given type .
58,277
def cdn_for(endpoint, **kwargs):
    """Get a CDN URL for a static asset, falling back to ``url_for``.

    The CDN is used only when CDN_DOMAIN is set and CDN_DEBUG is off.
    """
    if current_app.config['CDN_DOMAIN']:
        if not current_app.config.get('CDN_DEBUG'):
            # CDN URLs are always absolute; _external would be redundant.
            kwargs.pop('_external', None)
            return cdn_url_for(endpoint, **kwargs)
    return url_for(endpoint, **kwargs)
Get a CDN URL for a static assets .
58,278
def get_or_create(self, write_concern=None, auto_save=True, *q_objs, **query):
    """Retrieve a unique document, creating it when it does not exist.

    ``defaults`` (popped from the query kwargs) are applied only when a
    new document has to be built. Returns a ``(document, created)`` tuple;
    the new document is saved unless ``auto_save`` is False.
    """
    defaults = query.pop('defaults', {})
    try:
        existing = self.get(*q_objs, **query)
    except self._document.DoesNotExist:
        query.update(defaults)
        fresh = self._document(**query)
        if auto_save:
            fresh.save(write_concern=write_concern)
        return fresh, True
    return existing, False
Retrieve unique object or create if it doesn t exist .
58,279
def generic_in(self, **kwargs):
    """Bypass the buggy GenericReferenceField ``$in`` querying issue.

    Builds a raw Mongo query matching references by ``_ref``/``_ref.$id``
    for each keyword argument, accepting strings, ObjectIds, DBRefs, or
    homogeneous lists of them.
    """
    query = {}
    for key, value in kwargs.items():
        # Skip falsy values (None, empty string/list) entirely.
        if not value:
            continue
        # Unwrap single-element sequences so they hit the scalar branches.
        if isinstance(value, (list, tuple)) and len(value) == 1:
            value = value[0]
        if isinstance(value, (list, tuple)):
            if all(isinstance(v, basestring) for v in value):
                ids = [ObjectId(v) for v in value]
                query['{0}._ref.$id'.format(key)] = {'$in': ids}
            elif all(isinstance(v, DBRef) for v in value):
                query['{0}._ref'.format(key)] = {'$in': value}
            elif all(isinstance(v, ObjectId) for v in value):
                query['{0}._ref.$id'.format(key)] = {'$in': value}
            # NOTE(review): a list of mixed types falls through silently
            # and contributes nothing to the query -- confirm intended.
        elif isinstance(value, ObjectId):
            query['{0}._ref.$id'.format(key)] = value
        elif isinstance(value, basestring):
            query['{0}._ref.$id'.format(key)] = ObjectId(value)
        else:
            self.error('expect a list of string, ObjectId or DBRef')
    return self(__raw__=query)
Bypass buggy GenericReferenceField querying issue
58,280
def issues_notifications(user):
    """Build ``(created, payload)`` notification tuples for the user's open issues."""
    notifications = []
    qs = issues_for(user).only('id', 'title', 'created', 'subject')
    # no_dereference avoids fetching each referenced subject document;
    # the raw DBRef (_ref) and class name (_cls) are read directly instead.
    for issue in qs.no_dereference():
        notifications.append((issue.created, {
            'id': issue.id,
            'title': issue.title,
            'subject': {
                'id': issue.subject['_ref'].id,
                'type': issue.subject['_cls'].lower(),
            }
        }))
    return notifications
Notify user about open issues
58,281
def get_config(key):
    """Get an avatar configuration parameter.

    Resolution order: application config, then the current theme, then
    the module-level DEFAULTS.
    """
    key = 'AVATAR_{0}'.format(key.upper())
    local_config = current_app.config.get(key)
    # NOTE(review): falsy-but-valid values (0, '', False) set in app config
    # fall through to theme/defaults because of the ``or`` -- confirm intended.
    return local_config or getattr(theme.current, key, DEFAULTS[key])
Get an identicon configuration parameter .
58,282
def get_provider():
    """Get the avatar provider selected by the current configuration.

    Raises ValueError when the configured provider name is not among the
    registered ``udata.avatars`` entry points.
    """
    wanted = get_config('provider')
    providers = entrypoints.get_all('udata.avatars')
    try:
        return providers[wanted]
    except KeyError:
        raise ValueError('Unknown avatar provider: {0}'.format(wanted))
Get the current provider from config
58,283
def generate_pydenticon(identifier, size):
    """Use pydenticon to generate an identicon PNG.

    All generator parameters (block grid size, colors, padding percentage)
    are extracted from configuration.
    """
    blocks_size = get_internal_config('size')
    foreground = get_internal_config('foreground')
    background = get_internal_config('background')
    generator = pydenticon.Generator(blocks_size, blocks_size,
                                     digest=hashlib.sha1,
                                     foreground=foreground,
                                     background=background)
    # Padding is configured as a percentage of the requested size; the
    # drawn image is shrunk so that image + padding equals *size*.
    padding = int(round(get_internal_config('padding') * size / 100.))
    size = size - 2 * padding
    padding = (padding,) * 4
    return generator.generate(identifier, size, size,
                              padding=padding, output_format='png')
Use pydenticon to generate an identicon image . All parameters are extracted from configuration .
58,284
def adorable(identifier, size):
    """Adorable Avatars provider: redirect to the remote avatar URL."""
    url = ADORABLE_AVATARS_URL.format(identifier=identifier, size=size)
    return redirect(url)
Adorable Avatars provider
58,285
def licenses(source=DEFAULT_LICENSE_FILE):
    """Feed the licenses from a JSON file (local path or HTTP URL)."""
    if source.startswith('http'):
        json_licenses = requests.get(source).json()
    else:
        with open(source) as fp:
            json_licenses = json.load(fp)
    # A non-empty payload replaces the whole collection.
    if len(json_licenses):
        log.info('Dropping existing licenses')
        License.drop_collection()
    for json_license in json_licenses:
        # Collect the boolean flags declared in the source document.
        flags = []
        for field, flag in FLAGS_MAP.items():
            if json_license.get(field, False):
                flags.append(flag)
        license = License.objects.create(
            id=json_license['id'],
            title=json_license['title'],
            url=json_license['url'] or None,
            maintainer=json_license['maintainer'] or None,
            flags=flags,
            active=json_license.get('active', False),
            alternate_urls=json_license.get('alternate_urls', []),
            alternate_titles=json_license.get('alternate_titles', []),
        )
        log.info('Added license "%s"', license.title)
    # Guarantee the default license always exists after a feed.
    try:
        License.objects.get(id=DEFAULT_LICENSE['id'])
    except License.DoesNotExist:
        License.objects.create(**DEFAULT_LICENSE)
        log.info('Added license "%s"', DEFAULT_LICENSE['title'])
    success('Done')
Feed the licenses from a JSON file
58,286
def fetch_objects(self, geoids):
    """Custom object retrieval: resolve a list of geoids into GeoZones.

    Raises a ValidationError naming every geoid that could not be resolved.
    """
    zones = []
    no_match = []
    for geoid in geoids:
        zone = GeoZone.objects.resolve(geoid)
        if zone:
            zones.append(zone)
        else:
            no_match.append(geoid)
    if no_match:
        msg = _('Unknown geoid(s): {identifiers}').format(
            identifiers=', '.join(str(id) for id in no_match))
        raise validators.ValidationError(msg)
    return zones
Custom object retrieval .
58,287
def lrun(command, *args, **kwargs):
    """Run a command from the project root.

    NOTE(review): *command* is interpolated into a shell string
    (``cd ROOT && command``); only call this with trusted input.
    """
    return run('cd {0} && {1}'.format(ROOT, command), *args, **kwargs)
Run a local command from project root
58,288
def initialize(self):
    """Detect the source format, parse its graph and store it on the job.

    Format detection first tries the URL's extension, then falls back to
    the Content-Type from a HEAD request. Raises ValueError when no
    usable format can be determined.
    """
    fmt = guess_format(self.source.url)
    if not fmt:
        # Extension was inconclusive: sniff the mime type remotely.
        response = requests.head(self.source.url)
        mime_type = response.headers.get('Content-Type', '').split(';', 1)[0]
        if not mime_type:
            msg = 'Unable to detect format from extension or mime type'
            raise ValueError(msg)
        fmt = guess_format(mime_type)
        if not fmt:
            msg = 'Unsupported mime type "{0}"'.format(mime_type)
            raise ValueError(msg)
    graph = self.parse_graph(self.source.url, fmt)
    # Serialize as JSON-LD so the graph can be persisted with the job data.
    self.job.data = {'graph': graph.serialize(format='json-ld', indent=None)}
Initialize the harvest job: detect the source format, parse its graph and store it on the job.
58,289
def get_tasks():
    """Map every known application task name to its routing queue.

    Celery-internal tasks and test fixtures are excluded.
    """
    tasks = {}
    for name, cls in celery.tasks.items():
        if name.startswith('celery.') or name.startswith('test-'):
            continue
        tasks[name] = get_task_queue(name, cls)
    return tasks
Get a list of known tasks with their routing queue
58,290
def tasks():
    """Display registered tasks with their queue, name-aligned."""
    tasks = get_tasks()
    if not tasks:
        # Guard: max() below raises ValueError on an empty task registry.
        return
    size = max(len(name) for name in tasks)
    for name, queue in sorted(tasks.items()):
        print('* {0}: {1}'.format(name.ljust(size), queue))
Display registered tasks with their queue
58,291
def status(queue, munin, munin_config):
    """List queued tasks aggregated by name.

    With *munin_config* set, only the munin configuration is printed.
    The trailing separator line is omitted in munin output mode.
    """
    if munin_config:
        return status_print_config(queue)
    queues = get_queues(queue)
    for queue in queues:
        status_print_queue(queue, munin=munin)
    if not munin:
        print('-' * 40)
List queued tasks aggregated by name
58,292
def pre_validate(self, form):
    """Call registered preprocessors before standard pre-validation."""
    for preprocessor in self._preprocessors:
        preprocessor(form, self)
    super(FieldHelper, self).pre_validate(form)
Calls preprocessors before pre_validation
58,293
def process_formdata(self, valuelist):
    """Process form data, replacing empty (falsy) values by None."""
    super(EmptyNone, self).process_formdata(valuelist)
    self.data = self.data or None
Replace empty values by None
58,294
def fetch_objects(self, oids):
    """Fetch models from a list of identifiers, preserving input order.

    Raises a ValidationError naming every identifier that did not resolve.
    """
    objects = self.model.objects.in_bulk(oids)
    # in_bulk silently drops unknown ids; detect them by count mismatch.
    if len(objects.keys()) != len(oids):
        non_existants = set(oids) - set(objects.keys())
        msg = _('Unknown identifiers: {identifiers}').format(
            identifiers=', '.join(str(ne) for ne in non_existants))
        raise validators.ValidationError(msg)
    return [objects[id] for id in oids]
This methods is used to fetch models from a list of identifiers .
58,295
def validate(self, form, extra_validators=tuple()):
    """Perform validation only if data has been submitted."""
    if not self.has_data:
        return True
    if self.is_list_data:
        # List-typed fields must actually carry a sequence payload.
        if not isinstance(self._formdata[self.name], (list, tuple)):
            return False
    return super(NestedModelList, self).validate(form, extra_validators)
Perform validation only if data has been submitted
58,296
def _add_entry(self, formdata=None, data=unset_value, index=None):
    """Fill the form with previous data if necessary to handle partial update."""
    if formdata:
        prefix = '-'.join((self.name, str(index)))
        basekey = '-'.join((prefix, '{0}'))
        idkey = basekey.format('id')
        # A bare "<name>-<index>" value is the entry's id; normalize it.
        if prefix in formdata:
            formdata[idkey] = formdata.pop(prefix)
        if hasattr(self.nested_model, 'id') and idkey in formdata:
            id = self.nested_model.id.to_python(formdata[idkey])
            # NOTE(review): get_by may return None for an unknown id, which
            # would make data.to_mongo() raise -- confirm callers guarantee
            # the id exists in initial_data.
            data = get_by(self.initial_data, 'id', id)
            initial = flatten_json(self.nested_form, data.to_mongo(), prefix)
            # Backfill only the fields the submission did not provide.
            for key, value in initial.items():
                if key not in formdata:
                    formdata[key] = value
        else:
            data = None
    return super(NestedModelList, self)._add_entry(formdata, data, index)
Fill the form with previous data if necessary to handle partial update
58,297
def parse(self, data):
    """Parse every field, collecting per-field errors in ``self.field_errors``."""
    self.field_errors = {}
    return {key: self._parse_value(key, value)
            for key, value in data.items()}
Parse fields and store individual errors
58,298
def update(site=False, organizations=False, users=False, datasets=False, reuses=False):
    """Update all metrics for the current date.

    With no flag set, every family of metrics is updated.
    """
    do_all = not any((site, organizations, users, datasets, reuses))
    if do_all or site:
        log.info('Update site metrics')
        update_site_metrics()
    if do_all or datasets:
        log.info('Update datasets metrics')
        # timeout(False) disables the MongoDB cursor timeout for long scans.
        for dataset in Dataset.objects.timeout(False):
            update_metrics_for(dataset)
    if do_all or reuses:
        log.info('Update reuses metrics')
        for reuse in Reuse.objects.timeout(False):
            update_metrics_for(reuse)
    if do_all or organizations:
        log.info('Update organizations metrics')
        for organization in Organization.objects.timeout(False):
            update_metrics_for(organization)
    if do_all or users:
        log.info('Update user metrics')
        for user in User.objects.timeout(False):
            update_metrics_for(user)
    success('All metrics have been updated')
Update all metrics for the current date
58,299
def list():
    """List all known metrics, grouped by model class.

    NOTE(review): shadows the ``list`` builtin -- presumably intentional,
    as CLI command functions are named after their subcommand.
    """
    for cls, metrics in metric_catalog.items():
        echo(white(cls.__name__))
        for metric in metrics.keys():
            echo('> {0}'.format(metric))
List all known metrics