code
string
signature
string
docstring
string
loss_without_docstring
float64
loss_with_docstring
float64
factor
float64
def split(self, new_index):
    """Create a new event which is a copy of this one but with a new index."""
    copy = Event(new_index, self.user_data)
    copy.time = self.time
    copy.step_size = self.step_size
    return copy
4.065165
3.067066
1.325425
def emit(self, event):
    """Notifies all listeners to this channel that an event has occurred.

    Parameters
    ----------
    event : Event
        The event to be emitted.
    """
    # Stamp time-aware events with the current clock and step size.
    if hasattr(event, 'time'):
        event.step_size = self.manager.step_size()
        event.time = self.manager.clock() + self.manager.step_size()

    for bucket in self.listeners:
        # Within a priority bucket, call listeners in name order so the
        # dispatch order is deterministic.
        for callback in sorted(bucket, key=lambda fn: fn.__name__):
            callback(event)
    return event
5.227738
5.62066
0.930093
self.clock = builder.time.clock() self.step_size = builder.time.step_size()
def setup(self, builder)
Performs this components simulation setup. Parameters ---------- builder : vivarium.framework.engine.Builder Object giving access to core framework functionality.
5.541889
6.399233
0.866024
def register_listener(self, name, listener, priority=5):
    """Registers a new listener to the named event.

    Parameters
    ----------
    name : str
        The name of the event.
    listener : Callable
        The consumer of the named event.
    priority : int
        Number in range(10) used to assign the ordering in which listeners
        process the event.
    """
    event_channel = self._event_types[name]
    event_channel.listeners[priority].append(listener)
10.876117
12.270814
0.88634
def get_emitter(self, name: str) -> Callable[[Event], Event]:
    """Gets an emitter for a named event.

    Parameters
    ----------
    name :
        The name of the event the requested emitter will emit. Users may
        provide their own named events by requesting an emitter with this
        function, but should do so with caution as it makes time much more
        difficult to think about.

    Returns
    -------
        An emitter for the named event. The emitter should be called by the
        requesting component at the appropriate point in the simulation
        lifecycle.
    """
    manager = self._event_manager
    return manager.get_emitter(name)
7.873981
15.97967
0.49275
def register_listener(self, name: str, listener: Callable[[Event], None], priority: int=5) -> None:
    """Registers a callable as a listener to events with the given name.

    The listening callable will be called with a named ``Event`` as its only
    argument any time the event emitter is invoked from somewhere in the
    simulation.

    The framework creates the following events and emits them at different
    points in the simulation:

    At the end of the setup phase: ``post_setup``
    Every time step: ``time_step__prepare``, ``time_step``,
    ``time_step__cleanup``, ``collect_metrics``
    At simulation end: ``simulation_end``

    Parameters
    ----------
    name :
        The name of the event to listen for.
    listener :
        The callable to be invoked any time an ``Event`` with the given name
        is emitted.
    priority : {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}
        An indication of the order in which event listeners should be
        called. Listeners with smaller priority values will be called
        earlier. Listeners with the same priority have no guaranteed
        ordering. This feature should be avoided if possible. Components
        should strive to obey the Markov property as they transform the
        state table (the state of the simulation at the beginning of the
        next time step should only depend on the current state of the
        system).
    """
    manager = self._event_manager
    manager.register_listener(name, listener, priority)
6.38831
6.463611
0.98835
def setup(self, builder):
    """Performs this component's simulation setup.

    Parameters
    ----------
    builder : `engine.Builder`
        Interface to several simulation tools.
    """
    super().setup(builder)
    self.clock = builder.time.clock()

    # Excess mortality attributable to this disease state, sourced from the
    # risk-deleted rate.
    self.excess_mortality_rate = builder.value.register_rate_producer(
        f'{self.state_id}.excess_mortality_rate',
        source=self.risk_deleted_excess_mortality_rate)
    # The PAF pipeline starts at zero; contributions from other components
    # are merged via the list combiner and joint-value post processor.
    self.excess_mortality_rate_paf = builder.value.register_value_producer(
        f'{self.state_id}.excess_mortality_rate.population_attributable_fraction',
        source=lambda index: [pd.Series(0, index=index)],
        preferred_combiner=list_combiner,
        preferred_post_processor=joint_value_post_processor)
    builder.value.register_value_modifier('mortality_rate',
                                          self.add_in_excess_mortality)

    # View restricted to living simulants currently in this state.
    self.population_view = builder.population.get_view(
        [self._model],
        query=f"alive == 'alive' and {self._model} == '{self.state_id}'")
6.401775
6.429315
0.995717
def get(self, index: pd.Index, query: str='', omit_missing_columns: bool=False) -> pd.DataFrame:
    """For the rows in ``index`` get the columns from the simulation's
    population which this view is configured.

    The result may be further filtered by the view's query.

    Parameters
    ----------
    index :
        Index of the population to get.
    query :
        Conditions used to filter the index. May use columns not in the
        requested view.
    omit_missing_columns :
        Silently skip loading columns which are not present in the
        population table. In general you want this to be False because that
        situation indicates an error but sometimes, like during population
        initialization, it can be convenient to just load whatever data is
        actually available.

    Returns
    -------
    pd.DataFrame
        A table with the subset of the population requested.
    """
    pop = self.manager.get_population(True).loc[index]

    # Apply the view's built-in filter first, then the caller's filter.
    if self._query:
        pop = pop.query(self._query)
    if query:
        pop = pop.query(query)

    # A view with no configured columns returns the whole (filtered) table.
    if not self._columns:
        return pop

    if omit_missing_columns:
        columns = list(set(self._columns).intersection(pop.columns))
    else:
        columns = self._columns
    try:
        return pop[columns].copy()
    except KeyError as e:
        # Improvement: chain the original KeyError so the root cause is
        # preserved in the traceback.
        non_existent_columns = set(columns) - set(pop.columns)
        raise PopulationError(f'Requested column(s) {non_existent_columns} '
                              f'not in population table.') from e
2.744781
2.727167
1.006459
def update(self, pop: Union[pd.DataFrame, pd.Series]):
    """Update the simulation's state to match ``pop``

    Parameters
    ----------
    pop :
        The data which should be copied into the simulation's state. If
        ``pop`` is a DataFrame only those columns included in the view's
        columns will be used. If ``pop`` is a Series it must have a name
        that matches one of the view's columns unless the view only has one
        column in which case the Series will be assumed to refer to that
        regardless of its name.
    """
    if pop.empty:
        return

    # Work out which of the view's columns the incoming data affects.
    if isinstance(pop, pd.Series):
        if pop.name in self._columns:
            affected_columns = [pop.name]
        elif len(self._columns) == 1:
            affected_columns = self._columns
        else:
            raise PopulationError('Cannot update with a Series unless the series name equals a column '
                                  'name or there is only a single column in the view')
    else:
        affected_columns = set(pop.columns)

    affected_columns = set(affected_columns).intersection(self._columns)
    state_table = self.manager.get_population(True)
    if not self.manager.growing:
        # Outside of population growth, only touch columns that exist.
        affected_columns = set(affected_columns).intersection(state_table.columns)

    for column in affected_columns:
        if column in state_table:
            existing = state_table[column].values
            incoming = pop.values if isinstance(pop, pd.Series) else pop[column].values
            existing[pop.index] = incoming
            if existing.dtype != incoming.dtype:
                # This happens when the population is being grown because
                # extending the index forces columns that don't have a
                # natural null type to become 'object'
                if not self.manager.growing:
                    raise PopulationError('Component corrupting population table. '
                                          'Old column type: {} New column type: {}'.format(existing.dtype,
                                                                                           incoming.dtype))
                existing = existing.astype(incoming.dtype)
        else:
            existing = pop.values if isinstance(pop, pd.Series) else pop[column].values
        self.manager._population[column] = existing
3.612108
3.395793
1.063701
def get_view(self, columns: Sequence[str], query: str=None) -> PopulationView:
    """Return a configured PopulationView

    Notes
    -----
    Client code should only need this (and only through the version exposed
    as ``population_view`` on the builder during setup) if it uses
    dynamically generated column names that aren't known at definition
    time. Otherwise components should use ``uses_columns``.
    """
    if 'tracked' not in columns:
        # Bug fix: the tracked-filter clause must be separated from the
        # caller's query by a space — the original produced e.g.
        # "age > 5and tracked == True", an invalid query expression.
        query_with_track = query + ' and tracked == True' if query else 'tracked == True'
        return PopulationView(self, columns, query_with_track)
    return PopulationView(self, columns, query)
3.882117
4.322356
0.898148
def get_view(self, columns: Sequence[str], query: str = None) -> PopulationView:
    """Get a time-varying view of the population state table.

    The requested population view can be used to view the current state or
    to update the state with new values.

    Parameters
    ----------
    columns :
        A subset of the state table columns that will be available in the
        returned view.
    query :
        A filter on the population state. This filters out particular rows
        (simulants) based on their current state. The query should be
        provided in a way that is understood by the
        ``pandas.DataFrame.query`` method and may reference state table
        columns not requested in the ``columns`` argument.

    Returns
    -------
    PopulationView
        A filtered view of the requested columns of the population state
        table.
    """
    manager = self._population_manager
    return manager.get_view(columns, query)
6.915161
11.495852
0.601535
def get_simulant_creator(self) -> Callable[[int, Union[Mapping[str, Any], None]], pd.Index]:
    """Grabs a reference to the function that creates new simulants (adds
    rows to the state table).

    Returns
    -------
    Callable
       The simulant creator function. The creator function takes the number
       of simulants to be created as its first argument and a dict or other
       mapping of population configuration that will be available to
       simulant initializers as its second argument. It generates the new
       rows in the population state table and then calls each initializer
       registered with the population system with a data object containing
       the state table index of the new simulants, the configuration info
       passed to the creator, the current simulation time, and the size of
       the next time step.
    """
    manager = self._population_manager
    return manager.get_simulant_creator()
8.796659
12.212546
0.720297
def initializes_simulants(self, initializer: Callable[[SimulantData], None],
                          creates_columns: Sequence[str]=(),
                          requires_columns: Sequence[str]=()):
    """Marks a callable as a source of initial state information for new
    simulants.

    Parameters
    ----------
    initializer :
        A callable that adds or updates initial state information about new
        simulants.
    creates_columns :
        A list of the state table columns that the given initializer
        provides the initial state information for.
    requires_columns :
        A list of the state table columns that already need to be present
        and populated in the state table before the provided initializer is
        called.
    """
    manager = self._population_manager
    manager.register_simulant_initializer(initializer, creates_columns,
                                          requires_columns)
6.36413
5.5131
1.154365
def title_to_name(title, decode=True, max_len=None, use_complete_words=True):
    """Convert a title into a normalized name suitable for building URLs.

    Args:
        title (str): Title to normalize.
        decode (bool): If True, strip accents and lower-case the title first.
        max_len (int): Maximum length of the generated name, if any.
        use_complete_words (bool): If True, truncate at the last whole word
            that fits within ``max_len`` rather than cutting mid-word.
    """
    # Strip accents and lower-case.
    if decode:
        title = unidecode(title)
        title = title.lower()

    # Remove disallowed characters.
    filtered_title = re.sub(r'[^a-z0-9- ]+', '', title)

    # Remove stop words and extra spaces, joining words with single hyphens.
    normalized_title = '-'.join(
        [word for word in filtered_title.split() if word not in STOP_WORDS])

    # Trim the normalized title if it exceeds the maximum length.
    if max_len and len(normalized_title) > max_len:
        if use_complete_words:
            try:
                # Cut at the last complete word that fits.
                last_word_index = normalized_title.rindex("-", 0, max_len)
                normalized_title = normalized_title[:last_word_index]
            except ValueError:
                # Bug fix: when no word boundary exists before ``max_len``
                # (a single long word), fall back to a hard cut instead of
                # letting ``rindex`` raise.
                normalized_title = normalized_title[:max_len]
        else:
            # Hard cut at the last character that fits.
            normalized_title = normalized_title[:max_len]

    return normalized_title
3.312383
3.124032
1.060291
def validate_url(uri_string):
    """Return True when ``uri_string`` is a valid http(s) URL.

    The URL must provide a scheme, a network location and a path.
    """
    # Only the parse itself can realistically fail; keep the try body small.
    try:
        result = urlparse(uri_string)
    except Exception:
        return False
    has_elements = all([result.scheme, result.netloc, result.path])
    is_http = result.scheme in ("http", "https")
    # Idiom fix: return the boolean expression directly instead of
    # ``True if ... else False``.
    return has_elements and is_http
2.389983
2.360968
1.01229
def ensure_dir_exists(directory):
    """Ensure that ``directory`` exists, creating it (and parents) if needed.

    Improvement: uses ``exist_ok=True`` instead of the original
    check-then-create sequence, which could raise if another process created
    the directory between the ``exists`` check and ``makedirs``.
    """
    if directory:
        os.makedirs(directory, exist_ok=True)
2.545976
2.61631
0.973117
def traverse_dict(dicc, keys, default_value=None):
    """Walk a nested structure following a list of keys, returning
    ``default_value`` when any of them is missing.

    Args:
        dicc (dict): Structure to traverse.
        keys (list): Keys to follow; may mix list indices and dict keys.
        default_value: Value returned when ``dicc`` cannot be traversed by
            following ``keys`` sequentially all the way to the end.

    Returns:
        object: The value found by following ``keys`` inside ``dicc``.
    """
    current = dicc
    for key in keys:
        dict_hit = isinstance(current, dict) and key in current
        list_hit = (isinstance(current, list) and isinstance(key, int)
                    and key < len(current))
        if not (dict_hit or list_hit):
            return default_value
        current = current[key]
    return current
1.715685
1.898308
0.903797
def is_list_of_matching_dicts(list_of_dicts, expected_keys=None):
    """Check that a list is composed solely of dicts that all share exactly
    the same keys.

    Args:
        list_of_dicts (list): List of dicts to compare.
        expected_keys (set): Set of keys every dict must have. When omitted,
            the keys of the first dict in the list are assumed.

    Returns:
        bool: True if every dict shares the same keys.
    """
    if isinstance(list_of_dicts, list) and len(list_of_dicts) == 0:
        return False

    # Bug fix: the original assertion messages were bare ``.format(...)``
    # calls with no string literal — a syntax error. Restore the messages.
    is_not_list_msg = "{} is not a list".format(list_of_dicts)
    assert isinstance(list_of_dicts, list), is_not_list_msg

    not_all_dicts_msg = "Not all elements of {} are dicts".format(list_of_dicts)
    assert all([isinstance(d, dict) for d in list_of_dicts]), not_all_dicts_msg

    # When no expected_keys are given, take them from the first dict.
    expected_keys = expected_keys or set(list_of_dicts[0].keys())
    return all(set(d.keys()) == expected_keys for d in list_of_dicts)
2.654031
2.411828
1.100423
def parse_value(cell):
    """Extract the value of an Excel cell as text."""
    value = cell.value
    # Strip surrounding whitespace from strings.
    if isinstance(value, string_types):
        value = value.strip()
    # Serialize dates as ISO 8601 text.
    if isinstance(value, datetime):
        value = value.isoformat()
    return value
6.84697
5.593485
1.224097
def sheet_to_table(worksheet):
    """Transform a worksheet of an Excel workbook into a list of dicts.

    Args:
        worksheet (Workbook.worksheet): XLSX spreadsheet as read by
            `openpyxl`.

    Returns:
        list_of_dicts: A list of dicts with one element per record in the
            sheet and one key per field of the sheet.
    """
    headers = []
    value_rows = []
    for row_index, row in enumerate(worksheet.iter_rows()):
        if row_index == 0:
            # Row 1 holds the headers; the first empty header cell marks
            # the sheet's maximum width in columns.
            for header_cell in row:
                if not header_cell.value:
                    break
                headers.append(parse_value(header_cell))
            continue

        # Only consider as many cells as there are headers.
        row_cells = [parse_value(cell)
                     for position, cell in enumerate(row)
                     if position < len(headers)]

        # The first fully empty row marks the end of the sheet; keep rows
        # with at least one non-empty field.
        if not any(row_cells):
            break
        value_rows.append(row_cells)

    # Turn each row into a dict keyed by header, skipping None values.
    return [{key: value for (key, value) in zip(headers, row)
             if value is not None}
            for row in value_rows]
3.851147
3.753789
1.025936
def string_to_list(string, sep=",", filter_empty=False):
    """Transform a string with `sep`-separated elements into a list."""
    parts = string.split(sep)
    if filter_empty:
        # Drop elements that are empty *before* stripping, matching the
        # original behavior.
        parts = [part for part in parts if part]
    return [part.strip() for part in parts]
3.707558
3.743107
0.990503
def add_dicts(one_dict, other_dict):
    """Add the two dicts key by key.

    If a value is itself a dict, the function recurses. Both dicts must
    have exactly the same keys, and the associated values must be addable,
    or dicts.

    Args:
        one_dict (dict)
        other_dict (dict)

    Returns:
        dict: the result of the addition
    """
    result = other_dict.copy()
    for key, value in one_dict.items():
        # Treat None as zero on either side of the addition.
        if value is None:
            value = 0
        if isinstance(value, dict):
            result[key] = add_dicts(value, other_dict.get(key, {}))
        else:
            base = result.get(key, 0)
            if base is None:
                base = 0
            result[key] = base + value
    return result
1.908468
2.186397
0.872883
def parse_repeating_time_interval_to_days(date_str):
    """Parse a string holding an ISO 8601 repeating time interval into the
    number of days that interval represents.

    Returns 0 when the interval is invalid.
    """
    # Days contributed per unit of each ISO 8601 duration designator.
    intervals = {
        'Y': 365,
        'M': 30,
        'W': 7,
        'D': 1,
        'H': 0,
        'S': 0
    }

    if date_str.find('R/P') != 0:
        # Malformed periodicity.
        return 0
    date_str = date_str.strip('R/P')

    days = 0
    index = 0
    for interval in intervals:
        value_end = date_str.find(interval)
        if value_end < 0:
            continue
        try:
            days += int(float(date_str[index:value_end]) * intervals[interval])
            # Bug fix: advance past the designator character itself.
            # The original set ``index = value_end``, so for e.g. "R/P1Y6M"
            # the month component was read as "Y6" and silently dropped.
            index = value_end + 1
        # Invalid accrualPeriodicity value, treated as 0.
        except ValueError:
            continue

    # Round the result up to at least one day.
    return max(days, 1)
4.042652
3.483401
1.160548
def parse_repeating_time_interval_to_str(date_str):
    """Return a human-readable description of a repeating interval.

    TODO: currently only interprets a fixed list of intervals; it should be
    able to parse any case.
    """
    schema_path = os.path.join(ABSOLUTE_SCHEMA_DIR, "accrualPeriodicity.json")
    with open(schema_path, "r") as f:
        descriptions = {freq["id"]: freq["description"]
                        for freq in json.load(f)}
    return descriptions[date_str]
5.128776
4.938097
1.038614
def find_ws_name(wb, name):
    """Find a worksheet in a workbook, ignoring upper/lower case.

    Args:
        wb: An openpyxl workbook, or a path/URL to one.
        name (str): Sheet name to look for, case-insensitively.

    Raises:
        ValueError: if no sheet matches ``name``.
    """
    if isinstance(wb, string_types):
        # FIXME: import or remove as appropriate
        wb = load_workbook(wb, read_only=True, data_only=True)

    target = name.lower()
    for sheetname in wb.sheetnames:
        if sheetname.lower() == target:
            return sheetname

    # Improvement: raise a specific exception type instead of the bare,
    # over-generic ``Exception``; still backward compatible with callers
    # catching ``Exception``.
    raise ValueError("No existe la hoja {}".format(name))
3.933946
3.121079
1.260444
def datasets_equal(dataset, other, fields_dataset=None,
                   fields_distribution=None, return_diff=False):
    """Equality check between two datasets: they are considered equal when
    the values of the compared fields (by default 'title' and
    'publisher.name') match in both.

    Args:
        dataset (dict): A dataset, produced by reading a catalog.
        other (dict): Same as above.

    Returns:
        bool: True when equal, False otherwise.
    """
    dataset_is_equal = True
    dataset_diff = []

    # Fields to compare. Nested fields are written as lists.
    if not fields_dataset:
        fields_dataset = ['title', ['publisher', 'name']]

    for field_dataset in fields_dataset:
        if isinstance(field_dataset, list):
            value = traverse_dict(dataset, field_dataset)
            other_value = traverse_dict(other, field_dataset)
        else:
            value = dataset.get(field_dataset)
            other_value = other.get(field_dataset)

        if value != other_value:
            dataset_diff.append({
                "error_location": field_dataset,
                "dataset_value": value,
                "other_value": other_value
            })
            dataset_is_equal = False

    if fields_distribution:
        dataset_distributions = dataset.get("distribution")
        other_distributions = other.get("distribution")

        if len(dataset_distributions) != len(other_distributions):
            logger.info("{} distribuciones en origen y {} en destino".format(
                len(dataset_distributions), len(other_distributions)))
            dataset_is_equal = False

        distributions_equal = True
        # NOTE(review): zip truncates to the shorter list, so extra
        # distributions are only reflected via the length check above.
        for dataset_distribution, other_distribution in zip(
                dataset_distributions, other_distributions):
            for field_distribution in fields_distribution:
                if isinstance(field_distribution, list):
                    value = traverse_dict(dataset_distribution,
                                          field_distribution)
                    other_value = traverse_dict(other_distribution,
                                                field_distribution)
                else:
                    value = dataset_distribution.get(field_distribution)
                    other_value = other_distribution.get(field_distribution)

                if value != other_value:
                    dataset_diff.append({
                        "error_location": "{} ({})".format(
                            field_distribution,
                            dataset_distribution.get("title")),
                        "dataset_value": value,
                        "other_value": other_value
                    })
                    distributions_equal = False

        if not distributions_equal:
            dataset_is_equal = False

    if return_diff:
        return dataset_diff
    return dataset_is_equal
1.901198
1.874734
1.014116
def generate_distribution_ids(catalog):
    """Generate identifiers for distributions that lack one.

    Distribution identifiers are generated by concatenating the id of the
    dataset they belong to with the positional index of the distribution in
    the dataset:
        distribution_identifier = "{dataset_identifier}_{index}".
    """
    for dataset in catalog.get("dataset", []):
        distributions = dataset.get("distribution", [])
        for position, distribution in enumerate(distributions):
            if "identifier" in distribution:
                continue
            distribution["identifier"] = "{}_{}".format(
                dataset["identifier"], position)
3.036587
2.39891
1.265819
try: label = catalog.get_theme(identifier=theme)['label'] except BaseException: try: label = catalog.get_theme(label=theme)['label'] except BaseException: raise ce.ThemeNonExistentError(theme) label = re.sub(r'[^\wá-úÁ-ÚñÑ .-]+', '', label, flags=re.UNICODE) return label
def _get_theme_label(catalog, theme)
Intenta conseguir el theme por id o por label.
3.808314
3.318364
1.147648
def make_catalogs_backup(catalogs, local_catalogs_dir="", include_metadata=True,
                         include_data=True, include_metadata_xlsx=False,
                         use_short_path=False):
    """Make a local copy of the data and metadata of one or more catalogs.

    Args:
        catalogs (list or dict): List of catalogs (elements DataJson can
            interpret as catalogs) or a dict whose keys are used as each
            catalog's catalog_identifier:
                {"modernizacion":
                 "http://infra.datos.gob.ar/catalog/modernizacion/data.json"}
            With a list, ids are taken from each catalog's identifier and
            catalogs without one are skipped. With a dict, the keys replace
            the catalogs' own identifiers (which are not read).
        local_catalogs_dir (str): Local directory where the "catalog/..."
            folder with all the catalogs will be created.
        include_metadata (bool): When True, generate the data.json and
            catalog.xlsx files.
        include_data (bool): When True, download every distribution of
            every catalog.

    Return:
        None
    """
    if isinstance(catalogs, list):
        for catalog in catalogs:
            try:
                make_catalog_backup(
                    catalog,
                    local_catalogs_dir=local_catalogs_dir,
                    include_metadata=include_metadata,
                    include_metadata_xlsx=include_metadata_xlsx,
                    include_data=include_data,
                    use_short_path=use_short_path)
            except Exception:
                logger.exception("ERROR en {}".format(catalog))
    elif isinstance(catalogs, dict):
        # Bug fix: ``dict.iteritems`` is Python 2 only and raises
        # ``AttributeError`` on Python 3; ``items`` works on both.
        for catalog_id, catalog in catalogs.items():
            try:
                make_catalog_backup(
                    catalog, catalog_id,
                    local_catalogs_dir=local_catalogs_dir,
                    include_metadata=include_metadata,
                    include_metadata_xlsx=include_metadata_xlsx,
                    include_data=include_data,
                    use_short_path=use_short_path)
            except Exception:
                logger.exception(
                    "ERROR en {} ({})".format(catalog, catalog_id))
1.539614
1.527021
1.008247
def make_catalog_backup(catalog, catalog_id=None, local_catalogs_dir="",
                        include_metadata=True, include_data=True,
                        include_datasets=None,
                        include_distribution_formats=['CSV', 'XLS'],
                        include_metadata_xlsx=True, use_short_path=False):
    """Make a local copy of the data and metadata of a catalog.

    Args:
        catalog (dict or str): External/internal representation of a
            catalog. An _external_ representation is a local path or a
            remote URL to a file with the catalog metadata in JSON or XLSX
            format. The _internal_ representation of a catalog is a dict.
        catalog_id (str): When given, this identifier is used for the
            backup; otherwise catalog["identifier"] is used.
        local_catalogs_dir (str): Local directory where the "catalog/..."
            folder with all the catalogs will be created.
        include_metadata (bool): When True, generate the data.json and
            catalog.xlsx files.
        include_data (bool): When True, download all the distributions of
            the catalog.
        include_datasets (list): When given, only the listed datasets are
            downloaded; otherwise all of them.
        include_distribution_formats (list): When given, only distributions
            in the listed formats are downloaded; otherwise all of them.
        use_short_path (bool): Not implemented. When True, a simplified
            directory hierarchy is used; otherwise the one existing in
            infra is replicated.

    Return:
        None
    """
    catalog = pydatajson.DataJson(catalog)
    catalog_identifier = catalog_id if catalog_id else catalog["identifier"]

    if include_metadata:
        logger.info(
            "Descargando catálogo {}".format(catalog_identifier.ljust(30)))
        # Catalog as JSON.
        catalog_path = get_catalog_path(catalog_identifier, local_catalogs_dir)
        ensure_dir_exists(os.path.dirname(catalog_path))
        catalog.to_json(catalog_path)
        if include_metadata_xlsx:
            # Catalog as XLSX.
            catalog_path = get_catalog_path(
                catalog_identifier, local_catalogs_dir, fmt="xlsx")
            ensure_dir_exists(os.path.dirname(catalog_path))
            catalog.to_xlsx(catalog_path)

    if not include_data:
        return

    distributions = catalog.distributions
    distributions_num = len(distributions)
    for index, distribution in enumerate(distributions):
        logger.info("Descargando distribución {} de {} ({})".format(
            index + 1, distributions_num, catalog_identifier))

        dataset_id = distribution["dataset_identifier"]
        # Skip datasets outside the requested subset.
        if include_datasets and (dataset_id not in include_datasets):
            continue

        distribution_id = distribution["identifier"]
        distribution_download_url = distribution["downloadURL"]

        # When no file name is given, take it from the URL.
        distribution_file_name = distribution.get(
            "fileName",
            distribution_download_url[
                distribution_download_url.rfind("/") + 1:])

        # When no format is given, take it from the file name, assuming the
        # format is at least part of distribution_file_name.
        distribution_format = distribution.get(
            "format",
            distribution_file_name[distribution_file_name.rfind(".") + 1:])

        # Skip formats outside the requested subset.
        if (include_distribution_formats
                and (distribution_format not in include_distribution_formats)):
            continue

        # Build the local path where the file will be saved.
        file_path = get_distribution_path(
            catalog_identifier, dataset_id, distribution_id,
            distribution_file_name, local_catalogs_dir,
            use_short_path=use_short_path)
        ensure_dir_exists(os.path.dirname(file_path))

        # Download the file.
        download_to_file(distribution_download_url, file_path)
2.454504
2.422487
1.013217
def get_distribution_dir(catalog_id, dataset_id, distribution_id,
                         catalogs_dir=CATALOGS_DIR, use_short_path=False):
    """Build the standard filesystem path of a distribution's directory."""
    catalog_path = os.path.join(catalogs_dir, "catalog", catalog_id)
    if use_short_path:
        distribution_dir = os.path.join(catalog_path, dataset_id)
    else:
        dataset_path = os.path.join(catalog_path, "dataset", dataset_id)
        distribution_dir = os.path.join(
            dataset_path, "distribution", distribution_id)
    return os.path.abspath(distribution_dir)
1.710369
1.677616
1.019523
def get_distribution_path(catalog_id, dataset_id, distribution_id,
                          distribution_file_name, catalogs_dir=CATALOGS_DIR,
                          use_short_path=False):
    """Build the standard filesystem path of a distribution's data file."""
    base_dir = get_distribution_dir(
        catalog_id, dataset_id, distribution_id, catalogs_dir,
        use_short_path=use_short_path)
    if use_short_path:
        file_path = os.path.join(base_dir, distribution_file_name)
    else:
        file_path = os.path.join(base_dir, "download", distribution_file_name)
    return os.path.abspath(file_path)
1.552751
1.54069
1.007829
def get_catalog_path(catalog_id, catalogs_dir=CATALOGS_DIR, fmt="json"):
    """Build the standard filesystem path of a catalog's metadata file."""
    base_path = os.path.join(catalogs_dir, "catalog", catalog_id)
    filenames = {"json": "data.json", "xlsx": "catalog.xlsx"}
    if fmt not in filenames:
        raise NotImplementedError("El formato {} no está implementado.".format(
            fmt))
    return os.path.join(base_path, filenames[fmt])
2.494748
2.12277
1.175232
def main(catalogs, include_data=True, use_short_path=True):
    """Back up one or more catalogs from the command line.

    Args:
        catalogs (str): Comma-separated list of catalogs (URLs or local
            paths) to back up.
    """
    # Command-line flags arrive as strings like "0"/"1"; normalize to bool.
    download_data = bool(int(include_data))
    catalog_list = catalogs.split(",")
    make_catalogs_backup(catalog_list, include_data=download_data,
                         use_short_path=use_short_path)
4.205976
6.553994
0.641742
def generate_readme(catalog, export_path=None):
    """Generate a Markdown description of a catalog's general metadata
    (title, publisher, etc.) together with:
        - metadata status at the catalog level,
        - global metadata status,
        - number of federated and non-federated datasets,
        - details of the non-federated datasets,
        - number of datasets and distributions included.

    Used by the `libreria-catalogos` daily routine to generate a README
    with basic information about the maintained catalogs.

    Args:
        catalog (str o dict): Path to a catalog in any format (JSON, XLSX)
            or a python dict.
        export_path (str): Path where the generated (Markdown) text is
            exported. When given, the method returns nothing.

    Returns:
        str: The generated description text.
    """
    # If a path/URL was passed in, remember it for the report.
    if isinstance(catalog, string_types):
        catalog_path_or_url = catalog
    else:
        catalog_path_or_url = None

    catalog = read_catalog(catalog)
    validation = validate_catalog(catalog)
    # Indicators are only needed for a single catalog.
    indicators = generate_catalogs_indicators(catalog, CENTRAL_CATALOG)[0][0]

    with io.open(os.path.join(TEMPLATES_PATH, 'catalog_readme.txt'),
                 'r', encoding='utf-8') as template_file:
        readme_template = template_file.read()

    not_federated_datasets_list = "\n".join(
        "- [{}]({})".format(dataset[0], dataset[1])
        for dataset in indicators["datasets_no_federados"])
    federated_removed_datasets_list = "\n".join(
        "- [{}]({})".format(dataset[0], dataset[1])
        for dataset in indicators["datasets_federados_eliminados"])
    federated_datasets_list = "\n".join(
        "- [{}]({})".format(dataset[0], dataset[1])
        for dataset in indicators["datasets_federados"])

    federated_pct = indicators["datasets_federados_pct"]
    non_federated_pct = (1.0 - federated_pct
                         if federated_pct is not None else federated_pct)

    content = {
        "title": catalog.get("title"),
        "publisher_name": traverse_dict(catalog, ["publisher", "name"]),
        "publisher_mbox": traverse_dict(catalog, ["publisher", "mbox"]),
        "catalog_path_or_url": catalog_path_or_url,
        "description": catalog.get("description"),
        "global_status": validation["status"],
        "catalog_status": validation["error"]["catalog"]["status"],
        "no_of_datasets": len(catalog["dataset"]),
        "no_of_distributions": sum(len(dataset["distribution"])
                                   for dataset in catalog["dataset"]),
        "federated_datasets": indicators["datasets_federados_cant"],
        "not_federated_datasets": indicators["datasets_no_federados_cant"],
        "not_federated_datasets_pct": non_federated_pct,
        "not_federated_datasets_list": not_federated_datasets_list,
        "federated_removed_datasets_list": federated_removed_datasets_list,
        "federated_datasets_list": federated_datasets_list,
    }
    catalog_readme = readme_template.format(**content)

    if export_path:
        with io.open(export_path, 'w+', encoding='utf-8') as target:
            target.write(catalog_readme)
    else:
        return catalog_readme
2.533538
2.434985
1.040474
def setup(self, builder: Builder):
    """Performs this component's simulation setup.

    The ``setup`` method is automatically called by the simulation
    framework. The framework passes in a ``builder`` object which provides
    access to a variety of framework subsystems and metadata.

    Parameters
    ----------
    builder :
        Access to simulation tools and subsystems.
    """
    self.config = builder.configuration
    self.with_common_random_numbers = bool(self.config.randomness.key_columns)
    self.register = builder.randomness.register_simulants
    if (self.with_common_random_numbers
            and not ['entrance_time', 'age'] == self.config.randomness.key_columns):
        # Bug fix: the original implicit string concatenation was missing a
        # space and rendered "...agel as" + "the..." as "asthe".
        raise ValueError("If running with CRN, you must specify ['entrance_time', 'age'] as "
                         "the randomness key columns.")

    self.age_randomness = builder.randomness.get_stream(
        'age_initialization',
        for_initialization=self.with_common_random_numbers)
    self.sex_randomness = builder.randomness.get_stream('sex_initialization')

    columns_created = ['age', 'sex', 'alive', 'entrance_time']
    builder.population.initializes_simulants(self.on_initialize_simulants,
                                             creates_columns=columns_created)
    self.population_view = builder.population.get_view(columns_created)

    builder.event.register_listener('time_step', self.age_simulants)
5.764662
1.025773
def on_initialize_simulants(self, pop_data: SimulantData):
    """Called by the simulation whenever new simulants are added.

    This component is responsible for creating and filling four columns
    in the population state table:

    'age'           : Age of the simulant in fractional years.
    'sex'           : One of {'Male', 'Female'}.
    'alive'         : One of {'alive', 'dead'}.
    'entrance_time' : `pandas.Timestamp` the simulant entered the
                      simulation (the 'birthday' for newborns).

    Parameters
    ----------
    pop_data :
        Record with the new simulants' index, the start and width of the
        time step they are added on, and their age boundaries.
    """
    age_start = self.config.population.age_start
    age_end = self.config.population.age_end
    if age_start == age_end:
        # Degenerate age range: spread ages across the creation window.
        age_window = pop_data.creation_window / pd.Timedelta(days=365)
    else:
        age_window = age_end - age_start

    ages = age_start + self.age_randomness.get_draw(pop_data.index) * age_window

    if self.with_common_random_numbers:
        # CRN: register (entrance_time, age) before drawing anything keyed
        # on the simulants, then fill in the remaining columns.
        population = pd.DataFrame({'entrance_time': pop_data.creation_time,
                                   'age': ages.values},
                                  index=pop_data.index)
        self.register(population)
        population['sex'] = self.sex_randomness.choice(pop_data.index, ['Male', 'Female'])
        population['alive'] = 'alive'
    else:
        population = pd.DataFrame(
            {'age': ages.values,
             'sex': self.sex_randomness.choice(pop_data.index, ['Male', 'Female']),
             'alive': pd.Series('alive', index=pop_data.index),
             'entrance_time': pop_data.creation_time},
            index=pop_data.index)

    self.population_view.update(population)
3.232608
2.802257
1.153573
def age_simulants(self, event: Event):
    """Advances the age of every living simulant on each time step.

    Parameters
    ----------
    event :
        Event carrying the index of affected simulants and step timing
        information.
    """
    living = self.population_view.get(event.index, query="alive == 'alive'")
    # Convert the step size (a Timedelta) into fractional years.
    living['age'] += event.step_size / pd.Timedelta(days=365)
    self.population_view.update(living)
8.215494
8.711199
0.943096
def generate_catalogs_indicators(catalogs, central_catalog=None,
                                 identifier_search=False, validator=None):
    """Generate per-catalog indicator dicts plus network-wide totals.

    Args:
        catalogs (str, dict or list): One or more catalogs to compute
            indicators for.
        central_catalog (str): Central catalog to compare datasets against.
            When omitted, no federation indicators are generated.
        identifier_search (bool): Match federated datasets by identifier.
        validator: Optional validator forwarded to the per-catalog pass.

    Returns:
        tuple: A list with one indicator dict per catalog, and a dict of
        network-wide indicators over the whole list.
    """
    assert isinstance(catalogs, string_types + (dict, list))
    # Normalize a single catalog into a one-element list.
    if isinstance(catalogs, string_types + (dict,)):
        catalogs = [catalogs]

    indicators_list = []
    fields = {}          # network-wide used/recommended field counts
    catalogs_cant = 0    # catalogs successfully read

    for catalog in catalogs:
        try:
            catalog = readers.read_catalog(catalog)
            catalogs_cant += 1
        except Exception as e:
            logger.error(u'Error leyendo catálogo de la lista: {}'.format(str(e)))
            continue

        fields_count, result = _generate_indicators(catalog, validator=validator)
        if central_catalog:
            result.update(_federation_indicators(
                catalog, central_catalog, identifier_search=identifier_search))

        if not indicators_list:
            # First successful catalog seeds the network totals.
            network_indicators = result.copy()
        else:
            network_indicators = helpers.add_dicts(network_indicators, result)

        # Accumulate used/total field counts across the network.
        fields = helpers.add_dicts(fields_count, fields)

        result['title'] = catalog.get('title', 'no-title')
        result['identifier'] = catalog.get('identifier', 'no-id')
        indicators_list.append(result)

    if not indicators_list:  # no catalog could be read
        return [], {}

    network_indicators['catalogos_cant'] = catalogs_cant
    # Percentages cannot be summed; recompute them over the whole network.
    _network_indicator_percentages(fields, network_indicators)

    return indicators_list, network_indicators
4.173791
4.029204
1.035885
def _generate_indicators(catalog, validator=None, only_numeric=False):
    """Generate the indicators for a single catalog.

    Args:
        catalog (dict): Parsed data.json catalog.
        validator: Optional validator forwarded to the status indicators.
        only_numeric (bool): When True, skip non-numeric (count dict)
            indicators.

    Returns:
        tuple: (required/optional field counts, indicator dict for the
        provided catalog).
    """
    result = {}
    # Metadata status summary for the catalog.
    result.update(_generate_status_indicators(catalog, validator=validator))
    # Date-related indicators.
    result.update(_generate_date_indicators(catalog, only_numeric=only_numeric))

    if not only_numeric:
        # Distribution format/type counts and dataset license counts.
        if 'dataset' in catalog:
            format_count = count_fields(get_distributions(catalog), 'format')
            type_count = count_fields(get_distributions(catalog), 'type')
            license_count = count_fields(get_datasets(catalog), 'license')
        else:
            format_count = type_count = license_count = {}
        result.update({
            'distribuciones_formatos_cant': format_count,
            'distribuciones_tipos_cant': type_count,
            'datasets_licencias_cant': license_count,
        })

    # Share of recommended/optional fields actually used.
    fields_count = _count_required_and_optional_fields(catalog)
    recomendados_pct = (float(fields_count['recomendado'])
                        / fields_count['total_recomendado'])
    optativos_pct = (float(fields_count['optativo'])
                     / fields_count['total_optativo'])
    result.update({
        'campos_recomendados_pct': round(recomendados_pct, 4),
        'campos_optativos_pct': round(optativos_pct, 4)
    })
    return fields_count, result
2.99279
3.014523
0.992791
def _federation_indicators(catalog, central_catalog, identifier_search=False):
    """Compute federation indicators of a catalog against the central one.

    Args:
        catalog (dict): Already-parsed catalog.
        central_catalog (str or dict): Path to the central catalog, or the
            parsed catalog itself.
        identifier_search (bool): Match datasets by identifier.

    Returns:
        dict: Federation counts, percentage and dataset listings. All
        counts are None (and listings empty) when the central catalog
        cannot be read.
    """
    result = {
        'datasets_federados_cant': None,
        'datasets_federados_pct': None,
        'datasets_no_federados_cant': None,
        'datasets_federados_eliminados_cant': None,
        'distribuciones_federadas_cant': None,
        'datasets_federados_eliminados': [],
        'datasets_no_federados': [],
        'datasets_federados': [],
    }
    try:
        central_catalog = readers.read_catalog(central_catalog)
    except Exception as e:
        logger.error(u'Error leyendo el catálogo central: {}'.format(str(e)))
        return result

    generator = FederationIndicatorsGenerator(central_catalog, catalog,
                                              id_based=identifier_search)
    result.update({
        'datasets_federados_cant': generator.datasets_federados_cant(),
        'datasets_no_federados_cant': generator.datasets_no_federados_cant(),
        'datasets_federados_eliminados_cant':
            generator.datasets_federados_eliminados_cant(),
        'datasets_federados_eliminados':
            generator.datasets_federados_eliminados(),
        'datasets_no_federados': generator.datasets_no_federados(),
        'datasets_federados': generator.datasets_federados(),
        'datasets_federados_pct': generator.datasets_federados_pct(),
        'distribuciones_federadas_cant':
            generator.distribuciones_federadas_cant()
    })
    return result
1.992385
1.941607
1.026152
def _network_indicator_percentages(fields, network_indicators):
    """Recompute percentage indicators over the whole node network.

    Percentages cannot be summed across catalogs, so they are re-derived
    here from the aggregated counts. `network_indicators` is mutated in
    place.

    Args:
        fields (dict): Network-wide counts under the keys 'recomendado',
            'optativo', 'total_recomendado', 'total_optativo'.
        network_indicators (dict): Aggregated network counts (meta_ok,
            actualizados, federados, con_datos and their complements);
            percentage keys are (re)written into it.
    """
    # Each percentage is partial / (partial + complement).
    pct_sources = {
        'datasets_meta_ok_pct': ('datasets_meta_ok_cant',
                                 'datasets_meta_error_cant'),
        'datasets_actualizados_pct': ('datasets_actualizados_cant',
                                      'datasets_desactualizados_cant'),
        'datasets_federados_pct': ('datasets_federados_cant',
                                   'datasets_no_federados_cant'),
        'datasets_con_datos_pct': ('datasets_con_datos_cant',
                                   'datasets_sin_datos_cant'),
    }
    for pct_key, (ok_key, bad_key) in pct_sources.items():
        partial = network_indicators.get(ok_key) or 0
        total = partial + (network_indicators.get(bad_key) or 0)
        # Guard against division by zero.
        pct = float(partial) / total if total else 0.00
        network_indicators[pct_key] = round(pct, 4)

    # Recommended/optional field usage over the entire catalog network.
    # 'fields' may be empty if no field was valid.
    if fields:
        rec_pct = float(fields['recomendado']) / fields['total_recomendado']
        opt_pct = float(fields['optativo']) / fields['total_optativo']
        network_indicators.update({
            'campos_recomendados_pct': round(rec_pct, 4),
            'campos_optativos_pct': round(opt_pct, 4)
        })
3.06048
2.477833
1.235144
def _generate_status_indicators(catalog, validator=None):
    """Generate basic metadata-status indicators for a catalog.

    Args:
        catalog (dict): Parsed data.json catalog.
        validator: Optional validator forwarded to the summary generation.

    Returns:
        dict: Dataset/distribution counts, metadata OK/error counts and
        percentages. All values are None when the summary cannot be
        generated.
    """
    result = {
        'datasets_cant': None,
        'distribuciones_cant': None,
        'datasets_meta_ok_cant': None,
        'datasets_meta_error_cant': None,
        'datasets_meta_ok_pct': None,
        'datasets_con_datos_cant': None,
        'datasets_sin_datos_cant': None,
        'datasets_con_datos_pct': None
    }
    try:
        summary = generate_datasets_summary(catalog, validator=validator)
    except Exception as e:
        logger.error(u'Error generando resumen del catálogo {}: {}'.format(
            catalog['title'], str(e)))
        return result

    cant_ok = cant_error = 0
    cant_data = cant_without_data = 0
    cant_distribuciones = 0
    datasets_total = len(summary)

    for dataset in summary:
        cant_distribuciones += dataset['cant_distribuciones']
        # Does the dataset have data?
        if dataset['tiene_datos'] == "SI":
            cant_data += 1
        else:  # == "ERROR"
            cant_without_data += 1
        # Metadata status.
        if dataset['estado_metadatos'] == "OK":
            cant_ok += 1
        else:  # == "ERROR"
            cant_error += 1

    datasets_ok_pct = 0
    datasets_with_data_pct = 0
    if datasets_total:  # avoid division by zero
        datasets_ok_pct = round(float(cant_ok) / datasets_total, 4)
        datasets_with_data_pct = round(float(cant_data) / datasets_total, 4)

    result.update({
        'datasets_cant': datasets_total,
        'distribuciones_cant': cant_distribuciones,
        'datasets_meta_ok_cant': cant_ok,
        'datasets_meta_error_cant': cant_error,
        'datasets_meta_ok_pct': datasets_ok_pct,
        'datasets_con_datos_cant': cant_data,
        'datasets_sin_datos_cant': cant_without_data,
        'datasets_con_datos_pct': datasets_with_data_pct
    })
    return result
2.150381
2.063199
1.042256
def _generate_date_indicators(catalog, tolerance=0.2, only_numeric=False):
    """Generate indicators about the catalog's publication/update dates.

    A dataset counts as up to date when the days since its 'modified' date
    do not exceed the interval given by its accrualPeriodicity, widened by
    `tolerance` (e.g. a 10-day period with 20% tolerance is outdated from
    day 12 on).

    Args:
        catalog (dict or str): Parsed catalog (or a path in an accepted
            format).
        tolerance (float): Fractional slack added to each dataset's update
            period before it is considered outdated; may be negative.
        only_numeric (bool): When True, skip the periodicity count dict.

    Returns:
        dict: Date indicators.
    """
    result = {
        'datasets_desactualizados_cant': None,
        'datasets_actualizados_cant': None,
        'datasets_actualizados_pct': None,
        'catalogo_ultima_actualizacion_dias': None
    }
    if not only_numeric:
        result.update({'datasets_frecuencia_cant': {}})

    try:
        # Prefer 'modified'; fall back to 'issued'.
        dias_ultima_actualizacion = _days_from_last_update(catalog, "modified")
        if not dias_ultima_actualizacion:
            dias_ultima_actualizacion = _days_from_last_update(catalog, "issued")
        result['catalogo_ultima_actualizacion_dias'] = dias_ultima_actualizacion
    except Exception as e:
        msg = u'Error generando indicadores de fecha del catálogo {}: {}'\
            .format(catalog['title'], str(e))
        logger.error(msg)
        return result

    actualizados = 0
    desactualizados = 0
    periodicity_amount = {}

    for dataset in catalog.get('dataset', []):
        periodicity = dataset.get('accrualPeriodicity')
        if not periodicity:
            continue
        # 'eventual' periodicity always counts as up to date.
        if periodicity == 'eventual':
            actualizados += 1
            periodicity_amount[periodicity] = \
                periodicity_amount.get(periodicity, 0) + 1
            continue
        # A dataset without a last-modified date is outdated.
        if "modified" not in dataset:
            desactualizados += 1
        else:
            # accrualPeriodicity follows the ISO 8601 standard for
            # repeating time intervals.
            try:
                date = helpers.parse_date_string(dataset['modified'])
                days_diff = float((datetime.now() - date).days)
                interval = helpers.parse_repeating_time_interval(periodicity) * \
                    (1 + tolerance)
            except Exception as e:
                # BUGFIX: str.format() returns a new string; the original
                # discarded that result and logged the raw template. The
                # two literal halves also concatenated without a space
                # ("indicadoresde").
                msg = (u'Error generando indicadores '
                       u'de fecha del dataset {} en {}: {}').format(
                    dataset['identifier'], catalog['title'], str(e))
                logger.error(msg)
                # Assume outdated on parse failure.
                desactualizados += 1
                continue

            if days_diff < interval:
                actualizados += 1
            else:
                desactualizados += 1

        periodicity_amount[periodicity] = \
            periodicity_amount.get(periodicity, 0) + 1

    datasets_total = len(catalog.get('dataset', []))
    actualizados_pct = 0
    if datasets_total:
        actualizados_pct = float(actualizados) / datasets_total

    result.update({
        'datasets_desactualizados_cant': desactualizados,
        'datasets_actualizados_cant': actualizados,
        'datasets_actualizados_pct': round(actualizados_pct, 4)
    })
    if not only_numeric:
        result.update({'datasets_frecuencia_cant': periodicity_amount})
    return result
2.908193
2.80839
1.035537
def _days_from_last_update(catalog, date_field="modified"):
    """Compute days since the catalog's most recent update.

    The `date_field` is looked up first at catalog level, then across every
    dataset; the most recent date (smallest day count) wins.

    Args:
        catalog (dict): A catalog.
        date_field (str): Metadata field to use as the update date.

    Returns:
        int or None: Days since the last update, or None when it cannot
        be computed.
    """
    dias_ultima_actualizacion = None

    # The catalog-level field may not be mandatory; skip it when absent.
    date_modified = catalog.get(date_field, None)
    if isinstance(date_modified, string_types):
        date = helpers.parse_date_string(date_modified)
        if date:
            dias_ultima_actualizacion = (datetime.now() - date).days

    for dataset in catalog.get('dataset', []):
        date = helpers.parse_date_string(dataset.get(date_field, ""))
        days_diff = float((datetime.now() - date).days) if date else None

        # BUGFIX: use explicit None checks. The original used truthiness,
        # so a value of 0 (updated today) was treated as missing and the
        # function could return None instead of 0.
        if days_diff is not None and (dias_ultima_actualizacion is None
                                      or days_diff < dias_ultima_actualizacion):
            dias_ultima_actualizacion = days_diff

    if dias_ultima_actualizacion is not None:
        return int(dias_ultima_actualizacion)
    return None
3.513278
3.41575
1.028553
def _count_required_and_optional_fields(catalog):
    """Count required/recommended/optional fields used in a catalog.

    Args:
        catalog (str or dict): Path to a catalog, or an already-parsed
            catalog dict.

    Returns:
        dict: Counts under the keys 'requerido', 'recomendado', 'optativo'
        and their 'total_*' counterparts.
    """
    catalog = readers.read_catalog(catalog)

    # fields.json maps every catalog field to its usage category.
    catalog_fields_path = os.path.join(CATALOG_FIELDS_PATH, 'fields.json')
    with open(catalog_fields_path) as f:
        catalog_fields = json.load(f)

    # The counting itself walks the nested field spec recursively.
    return _count_fields_recursive(catalog, catalog_fields)
6.022356
6.084118
0.989849
def _count_fields_recursive(dataset, fields):
    """Count occurrences of required/recommended/optional fields.

    Walks `fields` (a nested mapping of field name -> category or nested
    spec) and tallies which of those fields are present in `dataset`.

    Args:
        dataset (dict): Mapping whose keys are checked.
        fields (dict): Field spec: values are 'requerido', 'recomendado',
            'optativo', or a nested dict of the same shape (no arrays).

    Returns:
        dict: Counts under 'requerido'/'recomendado'/'optativo' and the
        matching 'total_*' keys.
    """
    key_count = {
        'recomendado': 0,
        'optativo': 0,
        'requerido': 0,
        'total_optativo': 0,
        'total_recomendado': 0,
        'total_requerido': 0
    }

    for field_name, category in fields.items():
        if isinstance(category, dict):
            # Nested spec: dataset[field_name] may be a list (e.g.
            # 'dataset') or a single dict (e.g. 'publisher'). Normalize to
            # a list; invalid values are compared against an empty dict.
            elements = dataset.get(field_name)
            if isinstance(elements, dict):
                elements = [dataset[field_name].copy()]
            elif not isinstance(elements, list):
                elements = [{}]
            for element in elements:
                # Recurse and merge the child tallies into ours.
                child = _count_fields_recursive(element, category)
                for count_key, value in child.items():
                    key_count[count_key] += value
        else:
            # Leaf field: always bump its category total; bump the used
            # count only when the field is present in the dataset.
            key_count['total_' + category] += 1
            if field_name in dataset:
                key_count[category] += 1

    return key_count
4.536685
3.890953
1.165957
def random(key: str, index: Index, index_map: IndexMap=None) -> pd.Series:
    """Produces an indexed `pandas.Series` of uniformly distributed random numbers.

    The index passed in typically corresponds to a subset of rows in a
    `pandas.DataFrame` for which a probabilistic draw needs to be made.

    Parameters
    ----------
    key :
        A string used to create a seed for the random number generation.
    index :
        The index used for the returned series.
    index_map :
        A mapping between the provided index (which may contain ints, floats,
        datetimes or any arbitrary combination of them) and an integer index
        into the random number array.

    Returns
    -------
    pd.Series
        A series of random numbers indexed by the provided index.
    """
    if len(index) > 0:
        random_state = np.random.RandomState(seed=get_hash(key))

        # NOTE: We generate a full set of random numbers for the population
        # even when we may only need a few. This ensures consistency in
        # outcomes across simulations (common random numbers).
        # See Also:
        #   1. https://en.wikipedia.org/wiki/Variance_reduction
        #   2. Untangling Uncertainty with Common Random Numbers:
        #      A Simulation Study; A. Flaxman, et. al., Summersim 2017
        sample_size = index_map.map_size if index_map is not None else index.max() + 1
        try:
            draw_index = index_map[index]
        except (IndexError, TypeError):
            # No usable map: fall back to the raw index positions.
            draw_index = index

        raw_draws = random_state.random_sample(sample_size)
        return pd.Series(raw_draws[draw_index], index=index)

    # FIX: give the empty series an explicit float dtype. Without it,
    # modern pandas warns and produces object dtype, which is inconsistent
    # with the float64 result of the non-empty branch.
    return pd.Series(index=index, dtype=np.float64)
6.382491
6.21008
1.027763
def get_hash(key: str) -> int:
    """Gets a hash of the provided key.

    Parameters
    ----------
    key :
        A string used to create a seed for the random number generator.

    Returns
    -------
    int
        A hash of the provided key, in range for use as a
        `numpy.random.RandomState` seed.
    """
    # 4294967295 == 2**32 - 1, the maximum allowable seed for a
    # `numpy.random.RandomState`.
    max_allowable_numpy_seed = 4294967295
    digest = hashlib.sha1(key.encode('utf8')).hexdigest()
    return int(digest, 16) % max_allowable_numpy_seed
3.559613
3.3279
1.069627
def choice(key: str, index: Index, choices: Array, p: Array=None, index_map: IndexMap=None) -> pd.Series:
    """Decides between a weighted or unweighted set of choices.

    Given a set of choices with or without corresponding weights, returns
    an indexed set of decisions from those choices. This is simply a
    vectorized way to make decisions with some book-keeping.

    Parameters
    ----------
    key :
        A string used to create a seed for the random number generation.
    index :
        An index whose length is the number of random draws made and which
        indexes the returned `pandas.Series`.
    choices :
        A set of options to choose from.
    p :
        The relative weights of the choices. Either a 1-d array of the same
        length as `choices`, or a 2-d array with `len(index)` rows and
        `len(choices)` columns. In the 1-d case the same weights decide
        among the choices for every item in `index`; in the 2-d case each
        row carries its own weights.
    index_map :
        A mapping between the provided index and an integer index into the
        random number array.

    Returns
    -------
    pd.Series
        An indexed set of decisions from among the available `choices`.

    Raises
    ------
    RandomnessError
        If a row of `p` contains more than one `RESIDUAL_CHOICE`, or a row
        containing `RESIDUAL_CHOICE` has explicit weights that are not
        normalized.
    """
    if p is not None:
        # Broadcast weights over the index and resolve RESIDUAL_CHOICE
        # placeholders into concrete probabilities.
        p = _set_residual_probability(_normalize_shape(p, index))
    else:
        # Unweighted: every choice gets equal weight.
        p = np.ones((len(index), len(choices)))
    p = p / p.sum(axis=1, keepdims=True)

    draw = random(key, index, index_map)
    p_bins = np.cumsum(p, axis=1)
    # Each draw lands in exactly one cumulative-probability bin per row.
    choice_index = (draw.values[np.newaxis].T > p_bins).sum(axis=1)
    return pd.Series(np.array(choices)[choice_index], index=index)
5.243553
5.349186
0.980252
def _set_residual_probability(p: np.ndarray) -> np.ndarray:
    """Turns any use of `RESIDUAL_CHOICE` into a residual probability.

    Parameters
    ----------
    p :
        Array where each row is a set of probability weights, optionally
        containing a `RESIDUAL_CHOICE` placeholder.

    Returns
    -------
    np.ndarray
        Array where each row is a set of normalized probability weights.

    Raises
    ------
    RandomnessError
        If more than one `RESIDUAL_CHOICE` appears in a row, or the
        explicit weights of a row already sum to more than 1.
    """
    residual_mask = p == RESIDUAL_CHOICE
    if residual_mask.any():  # at least one placeholder present
        if np.any(np.sum(residual_mask, axis=1) - 1):
            raise RandomnessError(
                'More than one residual choice supplied for a single set of weights. Weights: {}.'.format(p))

        # Zero out the placeholders, then give each one whatever weight is
        # left so its row sums to 1.
        p[residual_mask] = 0
        residual_p = 1 - np.sum(p, axis=1)

        if np.any(residual_p < 0):  # explicit weights exceeded 1
            raise RandomnessError(
                'Residual choice supplied with weights that summed to more than 1. Weights: {}.'.format(p))
        p[residual_mask] = residual_p
    return p
4.669529
4.122971
1.132564
def filter_for_probability(key: str, population: Union[pd.DataFrame, pd.Series, Index],
                           probability: Array, index_map: IndexMap=None) -> Union[pd.DataFrame, pd.Series, Index]:
    """Decide an event outcome for each individual in a population from probabilities.

    Given a population (or its index) and per-individual probabilities of
    some event, return the sub-population for whom the event occurred.

    Parameters
    ----------
    key :
        A string used to create a seed for the random number generation.
    population :
        A view on the simulants for which we are determining the outcome.
    probability :
        A 1-d list of probabilities, aligned with `population`
        (`len(population) == len(probability)`).
    index_map :
        A mapping between the provided index and an integer index into the
        random number array.

    Returns
    -------
    The sub-population for whom the event occurred; same type as
    `population`.
    """
    if population.empty:
        return population

    index = population if isinstance(population, pd.Index) else population.index
    draws = random(key, index, index_map)
    # The event occurs wherever the uniform draw falls below the probability.
    occurred = np.array(draws < probability)
    return population[occurred]
4.665567
5.63611
0.827799
def update(self, new_keys: Index):
    """Adds the new keys to the mapping.

    Parameters
    ----------
    new_keys :
        The new index to hash.

    Raises
    ------
    KeyError
        If any of the new keys are already present in the mapping.
    """
    if not self._map.index.intersection(new_keys).empty:
        raise KeyError("Non-unique keys in index.")

    new_hashes = self.hash_(new_keys)
    if self._map.empty:
        self._map = new_hashes.drop_duplicates()
    else:
        self._map = self._map.append(new_hashes).drop_duplicates()

    # Keys whose hash collided were dropped by drop_duplicates; rehash
    # them with an increasing salt until every key has a distinct slot.
    salt = 0
    dropped = new_hashes.index.difference(self._map.index)
    while not dropped.empty:
        salt += 1
        new_hashes = self.hash_(dropped, salt)
        self._map = self._map.append(new_hashes).drop_duplicates()
        dropped = new_hashes.index.difference(self._map.index)
2.83241
2.711204
1.044705
def hash_(self, keys: Index, salt: int = 0) -> pd.Series:
    """Hashes the given index into an integer index in [0, self.map_size).

    Parameters
    ----------
    keys :
        The new index to hash.
    salt :
        An integer used to perturb the hash in a deterministic way.
        Useful in dealing with collisions.

    Returns
    -------
    pd.Series
        A series indexed by `keys` whose values are integers in
        [0, self.map_size). Duplicates may appear and should be dealt
        with by the calling code.
    """
    key_frame = keys.to_frame()
    new_map = pd.Series(0, index=keys)
    salt = self.convert_to_ten_digit_int(pd.Series(salt, index=keys))

    # NOTE(review): 27 is not prime (29 was likely intended); kept as-is
    # so previously generated maps stay reproducible.
    primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 27]
    for column_name in key_frame.columns:
        column = self.convert_to_ten_digit_int(key_frame[column_name])
        out = pd.Series(1, index=column.index)
        for digit_position, prime in enumerate(primes):
            # numpy will almost always overflow here, but that is
            # equivalent to modding out by 2**64. Since that is much much
            # larger than our map size, the additional periodicity this
            # introduces is pretty trivial.
            out *= np.power(prime, self.digit(column, digit_position))
        new_map += out + salt

    return new_map % self.map_size
5.342183
5.363026
0.996114
def convert_to_ten_digit_int(self, column: pd.Series) -> pd.Series:
    """Converts a column of datetimes, integers, or floats into a column
    of 10 digit integers.

    Parameters
    ----------
    column :
        A series of datetimes, integers, or floats.

    Returns
    -------
    pd.Series
        A series of ten digit integers based on the input data.

    Raises
    ------
    RandomnessError
        If an integer column contains negative values, or the column's
        data is neither datetime-like nor numeric.
    """
    if isinstance(column.iloc[0], datetime.datetime):
        column = self.clip_to_seconds(column.astype(int))
    elif np.issubdtype(column.iloc[0], np.integer):
        # BUGFIX: the original tested `len(column >= 0) == len(column)`,
        # which compares lengths (always equal) rather than values, so
        # negative integers were never caught. Compare element-wise.
        if not (column >= 0).all():
            raise RandomnessError("Values in integer columns must be greater than or equal to zero.")
        column = self.spread(column)
    elif np.issubdtype(column.iloc[0], np.floating):
        column = self.shift(column)
    else:
        raise RandomnessError(f"Unhashable column type {type(column.iloc[0])}. "
                              "IndexMap accepts datetime like columns and numeric columns.")
    return column
4.740309
4.112103
1.15277
def digit(m: Union[int, pd.Series], n: int) -> Union[int, pd.Series]:
    """Returns the nth base-10 digit of each number in m, counting from the
    least significant digit (n=0)."""
    shifted = m // (10 ** n)
    return shifted % 10
4.851006
4.603423
1.053782
def clip_to_seconds(m: Union[int, pd.Series]) -> Union[int, pd.Series]:
    """Clips UTC datetime in nanoseconds to seconds."""
    nanos_per_second = pd.Timedelta(1, unit='s').value
    return m // nanos_per_second
10.397306
7.855128
1.323633
def spread(self, m: Union[int, pd.Series]) -> Union[int, pd.Series]:
    """Spreads out integer values to give smaller values more weight."""
    # Multiplying by a large constant scatters small consecutive integers
    # across the ten-digit space before reducing by the map modulus.
    scattered = m * 111_111
    return scattered % self.TEN_DIGIT_MODULUS
18.104872
16.697575
1.084282
def shift(self, m: Union[float, pd.Series]) -> Union[int, pd.Series]:
    """Shifts floats so that the first 10 decimal digits are significant."""
    # Keep only the fractional part, then scale it into the ten-digit range
    # and truncate.
    shifted = m % 1 * self.TEN_DIGIT_MODULUS // 1
    if isinstance(shifted, pd.Series):
        return shifted.astype(int)
    return int(shifted)
7.534901
6.325756
1.191146
def copy_with_additional_key(self, key: Any) -> 'RandomnessStream':
    """Creates a copy of this stream whose key combines this stream's key
    with a new one.

    Parameters
    ----------
    key :
        The additional key to describe the new stream with.

    Returns
    -------
    RandomnessStream
        A new RandomnessStream with a combined key.

    Raises
    ------
    RandomnessError
        If this is an initialization stream.
    """
    if self._for_initialization:
        raise RandomnessError('Initialization streams cannot be copied.')
    elif self._manager:
        return self._manager.get_randomness_stream('_'.join([self.key, key]))
    else:
        # BUGFIX: the original ignored `key` here and returned a plain
        # copy, so unmanaged streams silently shared the parent's key (and
        # therefore its draws). Combine the keys as the docstring (and the
        # managed branch) promise.
        return RandomnessStream('_'.join([self.key, str(key)]),
                                self.clock, self.seed, self.index_map)
6.329175
6.913142
0.915528
def _key(self, additional_key: Any=None) -> str:
    """Construct a hashable key from this object's state.

    Parameters
    ----------
    additional_key :
        Any additional information used to seed random number generation.

    Returns
    -------
    str
        A key to seed random number generation.
    """
    parts = [self.key, str(self.clock()), str(additional_key), str(self.seed)]
    return '_'.join(parts)
8.983864
11.21521
0.801043
def get_draw(self, index: Index, additional_key: Any=None) -> pd.Series:
    """Get an indexed sequence of floats pulled from a uniform distribution
    over [0.0, 1.0).

    Parameters
    ----------
    index :
        An index whose length is the number of random draws made and which
        indexes the returned `pandas.Series`.
    additional_key :
        Any additional information used to seed random number generation.

    Returns
    -------
    pd.Series
        A series of random numbers indexed by the provided `pandas.Index`.
    """
    seed_key = self._key(additional_key)
    if self._for_initialization:
        # Initialization streams draw positionally, then relabel the draws
        # with the caller's index.
        draw = random(seed_key, pd.Index(range(len(index))), self.index_map)
        draw.index = index
        return draw
    return random(seed_key, index, self.index_map)
5.966606
5.534732
1.07803
def filter_for_rate(self, population: Union[pd.DataFrame, pd.Series, Index],
                    rate: Array, additional_key: Any=None) -> Index:
    """Decide an event outcome for each individual in a population from rates.

    Given a population (or its index) and per-individual rates for some
    event, return the sub-population for whom the event occurred. Rates
    must already be scaled to the simulation time-step size.

    Parameters
    ----------
    population :
        A view on the simulants for which we are determining the outcome.
    rate :
        A 1-d list of rates aligned with `population`.
    additional_key :
        Any additional information used to create the seed.

    Returns
    -------
    The index of the simulants for whom the event occurred.

    See Also
    --------
    framework.values: Value/rate pipeline management module.
    """
    # Convert per-step rates to probabilities, then filter on those.
    probability = rate_to_probability(rate)
    return self.filter_for_probability(population, probability, additional_key)
5.596454
9.140864
0.612246
def filter_for_probability(self, population: Union[pd.DataFrame, pd.Series, Index],
                           probability: Array, additional_key: Any=None) -> Index:
    """Decide an event outcome for each individual in a population from
    probabilities.

    Given a population (or its index) and per-individual probabilities of
    some event, return the sub-population for whom the event occurred.

    Parameters
    ----------
    population :
        A view on the simulants for which we are determining the outcome.
    probability :
        A 1-d list of probabilities aligned with `population`.
    additional_key :
        Any additional information used to create the seed.

    Returns
    -------
    The sub-population for whom the event occurred; same type as
    `population`.
    """
    # Delegates to the module-level helper with this stream's seed and map.
    seed_key = self._key(additional_key)
    return filter_for_probability(seed_key, population, probability, self.index_map)
9.490037
19.666372
0.482551
def choice(self, index: Index, choices: Array, p: Array = None, additional_key: Any = None) -> pd.Series:
    """Decides between a weighted or unweighted set of choices.

    Given a set of choices with or without corresponding weights, returns an
    indexed set of decisions from those choices. This is simply a vectorized
    way to make decisions with some book-keeping.

    Parameters
    ----------
    index :
        An index whose length is the number of random draws made and which
        indexes the returned `pandas.Series`.
    choices :
        A set of options to choose from.
    p :
        The relative weights of the choices. Can be either a 1-d array of the
        same length as `choices` or a 2-d array with ``len(index)`` rows and
        ``len(choices)`` columns. In the 1-d case, the same set of weights are
        used to decide among the choices for every item in the `index`. In the
        2-d case, each row in `p` contains a separate set of weights for every
        item in the `index`.
    additional_key :
        Any additional information used to seed random number generation.

    Returns
    -------
    pd.Series
        An indexed set of decisions from among the available `choices`.

    Raises
    ------
    RandomnessError
        If any row in `p` contains `RESIDUAL_CHOICE` and the remaining weights
        in the row are not normalized, or any row of `p` contains more than
        one reference to `RESIDUAL_CHOICE`.
    """
    seed_key = self._key(additional_key)
    # Delegates to the module-level CRN-aware choice helper.
    return choice(seed_key, index, choices, p, self.index_map)
10.571656
18.609495
0.568079
def get_randomness_stream(self, decision_point: str, for_initialization: bool = False) -> RandomnessStream:
    """Provides a new source of random numbers for the given decision point.

    Parameters
    ----------
    decision_point :
        A unique identifier for a stream of random numbers. Typically
        represents a decision that needs to be made each time step like
        'moves_left' or 'gets_disease'.
    for_initialization :
        A flag indicating whether this stream is used to generate key
        initialization information that will be used to identify simulants in
        the Common Random Number framework. These streams cannot be copied and
        should only be used to generate the state table columns specified in
        ``builder.configuration.randomness.key_columns``.

    Raises
    ------
    RandomnessError :
        If another location in the simulation has already created a randomness
        stream with the same identifier.
    """
    # Each decision point may be claimed exactly once for the whole simulation.
    if decision_point in self._decision_points:
        raise RandomnessError(f"Two separate places are attempting to create "
                              f"the same randomness stream for {decision_point}")
    stream = self._decision_points[decision_point] = RandomnessStream(
        key=decision_point,
        clock=self._clock,
        seed=self._seed,
        index_map=self._key_mapping,
        manager=self,
        for_initialization=for_initialization,
    )
    return stream
4.509183
4.064664
1.109362
def register_simulants(self, simulants: pd.DataFrame):
    """Adds new simulants to the randomness mapping.

    Parameters
    ----------
    simulants :
        A table with state data representing the new simulants. Each simulant
        should pass through this function exactly once.

    Raises
    ------
    RandomnessError :
        If the provided table does not contain all key columns specified in
        the configuration.
    """
    required = set(self._key_columns)
    if not required.issubset(simulants.columns):
        raise RandomnessError("The simulants dataframe does not have all specified key_columns.")
    # Index new simulants by their key columns so CRN draws are stable
    # across simulation runs.
    self._key_mapping.update(simulants.set_index(self._key_columns).index)
5.100999
3.517918
1.450005
def get_stream(self, decision_point: str, for_initialization: bool = False) -> RandomnessStream:
    """Provides a new source of random numbers for the given decision point.

    ``vivarium`` provides a framework for Common Random Numbers which allows
    for variance reduction when modeling counter-factual scenarios. Users
    interested in causal analysis and comparisons between simulation scenarios
    should be careful to use randomness streams provided by the framework
    wherever randomness is employed.

    Parameters
    ----------
    decision_point :
        A unique identifier for a stream of random numbers. Typically
        represents a decision that needs to be made each time step like
        'moves_left' or 'gets_disease'.
    for_initialization :
        A flag indicating whether this stream is used to generate key
        initialization information that will be used to identify simulants in
        the Common Random Number framework. These streams cannot be copied and
        should only be used to generate the state table columns specified in
        ``builder.configuration.randomness.key_columns``.

    Returns
    -------
    RandomnessStream
        An entry point into the Common Random Number generation framework.
        The stream provides vectorized access to random numbers and a few
        other utilities.
    """
    # The builder interface is a thin pass-through to the manager.
    stream = self._randomness_manager.get_randomness_stream(decision_point, for_initialization)
    return stream
4.037845
5.832022
0.692358
def student_visible(self):
    """Return a queryset of groups whose properties mark them student-visible."""
    # Visibility lives on the related properties object, so the filter is
    # computed in Python and re-expressed as an id lookup.
    visible_ids = {group.id for group in Group.objects.all()
                   if group.properties.student_visible}
    return Group.objects.filter(id__in=visible_ids)
3.405801
2.598932
1.310462
def to_python(self, value):
    """Returns a Unicode object, stripped of surrounding whitespace.

    Empty values are normalized to the empty string.
    """
    if value in self.empty_values:
        return ""
    text = force_text(value)
    return text.strip()
5.502266
4.846674
1.135266
def _resolve_time(value):
    '''
    Resolve the time in seconds of a configuration value.

    Accepts None or an integer (returned as-is), a bare number string, a
    "<number><unit>" string resolved against SIMPLE_TIMES, or a Gregorian
    interval name (returned unchanged). Raises ValueError otherwise.
    '''
    if value is None or isinstance(value, (int, long)):
        return value
    if NUMBER_TIME.match(value):
        return long(value)
    simple = SIMPLE_TIME.match(value)
    # BUG FIX: the original re-ran SIMPLE_TIME.match(value) in the condition
    # instead of testing the already-captured `simple` match object.
    if simple:
        multiplier = long(simple.groups()[0])
        constant = SIMPLE_TIMES[simple.groups()[1]]
        return multiplier * constant
    if value in GREGORIAN_TIMES:
        return value
    raise ValueError('Unsupported time format %s' % value)
5.21225
4.158798
1.253307
def step_size(self, t0=None, t1=None):
    '''
    Return the time in seconds of a step. If a begin and end timestamp,
    return the time in seconds between them after adjusting for what buckets
    they alias to. If t1 and t0 resolve to the same bucket, returns the
    configured step.
    '''
    if t0 is None or t1 is None:
        return self._step
    first = self.to_bucket(t0)
    last = self.to_bucket(t1, steps=1)  # NOTE: "end" of second bucket
    if first == last:
        return self._step
    return self.from_bucket(last) - self.from_bucket(first)
7.297741
3.043603
2.397731
def buckets(self, start, end):
    '''
    Calculate the buckets within a starting and ending timestamp.
    '''
    first = self.to_bucket(start)
    last = self.to_bucket(end)
    # Inclusive of the bucket that `end` falls into.
    return range(first, last + 1)
3.893544
2.6369
1.476561
def ttl(self, steps, relative_time=None):
    '''
    Return the ttl given the number of steps, None if steps is not defined
    or we're otherwise unable to calculate one. If relative_time is defined,
    then return a ttl that is the number of seconds from now that the record
    should be expired.
    '''
    if not steps:
        return None
    if not relative_time:
        return steps * self._step
    rtime = self.to_bucket(relative_time)
    ntime = self.to_bucket(time.time())
    elapsed = ntime - rtime
    # The relative time is beyond our TTL cutoff
    if elapsed > steps:
        return 0
    # The relative time is in the "recent" past or future
    return (steps - elapsed) * self._step
6.203485
3.128148
1.983117
def step_size(self, t0, t1=None):
    '''
    Return the time in seconds for each step. Requires that we know a time
    relative to which we should calculate to account for variable length
    intervals (e.g. February)
    '''
    first = self.to_bucket(t0)
    # NOTE: "end" of second bucket; fall back to t0 when t1 is not supplied.
    last = self.to_bucket(t1 if t1 else t0, steps=1)
    # Difference in whole days between bucket boundaries, scaled to seconds.
    days = (self.from_bucket(last, native=True) - self.from_bucket(first, native=True)).days
    return days * SIMPLE_TIMES['d']
8.419893
4.301598
1.957387
def to_bucket(self, timestamp, steps=0):
    '''
    Calculate the bucket from a timestamp, optionally offset by `steps`
    intervals.
    '''
    moment = datetime.utcfromtimestamp(timestamp)
    if steps != 0:
        # Shift by the configured interval; months and years have variable
        # length and need special handling.
        if self._step == 'daily':
            moment += timedelta(days=steps)
        elif self._step == 'weekly':
            moment += timedelta(weeks=steps)
        elif self._step == 'monthly':
            moment += MonthDelta(steps)
        elif self._step == 'yearly':
            year = int(moment.strftime(self.FORMATS[self._step])) + steps
            moment = datetime(year=year, month=1, day=1)
    return int(moment.strftime(self.FORMATS[self._step]))
2.777652
2.661372
1.043692
def from_bucket(self, bucket, native=False):
    '''
    Calculate the timestamp given a bucket. Returns a naive datetime when
    `native` is True, otherwise a unix timestamp.
    '''
    # NOTE: this is due to a bug somewhere in strptime that does not process
    # the week number of '%Y%U' correctly. That bug could be very specific to
    # the combination of python and ubuntu that I was testing.
    bucket = str(bucket)
    if self._step == 'weekly':
        year, week = bucket[:4], bucket[4:]
        normal = datetime(year=int(year), month=1, day=1) + timedelta(weeks=int(week))
    else:
        normal = datetime.strptime(bucket, self.FORMATS[self._step])
    if native:
        return normal
    # FIX: long() is Python-2-only; int() is interchangeable on Python 2
    # (int/long are unified arithmetically) and keeps Python 3 working.
    return int(time.mktime(normal.timetuple()))
6.464039
5.750118
1.124158
def buckets(self, start, end):
    '''
    Calculate the buckets within a starting and ending timestamp.
    '''
    found = [self.to_bucket(start)]
    step = 1
    # In theory there's already been a check that end > start
    # TODO: Not a fan of an unbound while loop here
    while True:
        bucket = self.to_bucket(start, step)
        bucket_time = self.from_bucket(bucket)
        if bucket_time > end:
            break
        found.append(bucket)
        if bucket_time == end:
            break
        step += 1
    return found
6.364378
5.406761
1.177115
def normalize(self, timestamp, steps=0):
    '''
    Normalize a timestamp according to the interval configuration. Optionally
    can be used to calculate the timestamp N steps away.
    '''
    # So far, the only commonality with RelativeTime
    bucket = self.to_bucket(timestamp, steps)
    return self.from_bucket(bucket)
17.909288
7.081137
2.529154
def ttl(self, steps, relative_time=None):
    '''
    Return the ttl given the number of steps, None if steps is not defined
    or we're otherwise unable to calculate one. If relative_time is defined,
    then return a ttl that is the number of seconds from now that the record
    should be expired.
    '''
    if not steps:
        return None
    if not relative_time:
        # Approximate the ttl based on number of seconds, since it's
        # "close enough"
        return steps * SIMPLE_TIMES[self._step[0]]
    rtime = self.to_bucket(relative_time)
    ntime = self.to_bucket(time.time())
    # Convert the bucket distance to whole days
    day_diff = (self.from_bucket(ntime, native=True) - self.from_bucket(rtime, native=True)).days
    # Convert steps to number of days as well
    step_diff = (steps * SIMPLE_TIMES[self._step[0]]) / SIMPLE_TIMES['d']
    # The relative time is beyond our TTL cutoff
    if day_diff > step_diff:
        return 0
    # The relative time is in the "recent" past or future
    return (step_diff - day_diff) * SIMPLE_TIMES['d']
5.679403
3.80062
1.494336
def bulk_insert(self, inserts, intervals=0, **kwargs):
    '''
    Perform a bulk insert. The format of the inserts must be:

      { timestamp : { name: [ values ], ... }, ... }

    If the timestamp should be auto-generated, then "timestamp" should be
    None. Backends can implement this in any number of ways, the default
    being to perform a single insert for each value, with no specialized
    locking, transactions or batching.
    '''
    # A None key means "stamp it now".
    if None in inserts:
        inserts[time.time()] = inserts.pop(None)
    if self._write_func:
        write = self._write_func
        for names in inserts.values():
            # keys() snapshot: values are replaced in place below.
            for name in list(names.keys()):
                names[name] = [write(v) for v in names[name]]
    self._batch_insert(inserts, intervals, **kwargs)
6.474836
2.111728
3.066132
def insert(self, name, value, timestamp=None, intervals=0, **kwargs):
    '''
    Insert a value for the timeseries "name". For each interval in the
    configuration, will insert the value into a bucket for the interval
    "timestamp". If time is not supplied, will default to time.time(), else
    it should be a floating point value.

    If "intervals" is less than 0, inserts the value into timestamps
    "abs(intervals)" preceeding "timestamp" (i.e. "-1" inserts one extra
    value). If "intervals" is greater than 0, inserts the value into that
    many more intervals after "timestamp". The default behavior is to insert
    for a single timestamp.

    This supports the public methods of the same name in the subclasses. The
    value is expected to already be converted.
    '''
    timestamp = timestamp or time.time()
    convert = self._write_func
    if isinstance(value, (list, tuple, set)):
        # Collections route through the batch path after per-value conversion.
        batch = [convert(v) for v in value] if convert else value
        return self._batch_insert({timestamp: {name: batch}}, intervals, **kwargs)
    # TODO: document acceptable names
    # TODO: document what types values are supported
    # TODO: document behavior when time is outside the bounds of TTLed config
    # TODO: document how the data is stored.
    # TODO: better abstraction for "intervals" processing rather than in each implementation
    self._insert(name, convert(value) if convert else value, timestamp, intervals, **kwargs)
7.834605
2.802938
2.795141
''' Support for batch insert. Default implementation is non-optimized and is a simple loop over values. ''' for timestamp,names in inserts.iteritems(): for name,values in names.iteritems(): for value in values: self._insert( name, value, timestamp, intervals, **kwargs )
def _batch_insert(self, inserts, intervals, **kwargs)
Support for batch insert. Default implementation is non-optimized and is a simple loop over values.
7.091985
3.234308
2.192737
''' Helper for the subclasses to generate a list of timestamps. ''' rval = [timestamp] if intervals<0: while intervals<0: rval.append( config['i_calc'].normalize(timestamp, intervals) ) intervals += 1 elif intervals>0: while intervals>0: rval.append( config['i_calc'].normalize(timestamp, intervals) ) intervals -= 1 return rval
def _normalize_timestamps(self, timestamp, intervals, config)
Helper for the subclasses to generate a list of timestamps.
5.201459
3.70645
1.403353
def iterate(self, name, interval, **kwargs):
    '''
    Returns a generator that iterates over all the intervals and returns
    data for various timestamps, in the form:

      ( unix_timestamp, data )

    This will check for all timestamp buckets that might exist between the
    first and last timestamp in a series. Each timestamp bucket will be
    fetched separately to keep this memory efficient, at the cost of extra
    trips to the data store.

    Keyword arguments are the same as get().
    '''
    config = self._intervals.get(interval)
    if not config:
        raise UnknownInterval(interval)
    bounds = self.properties(name)[interval]
    calc = config['i_calc']
    for bucket in calc.buckets(bounds['first'], bounds['last']):
        # One fetch per bucket keeps memory bounded.
        rows = self.get(name, interval, timestamp=calc.from_bucket(bucket), **kwargs)
        for timestamp, row in rows.items():
            yield (timestamp, row)
7.608671
2.555367
2.977526
''' Join a list of results. Supports both get and series. ''' rval = OrderedDict() i_keys = set() for res in results: i_keys.update( res.keys() ) for i_key in sorted(i_keys): if coarse: rval[i_key] = join( [res.get(i_key) for res in results] ) else: rval[i_key] = OrderedDict() r_keys = set() for res in results: r_keys.update( res.get(i_key,{}).keys() ) for r_key in sorted(r_keys): rval[i_key][r_key] = join( [res.get(i_key,{}).get(r_key) for res in results] ) return rval
def _join_results(self, results, coarse, join)
Join a list of results. Supports both get and series.
2.448816
1.989759
1.23071
''' Process transforms on the data. ''' if isinstance(transform, (list,tuple,set)): return { t : self._transform(data,t,step_size) for t in transform } elif isinstance(transform, dict): return { tn : self._transform(data,tf,step_size) for tn,tf in transform.items() } return self._transform(data,transform,step_size)
def _process_transform(self, data, transform, step_size)
Process transforms on the data.
3.055201
2.692502
1.134707
''' Transform the data. If the transform is not supported by this series, returns the data unaltered. ''' if transform=='mean': total = sum( k*v for k,v in data.items() ) count = sum( data.values() ) data = float(total)/float(count) if count>0 else 0 elif transform=='count': data = sum(data.values()) elif transform=='min': data = min(data.keys() or [0]) elif transform=='max': data = max(data.keys() or [0]) elif transform=='sum': data = sum( k*v for k,v in data.items() ) elif transform=='rate': data = { k:v/float(step_size) for k,v in data.items() } elif callable(transform): data = transform(data, step_size) return data
def _transform(self, data, transform, step_size)
Transform the data. If the transform is not supported by this series, returns the data unaltered.
2.704439
2.263041
1.195047
''' Condense by adding together all of the lists. ''' rval = {} for resolution,histogram in data.items(): for value,count in histogram.items(): rval[ value ] = count + rval.get(value,0) return rval
def _condense(self, data)
Condense by adding together all of the lists.
8.72328
5.47251
1.594018
''' Join multiple rows worth of data into a single result. ''' rval = {} for row in rows: if row: for value,count in row.items(): rval[ value ] = count + rval.get(value,0) return rval
def _join(self, rows)
Join multiple rows worth of data into a single result.
6.849771
4.761981
1.438429
''' Join multiple rows worth of data into a single result. ''' rval = 0 for row in rows: if row: rval += row return rval
def _join(self, rows)
Join multiple rows worth of data into a single result.
10.523625
5.715749
1.841163
''' Transform the data. If the transform is not supported by this series, returns the data unaltered. ''' if callable(transform): data = transform(data, step_size) return data
def _transform(self, data, transform, step_size)
Transform the data. If the transform is not supported by this series, returns the data unaltered.
6.63206
2.932346
2.26169
''' Condense by returning the last real value of the gauge. ''' if data: data = filter(None,data.values()) if data: return data[-1] return None
def _condense(self, data)
Condense by returning the last real value of the gauge.
11.060218
4.329454
2.554645
''' Transform the data. If the transform is not supported by this series, returns the data unaltered. ''' if transform=='mean': total = sum( data ) count = len( data ) data = float(total)/float(count) if count>0 else 0 elif transform=='count': data = len(data) elif transform=='min': data = min(data or [0]) elif transform=='max': data = max(data or [0]) elif transform=='sum': data = sum(data) elif transform=='rate': data = len(data) / float(step_size) elif callable(transform): data = transform(data) return data
def _transform(self, data, transform, step_size)
Transform the data. If the transform is not supported by this series, returns the data unaltered.
3.05352
2.427206
1.258039
''' Condense by or-ing all of the sets. ''' if data: return reduce(operator.ior, data.values()) return set()
def _condense(self, data)
Condense by or-ing all of the sets.
10.897528
4.715469
2.311017
def stdev(x):
    r"""Calculate the sample standard deviation of data x[]:
        std = sqrt(\sum_i (x_i - mean)^2 \over n-1)
    https://wiki.python.org/moin/NumericAndScientificRecipes
    """
    from math import sqrt
    n = float(len(x))
    mean = sum(x) / n
    # Bessel's correction: divide by n-1 for the sample variance.
    variance = sum((value - mean) ** 2 for value in x) / (n - 1)
    return sqrt(variance)
2.797665
2.785158
1.00449
def handle(self, **options):
    """Create the student-visible "Students" and "Class of 20XX" groups and
    add each current student to their class group and the Students group."""
    students_group, _ = Group.objects.get_or_create(name="Students")
    props = students_group.properties
    props.student_visible = True
    props.save()
    students_group.save()
    # One class group per current graduation year (seniors through freshmen).
    for year in range(settings.SENIOR_GRADUATION_YEAR, settings.SENIOR_GRADUATION_YEAR + 4):
        members = User.objects.users_in_year(year)
        class_group, _ = Group.objects.get_or_create(name="Class of {}".format(year))
        class_props = class_group.properties
        class_props.student_visible = True
        class_props.save()
        class_group.save()
        self.stdout.write("{}: {} users".format(year, len(members)))
        for member in members:
            member.groups.add(class_group)
            member.groups.add(students_group)
            member.save()
        self.stdout.write("{}: Processed".format(year))
    self.stdout.write("Done.")
3.070882
2.878486
1.066839
def handle(self, *args, **options):
    """Run the end-of-year turnover.

    EIGHTH: EighthBlock: filtered EighthSignup: absences removed
    EighthActivity: keep ANNOUNCEMENTS: AnnouncementRequest: filtered USERS:
    User: graduated students deleted. Destructive steps only execute when
    --run is given; otherwise the plan is printed in pretend mode.
    """
    do_run = options["run"]
    if not do_run:
        self.stdout.write("In pretend mode.")
    elif not options["confirm"]:
        self.ask("===== WARNING! =====\n\n"
                 "This script will DESTROY data! Ensure that you have a properly backed-up copy of your database before proceeding.\n\n"
                 "===== WARNING! =====\n\n"
                 "Continue?")
    current_year = timezone.now().year
    new_senior_year = current_year + 1
    turnover_date = datetime.datetime(current_year, 7, 1)
    self.stdout.write("Turnover date set to: {}".format(turnover_date.strftime("%c")))
    # Refuse to run until the settings file has been bumped for the new year.
    if not self.chk("SENIOR_GRADUATION_YEAR = {} in settings/__init__.py".format(new_senior_year),
                    settings.SENIOR_GRADUATION_YEAR == new_senior_year):
        return
    # Each step is announced even in pretend mode; the action only fires
    # when --run was given.
    steps = [
        ("Resolving absences", self.clear_absences),
        ("Updating welcome state", self.update_welcome),
        ("Deleting graduated users", self.handle_delete),
        ("Archiving admin comments", self.archive_admin_comments),
    ]
    for message, action in steps:
        self.stdout.write(message)
        if do_run:
            action()
4.653563
4.574543
1.017274
def authenticate_credentials(self, userid, password, request=None):
    """Authenticate the userid and password.

    Returns a ``(user, None)`` tuple on success and raises
    ``AuthenticationFailed`` for unknown users, bad passwords, or inactive
    accounts.
    """
    user = auth.authenticate(username=userid, password=password)
    # Simplified: the original `(user and not user.is_active)` guard was
    # redundant -- user is known to be non-None on that side of the `or`.
    if user is None or not user.is_active:
        raise exceptions.AuthenticationFailed("Invalid username/password.")
    return (user, None)
3.110249
3.249691
0.957091
def check_internal_ip(request):
    """Return whether the request originated from an internal IP.

    request is an AsgiRequest; the X-Forwarded-For header takes precedence
    over REMOTE_ADDR when present.
    """
    meta = request.META
    remote_addr = meta.get("HTTP_X_FORWARDED_FOR", meta.get("REMOTE_ADDR", ""))
    return remote_addr in settings.INTERNAL_IPS
2.360949
2.329848
1.013349