_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q15600
setup_simulation_from_model_specification
train
def setup_simulation_from_model_specification(model_specification_file: str) -> InteractiveContext:
    """Build a ready-to-run simulation from a model specification file.

    Parameters
    ----------
    model_specification_file
        The path to a model specification file.

    Returns
    -------
        A simulation context that is setup and ready to run.
    """
    # Construct the raw simulation context, then run its full setup phase
    # (component setup + initial population creation).
    sim = initialize_simulation_from_model_specification(model_specification_file)
    sim.setup()
    return sim
python
{ "resource": "" }
q15601
InteractiveContext.setup
train
def setup(self): """Setup the simulation and initialize its population.""" super().setup() self._start_time = self.clock.time self.initialize_simulants()
python
{ "resource": "" }
q15602
InteractiveContext.initialize_simulants
train
def initialize_simulants(self):
    """Initialize this simulation's population. Should not be called directly."""
    super().initialize_simulants()
    # Snapshot the freshly created population (``True`` includes untracked
    # simulants) so ``reset`` can restore it later.
    self._initial_population = self.population.get_population(True)
python
{ "resource": "" }
q15603
InteractiveContext.reset
train
def reset(self): """Reset the simulation to its initial state.""" warnings.warn("This reset method is very crude. It should work for " "many simple simulations, but we make no guarantees. In " "particular, if you have components that manage their " "own state in any way, this might not work.") self.population._population = self._initial_population self.clock._time = self._start_time
python
{ "resource": "" }
q15604
InteractiveContext.run
train
def run(self, with_logging: bool=True) -> int:
    """Run the simulation for the full duration given in the configuration.

    Parameters
    ----------
    with_logging
        Whether or not to log the simulation steps. Only works in an
        ipython environment.

    Returns
    -------
        The number of steps the simulation took.
    """
    # Delegate to run_until with the configured stop time.
    end_time = self.clock.stop_time
    return self.run_until(end_time, with_logging=with_logging)
python
{ "resource": "" }
q15605
InteractiveContext.run_for
train
def run_for(self, duration: Timedelta, with_logging: bool=True) -> int:
    """Run the simulation for the given time duration.

    Parameters
    ----------
    duration
        The length of time to run the simulation for. Should be the same
        type as the simulation clock's step size (usually a pandas
        Timedelta).
    with_logging
        Whether or not to log the simulation steps. Only works in an
        ipython environment.

    Returns
    -------
        The number of steps the simulation took.
    """
    # Compute the absolute target time and delegate to run_until.
    target_time = self.clock.time + duration
    return self.run_until(target_time, with_logging=with_logging)
python
{ "resource": "" }
q15606
InteractiveContext.run_until
train
def run_until(self, end_time: Time, with_logging=True) -> int:
    """Run the simulation until the provided end time.

    Parameters
    ----------
    end_time
        The time to run the simulation until. The simulation will run until
        its clock is greater than or equal to the provided end time.
    with_logging
        Whether or not to log the simulation steps. Only works in an
        ipython environment.

    Returns
    -------
        The number of steps the simulation took.

    Raises
    ------
    ValueError
        If ``end_time`` is not the same type as the simulation clock's time.
    """
    if not isinstance(end_time, type(self.clock.time)):
        raise ValueError(f"Provided time must be an instance of {type(self.clock.time)}")
    # Round up so the clock lands at or just past end_time.
    iterations = int(ceil((end_time - self.clock.time)/self.clock.step_size))
    self.take_steps(number_of_steps=iterations, with_logging=with_logging)
    # Post-condition: end_time lies within the last step taken.
    assert self.clock.time - self.clock.step_size < end_time <= self.clock.time
    return iterations
python
{ "resource": "" }
q15607
InteractiveContext.step
train
def step(self, step_size: Timedelta=None):
    """Advance the simulation one step.

    Parameters
    ----------
    step_size
        An optional size of step to take. Must be the same type as the
        simulation clock's step size (usually a pandas.Timedelta).

    Raises
    ------
    ValueError
        If ``step_size`` is given but is not the clock's step-size type.
    """
    # Temporarily override the clock's step size, take one step, then
    # restore the previous step size so later steps are unaffected.
    old_step_size = self.clock.step_size
    if step_size is not None:
        if not isinstance(step_size, type(self.clock.step_size)):
            raise ValueError(f"Provided time must be an instance of {type(self.clock.step_size)}")
        self.clock._step_size = step_size
    super().step()
    self.clock._step_size = old_step_size
python
{ "resource": "" }
q15608
InteractiveContext.take_steps
train
def take_steps(self, number_of_steps: int=1, step_size: Timedelta=None, with_logging: bool=True):
    """Run the simulation for the given number of steps.

    Parameters
    ----------
    number_of_steps
        The number of steps to take.
    step_size
        An optional size of step to take. Must be the same type as the
        simulation clock's step size (usually a pandas.Timedelta).
    with_logging
        Whether or not to log the simulation steps. Only works in an
        ipython environment.
    """
    if not isinstance(number_of_steps, int):
        raise ValueError('Number of steps must be an integer.')
    # Wrap the iterator with a progress logger only when running in
    # ipython and logging was requested.
    steps = range(number_of_steps)
    if run_from_ipython() and with_logging:
        steps = log_progress(steps, name='Step')
    for _ in steps:
        self.step(step_size)
python
{ "resource": "" }
q15609
InteractiveContext.get_population
train
def get_population(self, untracked: bool=False) -> pd.DataFrame:
    """Get a copy of the population state table.

    Parameters
    ----------
    untracked
        Whether to include untracked simulants in the returned table.
    """
    state_table = self.population.get_population(untracked)
    return state_table
python
{ "resource": "" }
q15610
InteractiveContext.get_listeners
train
def get_listeners(self, event_type: str) -> List[Callable]:
    """Get all listeners of a particular type of event.

    Raises
    ------
    ValueError
        If no event of the requested type exists.
    """
    if event_type in self.events:
        return self.events.get_listeners(event_type)
    raise ValueError(f'No event {event_type} in system.')
python
{ "resource": "" }
q15611
InteractiveContext.get_emitter
train
def get_emitter(self, event_type: str) -> Callable:
    """Get the callable that emits the given type of events.

    Raises
    ------
    ValueError
        If no event of the requested type exists.
    """
    if event_type in self.events:
        return self.events.get_emitter(event_type)
    raise ValueError(f'No event {event_type} in system.')
python
{ "resource": "" }
q15612
InteractiveContext.get_components
train
def get_components(self) -> List:
    """Get a list of all components in the simulation.

    Returns
    -------
    List
        The simulation's components followed by its internal managers.
    """
    # The original wrapped this in an identity comprehension
    # (``[c for c in a + b]``); list concatenation already yields a new list.
    return self.component_manager._components + self.component_manager._managers
python
{ "resource": "" }
q15613
LookupTableInterface.build_table
train
def build_table(self, data, key_columns=('sex',), parameter_columns=(['age', 'age_group_start', 'age_group_end'], ['year', 'year_start', 'year_end']), value_columns=None) -> LookupTable: """Construct a LookupTable from input data. If data is a ``pandas.DataFrame``, an interpolation function of the specified order will be calculated for each permutation of the set of key_columns. The columns in parameter_columns will be used as parameters for the interpolation functions which will estimate all remaining columns in the table. If data is a number, time, list, or tuple, a scalar table will be constructed with the values in data as the values in each column of the table, named according to value_columns. Parameters ---------- data : The source data which will be used to build the resulting LookupTable. key_columns : [str] Columns used to select between interpolation functions. These should be the non-continuous variables in the data. For example 'sex' in data about a population. parameter_columns : [str] The columns which contain the parameters to the interpolation functions. These should be the continuous variables. For example 'age' in data about a population. value_columns : [str] The data columns that will be in the resulting LookupTable. Columns to be interpolated over if interpolation or the names of the columns in the scalar table. Returns ------- LookupTable """ return self._lookup_table_manager.build_table(data, key_columns, parameter_columns, value_columns)
python
{ "resource": "" }
q15614
ResultsWriter.add_sub_directory
train
def add_sub_directory(self, key, path):
    """Adds a sub-directory to the results directory.

    Parameters
    ----------
    key: str
        A look-up key for the directory path.
    path: str
        The relative path from the root of the results directory to the
        sub-directory.

    Returns
    -------
    str:
        The absolute path to the sub-directory.
    """
    full_path = os.path.join(self.results_root, path)
    # Create the directory (and parents) if needed; register it for lookup.
    os.makedirs(full_path, exist_ok=True)
    self._directories[key] = full_path
    return full_path
python
{ "resource": "" }
q15615
ResultsWriter.write_output
train
def write_output(self, data, file_name, key=None):
    """Writes output data to disk.

    Parameters
    ----------
    data: pandas.DataFrame or dict
        The data to write to disk.
    file_name: str
        The name of the file to write. The extension ('.yaml' or '.hdf')
        selects the serialization format.
    key: str, optional
        The lookup key for the sub_directory to write results to, if any.

    Raises
    ------
    NotImplementedError
        If the file extension is neither 'yaml' nor 'hdf'.
    """
    path = os.path.join(self._directories[key], file_name)
    extension = file_name.split('.')[-1]
    if extension == 'yaml':
        with open(path, 'w') as f:
            yaml.dump(data, f)
    elif extension == 'hdf':
        # to_hdf breaks with categorical dtypes, so convert them to object
        # before writing.  NOTE(review): this mutates the caller's DataFrame
        # in place — confirm callers do not rely on categorical dtypes after.
        categorical_columns = data.dtypes[data.dtypes == 'category'].index
        data.loc[:, categorical_columns] = data.loc[:, categorical_columns].astype('object')
        # Writing to an hdf over and over balloons the file size, so write
        # to a new file and move it over the old one.
        data.to_hdf(path + "update", 'data')
        if os.path.exists(path):
            os.remove(path)
        os.rename(path + "update", path)
    else:
        raise NotImplementedError(
            f"Only 'yaml' and 'hdf' file types are supported. You requested {extension}")
python
{ "resource": "" }
q15616
ResultsWriter.copy_file
train
def copy_file(self, src_path, file_name, key=None):
    """Copies a file unmodified to a location inside the output directory.

    Parameters
    ----------
    src_path: str
        Path to the src file.
    file_name: str
        Name of the destination file.
    key: str, optional
        The lookup key for the sub_directory to copy the file into, if any.
    """
    destination = os.path.join(self._directories[key], file_name)
    shutil.copyfile(src_path, destination)
python
{ "resource": "" }
q15617
replace_combiner
train
def replace_combiner(value, mutator, *args, **kwargs):
    """Replaces the output of the source or mutator with the output of the
    subsequent mutator.

    This is the default combiner.
    """
    # The previous stage's value is appended as the last positional argument.
    call_args = list(args)
    call_args.append(value)
    return mutator(*call_args, **kwargs)
python
{ "resource": "" }
q15618
set_combiner
train
def set_combiner(value, mutator, *args, **kwargs):
    """Expects the output of the source to be a set to which the result of
    each mutator is added.
    """
    new_element = mutator(*args, **kwargs)
    value.add(new_element)
    return value
python
{ "resource": "" }
q15619
list_combiner
train
def list_combiner(value, mutator, *args, **kwargs):
    """Expects the output of the source to be a list to which the result of
    each mutator is appended.
    """
    new_element = mutator(*args, **kwargs)
    value.append(new_element)
    return value
python
{ "resource": "" }
q15620
ValuesInterface.register_value_producer
train
def register_value_producer(self, value_name: str, source: Callable[..., pd.DataFrame]=None,
                            preferred_combiner: Callable=replace_combiner,
                            preferred_post_processor: Callable[..., pd.DataFrame]=None) -> Pipeline:
    """Marks a ``Callable`` as the producer of a named value.

    Parameters
    ----------
    value_name :
        The name of the new dynamic value pipeline.
    source :
        A callable source for the dynamic value pipeline.
    preferred_combiner :
        A strategy for combining the source and the results of any calls to
        mutators in the pipeline. ``vivarium`` provides the strategies
        ``replace_combiner`` (the default), ``list_combiner``, and
        ``set_combiner``, importable from ``vivarium.framework.values``.
        Client code may define additional strategies as necessary.
    preferred_post_processor :
        A strategy for processing the final output of the pipeline.
        ``vivarium`` provides ``rescale_post_processor`` and
        ``joint_value_post_processor``, importable from
        ``vivarium.framework.values``. Client code may define additional
        strategies as necessary.

    Returns
    -------
    Callable
        A callable reference to the named dynamic value pipeline.
    """
    # Delegate straight to the underlying value manager.
    manager = self._value_manager
    return manager.register_value_producer(value_name, source,
                                           preferred_combiner,
                                           preferred_post_processor)
python
{ "resource": "" }
q15621
ValuesInterface.register_rate_producer
train
def register_rate_producer(self, rate_name: str, source: Callable[..., pd.DataFrame]=None) -> Pipeline:
    """Marks a ``Callable`` as the producer of a named rate.

    This is a convenience wrapper around ``register_value_producer`` that
    makes sure rate data is appropriately scaled to the size of the
    simulation time step. It is equivalent to
    ``register_value_producer(value_name, source,
    preferred_combiner=replace_combiner,
    preferred_post_processor=rescale_post_processor)``

    Parameters
    ----------
    rate_name :
        The name of the new dynamic rate pipeline.
    source :
        A callable source for the dynamic rate pipeline.

    Returns
    -------
    Callable
        A callable reference to the named dynamic rate pipeline.
    """
    manager = self._value_manager
    return manager.register_rate_producer(rate_name, source)
python
{ "resource": "" }
q15622
ValuesInterface.register_value_modifier
train
def register_value_modifier(self, value_name: str, modifier: Callable, priority: int=5):
    """Marks a ``Callable`` as the modifier of a named value.

    Parameters
    ----------
    value_name :
        The name of the dynamic value pipeline to be modified.
    modifier :
        A function that modifies the source of the dynamic value pipeline
        when called. If the pipeline has a ``replace_combiner``, the modifier
        should accept the same arguments as the pipeline source with an
        additional last positional argument for the results of the previous
        stage in the pipeline. For the ``list_combiner`` and ``set_combiner``
        strategies, the pipeline modifiers should have the same signature as
        the pipeline source.
    priority : {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}
        An indication of the order in which pipeline modifiers should be
        called. Modifiers with smaller priority values will be called earlier
        in the pipeline. Modifiers with the same priority have no guaranteed
        ordering, and so should be commutative.
    """
    manager = self._value_manager
    manager.register_value_modifier(value_name, modifier, priority)
python
{ "resource": "" }
q15623
run
train
def run(model_specification, results_directory, verbose, log, with_debugger):
    """Run a simulation from the command line.

    The simulation itself is defined by the given MODEL_SPECIFICATION yaml
    file. Within the results directory, which defaults to ~/vivarium_results
    if none is provided, a subdirectory will be created with the same name as
    the MODEL_SPECIFICATION if one does not exist. Results will be written to
    a further subdirectory named after the start time of the simulation run."""
    log_level = logging.DEBUG if verbose else logging.ERROR
    logging.basicConfig(filename=log, level=log_level)
    try:
        run_simulation(model_specification, results_directory)
    except (BdbQuit, KeyboardInterrupt):
        # Let debugger exits and user interrupts propagate untouched.
        raise
    except Exception as e:
        if not with_debugger:
            logging.exception("Uncaught exception {}".format(e))
            raise
        # Drop into a post-mortem debugging session on failure.
        import pdb
        import traceback
        traceback.print_exc()
        pdb.post_mortem()
python
{ "resource": "" }
q15624
profile
train
def profile(model_specification, results_directory, process):
    """Run a simulation based on the provided MODEL_SPECIFICATION and profile
    the run.

    Writes a cProfile stats file next to the results; if ``process`` is true,
    additionally renders a human-readable text report sorted by cumulative
    time.
    """
    model_specification = Path(model_specification)
    results_directory = Path(results_directory)
    # NOTE(review): str.replace swaps 'yaml' anywhere in the file name, not
    # just the extension — confirm spec names never contain 'yaml' elsewhere.
    out_stats_file = results_directory / f'{model_specification.name}'.replace('yaml', 'stats')

    # Build the call as a string so cProfile.runctx can execute it with the
    # module globals (where run_simulation lives) in scope.
    command = f'run_simulation("{model_specification}", "{results_directory}")'
    cProfile.runctx(command, globals=globals(), locals=locals(), filename=out_stats_file)

    if process:
        out_txt_file = results_directory / (out_stats_file.name + '.txt')
        with open(out_txt_file, 'w') as f:
            p = pstats.Stats(str(out_stats_file), stream=f)
            p.sort_stats('cumulative')
            p.print_stats()
python
{ "resource": "" }
q15625
Event.split
train
def split(self, new_index):
    """Create a new event which is a copy of this one but with a new index."""
    # Copy timing information onto a fresh Event that shares user_data.
    clone = Event(new_index, self.user_data)
    clone.time = self.time
    clone.step_size = self.step_size
    return clone
python
{ "resource": "" }
q15626
_EventChannel.emit
train
def emit(self, event):
    """Notifies all listeners to this channel that an event has occurred.

    Parameters
    ----------
    event : Event
        The event to be emitted.

    Returns
    -------
    Event
        The (possibly time-stamped) event that was delivered to listeners.
    """
    # Stamp the event with the upcoming time-step boundary before delivery.
    if hasattr(event, 'time'):
        event.step_size = self.manager.step_size()
        event.time = self.manager.clock() + self.manager.step_size()

    # Buckets are ordered by priority; within a bucket, sorting by function
    # name makes delivery order deterministic across runs.
    for priority_bucket in self.listeners:
        for listener in sorted(priority_bucket, key=lambda x: x.__name__):
            listener(event)
    return event
python
{ "resource": "" }
q15627
EventManager.setup
train
def setup(self, builder): """Performs this components simulation setup. Parameters ---------- builder : vivarium.framework.engine.Builder Object giving access to core framework functionality. """ self.clock = builder.time.clock() self.step_size = builder.time.step_size()
python
{ "resource": "" }
q15628
EventManager.register_listener
train
def register_listener(self, name, listener, priority=5):
    """Registers a new listener to the named event.

    Parameters
    ----------
    name : str
        The name of the event.
    listener : Callable
        The consumer of the named event.
    priority : int
        Number in range(10) used to assign the ordering in which listeners
        process the event.
    """
    channel = self._event_types[name]
    channel.listeners[priority].append(listener)
python
{ "resource": "" }
q15629
EventInterface.get_emitter
train
def get_emitter(self, name: str) -> Callable[[Event], Event]:
    """Gets an emitter for a named event.

    Parameters
    ----------
    name :
        The name of the event the requested emitter will emit. Users may
        provide their own named events by requesting an emitter with this
        function, but should do so with caution as it makes time much more
        difficult to think about.

    Returns
    -------
        An emitter for the named event. The emitter should be called by the
        requesting component at the appropriate point in the simulation
        lifecycle.
    """
    manager = self._event_manager
    return manager.get_emitter(name)
python
{ "resource": "" }
q15630
EventInterface.register_listener
train
def register_listener(self, name: str, listener: Callable[[Event], None], priority: int=5) -> None:
    """Registers a callable as a listener to events with the given name.

    The listening callable will be called with a named ``Event`` as its only
    argument any time the event emitter is invoked from somewhere in the
    simulation.

    The framework creates the following events and emits them at different
    points in the simulation:

        At the end of the setup phase: ``post_setup``
        Every time step: ``time_step__prepare``, ``time_step``,
        ``time_step__cleanup``, ``collect_metrics``
        At simulation end: ``simulation_end``

    Parameters
    ----------
    name :
        The name of the event to listen for.
    listener :
        The callable to be invoked any time an ``Event`` with the given name
        is emitted.
    priority : {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}
        An indication of the order in which event listeners should be
        called. Listeners with smaller priority values will be called
        earlier. Listeners with the same priority have no guaranteed
        ordering. This feature should be avoided if possible. Components
        should strive to obey the Markov property as they transform the
        state table (the state of the simulation at the beginning of the
        next time step should only depend on the current state of the
        system).
    """
    manager = self._event_manager
    manager.register_listener(name, listener, priority)
python
{ "resource": "" }
q15631
PopulationView.get
train
def get(self, index: pd.Index, query: str='', omit_missing_columns: bool=False) -> pd.DataFrame:
    """For the rows in ``index`` get the columns from the simulation's
    population which this view is configured. The result may be further
    filtered by the view's query.

    Parameters
    ----------
    index :
        Index of the population to get.
    query :
        Conditions used to filter the index. May use columns not in the
        requested view.
    omit_missing_columns :
        Silently skip loading columns which are not present in the
        population table. In general you want this to be False because
        that situation indicates an error, but sometimes, like during
        population initialization, it can be convenient to just load
        whatever data is actually available.

    Returns
    -------
    pd.DataFrame
        A table with the subset of the population requested.

    Raises
    ------
    PopulationError
        If columns are requested that are missing (and
        ``omit_missing_columns`` is False).
    """
    # Start from the full table (including untracked simulants), narrowed
    # to the requested rows.
    pop = self.manager.get_population(True).loc[index]

    # Apply the view's own filter first, then the caller's filter.
    if self._query:
        pop = pop.query(self._query)
    if query:
        pop = pop.query(query)

    if not self._columns:
        # An un-columned view returns everything.
        return pop
    else:
        if omit_missing_columns:
            columns = list(set(self._columns).intersection(pop.columns))
        else:
            columns = self._columns
        try:
            # Copy so callers can mutate without corrupting the state table.
            return pop[columns].copy()
        except KeyError:
            non_existent_columns = set(columns) - set(pop.columns)
            raise PopulationError(f'Requested column(s) {non_existent_columns} not in population table.')
python
{ "resource": "" }
q15632
PopulationView.update
train
def update(self, pop: Union[pd.DataFrame, pd.Series]):
    """Update the simulation's state to match ``pop``

    Parameters
    ----------
    pop :
        The data which should be copied into the simulation's state. If
        ``pop`` is a DataFrame only those columns included in the view's
        columns will be used. If ``pop`` is a Series it must have a name
        that matches one of the view's columns unless the view only has
        one column in which case the Series will be assumed to refer to
        that regardless of its name.

    Raises
    ------
    PopulationError
        If an unnamed/ambiguous Series is given, or a component changes a
        column's dtype outside of population growth.
    """
    if not pop.empty:
        # Resolve which view columns this update touches.
        if isinstance(pop, pd.Series):
            if pop.name in self._columns:
                affected_columns = [pop.name]
            elif len(self._columns) == 1:
                affected_columns = self._columns
            else:
                raise PopulationError('Cannot update with a Series unless the series name equals a column '
                                      'name or there is only a single column in the view')
        else:
            affected_columns = set(pop.columns)

        affected_columns = set(affected_columns).intersection(self._columns)
        state_table = self.manager.get_population(True)
        if not self.manager.growing:
            # Outside of growth, only columns that already exist may change.
            affected_columns = set(affected_columns).intersection(state_table.columns)

        for c in affected_columns:
            if c in state_table:
                # Write into the existing column's underlying array in place.
                v = state_table[c].values
                if isinstance(pop, pd.Series):
                    v2 = pop.values
                else:
                    v2 = pop[c].values
                v[pop.index] = v2

                if v.dtype != v2.dtype:
                    # This happens when the population is being grown because extending
                    # the index forces columns that don't have a natural null type
                    # to become 'object'
                    if not self.manager.growing:
                        raise PopulationError('Component corrupting population table. '
                                              'Old column type: {} New column type: {}'.format(v.dtype, v2.dtype))
                    v = v.astype(v2.dtype)
            else:
                # New column: take the incoming values wholesale.
                if isinstance(pop, pd.Series):
                    v = pop.values
                else:
                    v = pop[c].values
            self.manager._population[c] = v
python
{ "resource": "" }
q15633
PopulationManager.get_view
train
def get_view(self, columns: Sequence[str], query: str=None) -> PopulationView:
    """Return a configured PopulationView

    Notes
    -----
    Client code should only need this (and only through the version exposed
    as ``population_view`` on the builder during setup) if it uses dynamically
    generated column names that aren't known at definition time. Otherwise
    components should use ``uses_columns``.
    """
    if 'tracked' not in columns:
        # Views that don't ask for 'tracked' implicitly filter to tracked
        # simulants.  Bug fix: the original concatenated without a separating
        # space ("<query>and tracked == True"), which produces an invalid
        # pandas query expression for typical user-supplied queries.
        query_with_track = query + ' and tracked == True' if query else 'tracked == True'
        return PopulationView(self, columns, query_with_track)
    return PopulationView(self, columns, query)
python
{ "resource": "" }
q15634
PopulationInterface.get_view
train
def get_view(self, columns: Sequence[str], query: str = None) -> PopulationView:
    """Get a time-varying view of the population state table.

    The requested population view can be used to view the current state or
    to update the state with new values.

    Parameters
    ----------
    columns :
        A subset of the state table columns that will be available in the
        returned view.
    query :
        A filter on the population state. This filters out particular rows
        (simulants) based on their current state. The query should be
        provided in a way that is understood by the
        ``pandas.DataFrame.query`` method and may reference state table
        columns not requested in the ``columns`` argument.

    Returns
    -------
    PopulationView
        A filtered view of the requested columns of the population state
        table.
    """
    manager = self._population_manager
    return manager.get_view(columns, query)
python
{ "resource": "" }
q15635
PopulationInterface.initializes_simulants
train
def initializes_simulants(self, initializer: Callable[[SimulantData], None],
                          creates_columns: Sequence[str]=(),
                          requires_columns: Sequence[str]=()):
    """Marks a callable as a source of initial state information for new
    simulants.

    Parameters
    ----------
    initializer :
        A callable that adds or updates initial state information about
        new simulants.
    creates_columns :
        A list of the state table columns that the given initializer
        provides the initial state information for.
    requires_columns :
        A list of the state table columns that already need to be present
        and populated in the state table before the provided initializer
        is called.
    """
    manager = self._population_manager
    manager.register_simulant_initializer(initializer, creates_columns, requires_columns)
python
{ "resource": "" }
q15636
ensure_dir_exists
train
def ensure_dir_exists(directory):
    """Make sure ``directory`` exists on disk.

    Creates the directory (including any missing parents) if it does not
    already exist. Empty/falsy paths are ignored.
    """
    if directory:
        # exist_ok avoids the TOCTOU race between an existence check and
        # the creation that the original check-then-create code had.
        os.makedirs(directory, exist_ok=True)
python
{ "resource": "" }
q15637
traverse_dict
train
def traverse_dict(dicc, keys, default_value=None):
    """Walk a nested structure following a list of keys, returning
    ``default_value`` if the path cannot be followed to the end.

    Args:
        dicc (dict): Structure to traverse.
        keys (list): Keys to follow, in order. May mix list indices and
            dict keys.
        default_value: Value returned when `dicc` cannot be traversed
            following the full list of `keys`.

    Returns:
        object: The value obtained by following `keys` into `dicc`.
    """
    for key in keys:
        if isinstance(dicc, dict) and key in dicc:
            dicc = dicc[key]
        elif (isinstance(dicc, list) and isinstance(key, int)
              # Bug fix: also bound negative indices; the original only
              # checked ``key < len`` so e.g. -10 on a 2-item list raised
              # IndexError instead of returning default_value.
              and -len(dicc) <= key < len(dicc)):
            dicc = dicc[key]
        else:
            return default_value
    return dicc
python
{ "resource": "" }
q15638
parse_value
train
def parse_value(cell):
    """Extract the value of an Excel cell as text."""
    value = cell.value

    # Strip surrounding whitespace from strings.
    if isinstance(value, string_types):
        value = value.strip()

    # Convert dates to ISO 8601 text.
    if isinstance(value, (datetime)):
        value = value.isoformat()

    return value
python
{ "resource": "" }
q15639
sheet_to_table
train
def sheet_to_table(worksheet):
    """Turn an Excel workbook sheet into a list of dicts.

    Args:
        worksheet (Workbook.worksheet): Worksheet from an XLSX file as read
            by `openpyxl`.

    Returns:
        list_of_dicts: One dict per data row in the sheet, keyed by the
            header row's field names.
    """
    headers = []
    value_rows = []
    for row_i, row in enumerate(worksheet.iter_rows()):

        # Row 0 defines the headers and therefore the sheet's column width;
        # the first empty header cell ends the header scan.
        if row_i == 0:
            for header_cell in row:
                if header_cell.value:
                    headers.append(parse_value(header_cell))
                else:
                    break
            continue

        # Only consider as many cells as there are headers.
        row_cells = [parse_value(cell) for index, cell in enumerate(row)
                     if index < len(headers)]

        # Keep rows that have at least one non-null field...
        if any(row_cells):
            value_rows.append(row_cells)
        # ...an entirely empty row marks the end of the sheet.
        else:
            break

    # Zip rows with headers; null (None) fields are omitted from each dict.
    table = [
        {k: v for (k, v) in zip(headers, row) if v is not None}
        for row in value_rows
    ]

    return table
python
{ "resource": "" }
q15640
string_to_list
train
def string_to_list(string, sep=",", filter_empty=False):
    """Split ``string`` on ``sep`` into a list of stripped values.

    Args:
        string (str): Text to split.
        sep (str): Separator between elements.
        filter_empty (bool): When True, drop elements that are empty after
            stripping.

    Returns:
        list: The stripped elements.
    """
    # Bug fix: the original filtered on the *pre-strip* value, so
    # whitespace-only entries slipped through filter_empty as '' entries.
    stripped = (value.strip() for value in string.split(sep))
    return [value for value in stripped if not filter_empty or value]
python
{ "resource": "" }
q15641
generate_distribution_ids
train
def generate_distribution_ids(catalog):
    """Generate identifiers for distributions that are missing one.

    Distribution identifiers are built by concatenating the parent dataset's
    id with the distribution's positional index within the dataset:
    distribution_identifier = "{dataset_identifier}_{index}".
    """
    for dataset in catalog.get("dataset", []):
        distributions = dataset.get("distribution", [])
        for index, distribution in enumerate(distributions):
            # Only fill in missing identifiers; existing ones are kept.
            if "identifier" in distribution:
                continue
            distribution["identifier"] = "{}_{}".format(dataset["identifier"], index)
python
{ "resource": "" }
q15642
BasePopulation.on_initialize_simulants
train
def on_initialize_simulants(self, pop_data: SimulantData):
    """Called by the simulation whenever new simulants are added.

    This component is responsible for creating and filling four columns in
    the population state table:

    'age' :
        The age of the simulant in fractional years.
    'sex' :
        The sex of the simulant. One of {'Male', 'Female'}
    'alive' :
        Whether or not the simulant is alive. One of {'alive', 'dead'}
    'entrance_time' :
        The time that the simulant entered the simulation. The 'birthday'
        for simulants that enter as newborns. A `pandas.Timestamp`.

    Parameters
    ----------
    pop_data :
        A record containing the index of the new simulants, the start of the
        time step the simulants are added on, the width of the time step,
        and the age boundaries for the simulants to generate.
    """
    age_start = self.config.population.age_start
    age_end = self.config.population.age_end
    # Degenerate age range: spread ages uniformly over one creation window
    # (converted from a Timedelta to fractional years).
    if age_start == age_end:
        age_window = pop_data.creation_window / pd.Timedelta(days=365)
    else:
        age_window = age_end - age_start

    age_draw = self.age_randomness.get_draw(pop_data.index)
    age = age_start + age_draw * age_window

    if self.with_common_random_numbers:
        # With CRN, simulants must be registered with the randomness system
        # (keyed by entrance_time and age) before further draws are made.
        population = pd.DataFrame({'entrance_time': pop_data.creation_time,
                                   'age': age.values}, index=pop_data.index)
        self.register(population)
        population['sex'] = self.sex_randomness.choice(pop_data.index, ['Male', 'Female'])
        population['alive'] = 'alive'
    else:
        population = pd.DataFrame(
            {'age': age.values,
             'sex': self.sex_randomness.choice(pop_data.index, ['Male', 'Female']),
             'alive': pd.Series('alive', index=pop_data.index),
             'entrance_time': pop_data.creation_time},
            index=pop_data.index)

    self.population_view.update(population)
python
{ "resource": "" }
q15643
BasePopulation.age_simulants
train
def age_simulants(self, event: Event):
    """Updates simulant age on every time step.

    Parameters
    ----------
    event :
        An event object emitted by the simulation containing an index
        representing the simulants affected by the event and timing
        information.
    """
    # Only living simulants age; the step size is converted to fractional years.
    living = self.population_view.get(event.index, query="alive == 'alive'")
    years_elapsed = event.step_size / pd.Timedelta(days=365)
    living['age'] += years_elapsed
    self.population_view.update(living)
python
{ "resource": "" }
q15644
random
train
def random(key: str, index: Index, index_map: IndexMap=None) -> pd.Series:
    """Produces an indexed `pandas.Series` of uniformly distributed random
    numbers.

    The index passed in typically corresponds to a subset of rows in a
    `pandas.DataFrame` for which a probabilistic draw needs to be made.

    Parameters
    ----------
    key :
        A string used to create a seed for the random number generation.
    index :
        The index used for the returned series.
    index_map :
        A mapping between the provided index (which may contain ints, floats,
        datetimes or any arbitrary combination of them) and an integer index
        into the random number array.

    Returns
    -------
    pd.Series
        A series of random numbers indexed by the provided index.
    """
    if len(index) > 0:
        # The key fully determines the seed, so identical keys give
        # identical draws across runs.
        random_state = np.random.RandomState(seed=get_hash(key))

        # Generate a random number for every simulant.
        #
        # NOTE: We generate a full set of random numbers for the population
        # even when we may only need a few. This ensures consistency in
        # outcomes across simulations.
        # See Also:
        # 1. https://en.wikipedia.org/wiki/Variance_reduction
        # 2. Untangling Uncertainty with Common Random Numbers: A Simulation Study; A.Flaxman, et. al., Summersim 2017
        sample_size = index_map.map_size if index_map is not None else index.max() + 1
        try:
            draw_index = index_map[index]
        except (IndexError, TypeError):
            # No usable mapping (e.g. no index_map, or a non-integer index);
            # fall back to indexing the draw array directly by ``index``.
            draw_index = index
        raw_draws = random_state.random_sample(sample_size)
        return pd.Series(raw_draws[draw_index], index=index)

    # An empty index gets an empty series.
    return pd.Series(index=index)
python
{ "resource": "" }
q15645
get_hash
train
def get_hash(key: str) -> int:
    """Derive a deterministic integer seed from a string key.

    Parameters
    ----------
    key :
        A string used to create a seed for the random number generator.

    Returns
    -------
    int
        A hash of the provided key, bounded to a valid numpy seed.
    """
    # 4294967295 == 2**32 - 1, the maximum seed numpy.random.RandomState accepts.
    max_allowable_numpy_seed = 4294967295
    digest = hashlib.sha1(key.encode('utf8')).hexdigest()
    return int(digest, 16) % max_allowable_numpy_seed
python
{ "resource": "" }
q15646
_set_residual_probability
train
def _set_residual_probability(p: np.ndarray) -> np.ndarray:
    """Turns any use of `RESIDUAL_CHOICE` into a residual probability.

    Parameters
    ----------
    p :
        Array where each row is a set of probability weights and potentially a
        `RESIDUAL_CHOICE` placeholder.

    Returns
    -------
    np.ndarray
        Array where each row is a set of normalized probability weights.
    """
    residual_mask = p == RESIDUAL_CHOICE
    if residual_mask.any():  # I.E. if we have any placeholders.
        # NOTE(review): once any row contains a placeholder, this requires
        # *every* row to contain exactly one; a row with zero placeholders
        # also trips this error despite the "more than one" message -- confirm
        # that is intended.
        if np.any(np.sum(residual_mask, axis=1) - 1):
            raise RandomnessError(
                'More than one residual choice supplied for a single set of weights. Weights: {}.'.format(p))
        # Zero out the placeholders so row sums reflect real weights only.
        # NOTE(review): this mutates the caller's array in place.
        p[residual_mask] = 0
        residual_p = 1 - np.sum(p, axis=1)  # Probabilities sum to 1.
        if np.any(residual_p < 0):  # We got un-normalized probability weights.
            raise RandomnessError(
                'Residual choice supplied with weights that summed to more than 1. Weights: {}.'.format(p))
        p[residual_mask] = residual_p
    return p
python
{ "resource": "" }
q15647
IndexMap.update
train
def update(self, new_keys: Index):
    """Adds the new keys to the mapping.

    Parameters
    ----------
    new_keys :
        The new index to hash.

    Raises
    ------
    KeyError
        If any of the new keys are already present in the mapping.
    """
    if not self._map.index.intersection(new_keys).empty:
        raise KeyError("Non-unique keys in index.")
    mapping_update = self.hash_(new_keys)
    if self._map.empty:
        self._map = mapping_update.drop_duplicates()
    else:
        self._map = self._map.append(mapping_update).drop_duplicates()

    # Keys whose hashed values collided with existing entries were dropped by
    # drop_duplicates; re-hash the dropped keys with an increasing salt until
    # every key has a slot.
    collisions = mapping_update.index.difference(self._map.index)
    salt = 1
    while not collisions.empty:
        mapping_update = self.hash_(collisions, salt)
        self._map = self._map.append(mapping_update).drop_duplicates()
        collisions = mapping_update.index.difference(self._map.index)
        salt += 1
python
{ "resource": "" }
q15648
IndexMap.convert_to_ten_digit_int
train
def convert_to_ten_digit_int(self, column: pd.Series) -> pd.Series:
    """Converts a column of datetimes, integers, or floats into a column
    of 10 digit integers.

    Parameters
    ----------
    column :
        A series of datetimes, integers, or floats.

    Returns
    -------
    pd.Series
        A series of ten digit integers based on the input data.

    Raises
    ------
    RandomnessError :
        If the column contains negative integers or data that is neither
        datetime-like nor numeric.
    """
    if isinstance(column.iloc[0], datetime.datetime):
        column = self.clip_to_seconds(column.astype(int))
    elif np.issubdtype(column.iloc[0], np.integer):
        # BUG FIX: the old check ``not len(column >= 0) == len(column)``
        # compared lengths of two equal-length series, so it was always
        # False and negative values slipped through.  Test the values.
        if not (column >= 0).all():
            raise RandomnessError("Values in integer columns must be greater than or equal to zero.")
        column = self.spread(column)
    elif np.issubdtype(column.iloc[0], np.floating):
        column = self.shift(column)
    else:
        raise RandomnessError(f"Unhashable column type {type(column.iloc[0])}. "
                              "IndexMap accepts datetime like columns and numeric columns.")
    return column
python
{ "resource": "" }
q15649
IndexMap.digit
train
def digit(m: Union[int, pd.Series], n: int) -> Union[int, pd.Series]:
    """Return the nth (zero-based, from the right) decimal digit of each number in m."""
    shifted = m // 10 ** n
    return shifted % 10
python
{ "resource": "" }
q15650
IndexMap.clip_to_seconds
train
def clip_to_seconds(m: Union[int, pd.Series]) -> Union[int, pd.Series]:
    """Truncate a UTC datetime given in nanoseconds down to whole seconds."""
    nanoseconds_per_second = pd.Timedelta(1, unit='s').value
    return m // nanoseconds_per_second
python
{ "resource": "" }
q15651
IndexMap.spread
train
def spread(self, m: Union[int, pd.Series]) -> Union[int, pd.Series]:
    """Scatter integer keys across the ten-digit space so that small,
    adjacent values land far apart."""
    scaled = m * 111_111
    return scaled % self.TEN_DIGIT_MODULUS
python
{ "resource": "" }
q15652
IndexMap.shift
train
def shift(self, m: Union[float, pd.Series]) -> Union[int, pd.Series]:
    """Promote the first ten decimal digits of a float's fractional part
    to an integer."""
    promoted = m % 1 * self.TEN_DIGIT_MODULUS // 1
    if isinstance(promoted, pd.Series):
        return promoted.astype(int)
    return int(promoted)
python
{ "resource": "" }
q15653
RandomnessStream.copy_with_additional_key
train
def copy_with_additional_key(self, key: Any) -> 'RandomnessStream':
    """Creates a copy of this stream that combines this stream's key with a new one.

    Parameters
    ----------
    key :
        The additional key to describe the new stream with.

    Returns
    -------
    RandomnessStream
        A new RandomnessStream with a combined key.

    Raises
    ------
    RandomnessError
        If this is an initialization stream.
    """
    if self._for_initialization:
        raise RandomnessError('Initialization streams cannot be copied.')
    elif self._manager:
        return self._manager.get_randomness_stream('_'.join([self.key, key]))
    else:
        # BUG FIX: the unmanaged branch previously passed ``self.key``
        # unchanged, dropping ``key`` entirely, so the "copy" was seeded
        # identically to the original stream.  Combine the keys as the
        # docstring (and the managed branch) promise.
        return RandomnessStream('_'.join([self.key, key]), self.clock, self.seed, self.index_map)
python
{ "resource": "" }
q15654
RandomnessStream._key
train
def _key(self, additional_key: Any=None) -> str: """Construct a hashable key from this object's state. Parameters ---------- additional_key : Any additional information used to seed random number generation. Returns ------- str A key to seed random number generation. """ return '_'.join([self.key, str(self.clock()), str(additional_key), str(self.seed)])
python
{ "resource": "" }
q15655
RandomnessStream.get_draw
train
def get_draw(self, index: Index, additional_key: Any=None) -> pd.Series:
    """Get an indexed sequence of floats pulled from a uniform distribution over [0.0, 1.0)

    Parameters
    ----------
    index :
        An index whose length is the number of random draws made and which
        indexes the returned series.
    additional_key :
        Any additional information used to seed random number generation.

    Returns
    -------
    pd.Series
        A series of random numbers indexed by the provided index.
    """
    seed_key = self._key(additional_key)
    if self._for_initialization:
        # During initialization simulants have no registered keys yet, so
        # draw positionally and re-attach the caller's index afterwards.
        draw = random(seed_key, pd.Index(range(len(index))), self.index_map)
        draw.index = index
        return draw
    return random(seed_key, index, self.index_map)
python
{ "resource": "" }
q15656
RandomnessStream.filter_for_rate
train
def filter_for_rate(self, population: Union[pd.DataFrame, pd.Series, Index],
                    rate: Array, additional_key: Any=None) -> Index:
    """Select the sub-population for which an event occurred, given rates.

    Converts the per-simulant rates to probabilities and delegates to
    ``filter_for_probability``.

    Parameters
    ----------
    population :
        A view on the simulants for which we are determining the outcome
        of an event.
    rate :
        A 1d list of rates of the event under consideration occurring,
        aligned with ``population`` (``len(population) == len(rate)``).
        The rates must already be scaled to the simulation time-step size,
        either manually or as a post-processing step in a rate pipeline.
    additional_key :
        Any additional information used to create the seed.

    Returns
    -------
    Index
        The index of the simulants for whom the event occurred.

    See Also
    --------
    framework.values: Value/rate pipeline management module.
    """
    probability = rate_to_probability(rate)
    return self.filter_for_probability(population, probability, additional_key)
python
{ "resource": "" }
q15657
RandomnessManager.register_simulants
train
def register_simulants(self, simulants: pd.DataFrame):
    """Adds new simulants to the randomness mapping.

    Parameters
    ----------
    simulants :
        A table with state data representing the new simulants.  Each
        simulant should pass through this function exactly once.

    Raises
    ------
    RandomnessError :
        If the provided table does not contain all key columns specified
        in the configuration.
    """
    missing_columns = [k for k in self._key_columns if k not in simulants.columns]
    if missing_columns:
        raise RandomnessError("The simulants dataframe does not have all specified key_columns.")
    self._key_mapping.update(simulants.set_index(self._key_columns).index)
python
{ "resource": "" }
q15658
GroupManager.student_visible
train
def student_visible(self):
    """Return a list of groups that are student-visible.

    Walks every ``Group``, keeps the ids of those whose ``properties``
    record is flagged ``student_visible``, and returns them as a queryset.

    NOTE(review): this loads every group (and its properties) into Python
    to filter.  If ``properties`` is a related model this could likely be a
    single ``filter(properties__student_visible=True)`` query -- confirm
    against the schema before changing.
    """
    group_ids = set()
    for group in Group.objects.all():
        if group.properties.student_visible:
            group_ids.add(group.id)
    return Group.objects.filter(id__in=group_ids)
python
{ "resource": "" }
q15659
PhoneFormField.to_python
train
def to_python(self, value):
    """Normalize the raw form value to a stripped Unicode string.

    Empty values collapse to the empty string.
    """
    if value in self.empty_values:
        return ""
    return force_text(value).strip()
python
{ "resource": "" }
q15660
_resolve_time
train
def _resolve_time(value):
    '''
    Resolve the time in seconds of a configuration value.

    Accepts None / integers (passed through), a bare number string, a
    "<count><unit>" shorthand, or a named gregorian interval (returned
    verbatim).  Raises ValueError for anything else.
    '''
    if value is None or isinstance(value, (int, long)):
        return value
    if NUMBER_TIME.match(value):
        return long(value)
    # FIX: the old code matched SIMPLE_TIME twice for the same value; reuse
    # the first match object.
    simple = SIMPLE_TIME.match(value)
    if simple:
        multiplier = long(simple.groups()[0])
        constant = SIMPLE_TIMES[simple.groups()[1]]
        return multiplier * constant
    if value in GREGORIAN_TIMES:
        return value
    raise ValueError('Unsupported time format %s' % value)
python
{ "resource": "" }
q15661
RelativeTime.step_size
train
def step_size(self, t0=None, t1=None):
    '''
    Return the time in seconds of a step.

    If given begin and end timestamps, return the number of seconds between
    the buckets they alias to (the end timestamp aliases to the *end* of its
    bucket).  If t1 and t0 resolve to the same bucket, or either is omitted,
    fall back to the configured step size.
    '''
    # FIX: compare against None with ``is not`` (PEP 8) instead of ``!=``.
    if t0 is not None and t1 is not None:
        tb0 = self.to_bucket(t0)
        tb1 = self.to_bucket(t1, steps=1)  # NOTE: "end" of second bucket
        if tb0 == tb1:
            return self._step
        return self.from_bucket(tb1) - self.from_bucket(tb0)
    return self._step
python
{ "resource": "" }
q15662
GregorianTime.to_bucket
train
def to_bucket(self, timestamp, steps=0):
    '''
    Calculate the bucket from a timestamp.

    The bucket is the integer formed by formatting the UTC datetime with the
    strftime pattern configured for this step (e.g. 20140215 for daily).
    ``steps`` optionally offsets the result by that many whole intervals
    before formatting.
    '''
    dt = datetime.utcfromtimestamp( timestamp )
    if steps!=0:
        if self._step == 'daily':
            dt = dt + timedelta(days=steps)
        elif self._step == 'weekly':
            dt = dt + timedelta(weeks=steps)
        elif self._step == 'monthly':
            # MonthDelta handles variable month lengths.
            dt = dt + MonthDelta(steps)
        elif self._step == 'yearly':
            # Years are stepped numerically and snapped back to Jan 1.
            year = int(dt.strftime( self.FORMATS[self._step] ))
            year += steps
            dt = datetime(year=year, month=1, day=1)
    return int(dt.strftime( self.FORMATS[self._step] ))
python
{ "resource": "" }
q15663
GregorianTime.from_bucket
train
def from_bucket(self, bucket, native=False): ''' Calculate the timestamp given a bucket. ''' # NOTE: this is due to a bug somewhere in strptime that does not process # the week number of '%Y%U' correctly. That bug could be very specific to # the combination of python and ubuntu that I was testing. bucket = str(bucket) if self._step == 'weekly': year, week = bucket[:4], bucket[4:] normal = datetime(year=int(year), month=1, day=1) + timedelta(weeks=int(week)) else: normal = datetime.strptime(bucket, self.FORMATS[self._step]) if native: return normal return long(time.mktime( normal.timetuple() ))
python
{ "resource": "" }
q15664
GregorianTime.normalize
train
def normalize(self, timestamp, steps=0):
    '''
    Normalize a timestamp according to the interval configuration.
    Optionally can be used to calculate the timestamp N steps away.
    '''
    # Round-trip through the bucket to snap to the interval boundary.
    bucket = self.to_bucket(timestamp, steps)
    return self.from_bucket(bucket)
python
{ "resource": "" }
q15665
Timeseries._batch_insert
train
def _batch_insert(self, inserts, intervals, **kwargs):
    '''
    Support for batch insert. Default implementation is non-optimized and
    is a simple loop over values.

    ``inserts`` maps timestamp -> {name -> [values]}; every value is fed
    through the single-record ``_insert`` hook.  (Python 2: ``iteritems``.)
    '''
    for timestamp,names in inserts.iteritems():
        for name,values in names.iteritems():
            for value in values:
                self._insert( name, value, timestamp, intervals, **kwargs )
python
{ "resource": "" }
q15666
Timeseries._normalize_timestamps
train
def _normalize_timestamps(self, timestamp, intervals, config): ''' Helper for the subclasses to generate a list of timestamps. ''' rval = [timestamp] if intervals<0: while intervals<0: rval.append( config['i_calc'].normalize(timestamp, intervals) ) intervals += 1 elif intervals>0: while intervals>0: rval.append( config['i_calc'].normalize(timestamp, intervals) ) intervals -= 1 return rval
python
{ "resource": "" }
q15667
Timeseries._join_results
train
def _join_results(self, results, coarse, join): ''' Join a list of results. Supports both get and series. ''' rval = OrderedDict() i_keys = set() for res in results: i_keys.update( res.keys() ) for i_key in sorted(i_keys): if coarse: rval[i_key] = join( [res.get(i_key) for res in results] ) else: rval[i_key] = OrderedDict() r_keys = set() for res in results: r_keys.update( res.get(i_key,{}).keys() ) for r_key in sorted(r_keys): rval[i_key][r_key] = join( [res.get(i_key,{}).get(r_key) for res in results] ) return rval
python
{ "resource": "" }
q15668
Timeseries._process_transform
train
def _process_transform(self, data, transform, step_size): ''' Process transforms on the data. ''' if isinstance(transform, (list,tuple,set)): return { t : self._transform(data,t,step_size) for t in transform } elif isinstance(transform, dict): return { tn : self._transform(data,tf,step_size) for tn,tf in transform.items() } return self._transform(data,transform,step_size)
python
{ "resource": "" }
q15669
Histogram._condense
train
def _condense(self, data): ''' Condense by adding together all of the lists. ''' rval = {} for resolution,histogram in data.items(): for value,count in histogram.items(): rval[ value ] = count + rval.get(value,0) return rval
python
{ "resource": "" }
q15670
Gauge._condense
train
def _condense(self, data): ''' Condense by returning the last real value of the gauge. ''' if data: data = filter(None,data.values()) if data: return data[-1] return None
python
{ "resource": "" }
q15671
Set._condense
train
def _condense(self, data): ''' Condense by or-ing all of the sets. ''' if data: return reduce(operator.ior, data.values()) return set()
python
{ "resource": "" }
q15672
ApiBasicAuthentication.authenticate_credentials
train
def authenticate_credentials(self, userid, password, request=None):
    """Authenticate the userid and password against the Django auth backends.

    Returns the DRF ``(user, auth)`` tuple on success.

    Raises
    ------
    exceptions.AuthenticationFailed
        If the credentials are invalid or the account is inactive.
    """
    user = auth.authenticate(username=userid, password=password)
    # Simplified: the old ``user is None or (user and not user.is_active)``
    # redundantly re-tested truthiness right after the None check.
    if user is None or not user.is_active:
        raise exceptions.AuthenticationFailed("Invalid username/password.")
    return (user, None)
python
{ "resource": "" }
q15673
check_internal_ip
train
def check_internal_ip(request):
    """
    request is an AsgiRequest

    Returns True when the requester's address appears in
    ``settings.INTERNAL_IPS``, preferring the X-Forwarded-For header over
    REMOTE_ADDR when present.

    NOTE(review): X-Forwarded-For is client-controlled and may contain a
    comma-separated proxy chain; trusting it wholesale allows spoofing
    unless a trusted proxy always rewrites it -- confirm the deployment
    guarantees this.
    """
    remote_addr = (request.META["HTTP_X_FORWARDED_FOR"]
                   if "HTTP_X_FORWARDED_FOR" in request.META
                   else request.META.get("REMOTE_ADDR", ""))
    return remote_addr in settings.INTERNAL_IPS
python
{ "resource": "" }
q15674
events_view
train
def events_view(request):
    """Events homepage.

    Shows a list of events occurring in the next week, month, and future.
    Events admins additionally see events awaiting approval (and, with
    ``?show_all``, past events) and may approve/reject events via POST.
    """
    is_events_admin = request.user.has_admin_permission('events')
    if request.method == "POST":
        # Admin-only moderation actions; non-admin POSTs fall through.
        if "approve" in request.POST and is_events_admin:
            event_id = request.POST.get('approve')
            event = get_object_or_404(Event, id=event_id)
            event.rejected = False
            event.approved = True
            event.approved_by = request.user
            event.save()
            messages.success(request, "Approved event {}".format(event))
        if "reject" in request.POST and is_events_admin:
            event_id = request.POST.get('reject')
            event = get_object_or_404(Event, id=event_id)
            event.approved = False
            event.rejected = True
            event.rejected_by = request.user
            event.save()
            messages.success(request, "Rejected event {}".format(event))

    if is_events_admin and "show_all" in request.GET:
        viewable_events = (Event.objects.all().this_year().prefetch_related("groups"))
    else:
        viewable_events = (Event.objects.visible_to_user(request.user).this_year().prefetch_related("groups"))

    # get date objects for week and month
    today = datetime.date.today()
    delta = today - datetime.timedelta(days=today.weekday())  # Monday of the current week
    this_week = (delta, delta + datetime.timedelta(days=7))
    this_month = (this_week[1], this_week[1] + datetime.timedelta(days=31))

    events_categories = [{
        "title": "This week",
        "events": viewable_events.filter(time__gte=this_week[0], time__lt=this_week[1])
    }, {
        "title": "This month",
        "events": viewable_events.filter(time__gte=this_month[0], time__lt=this_month[1])
    }, {
        "title": "Future",
        "events": viewable_events.filter(time__gte=this_month[1])
    }]

    if is_events_admin:
        # Pending events are shown first for moderators.
        unapproved_events = (Event.objects.filter(approved=False, rejected=False).prefetch_related("groups"))
        events_categories = [{"title": "Awaiting Approval", "events": unapproved_events}] + events_categories

    if is_events_admin and "show_all" in request.GET:
        events_categories.append({"title": "Past", "events": viewable_events.filter(time__lt=this_week[0])})

    context = {
        "events": events_categories,
        "num_events": sum([x["events"].count() for x in events_categories]),
        "is_events_admin": is_events_admin,
        "events_admin": is_events_admin,
        "show_attend": True,
        "show_icon": True
    }
    return render(request, "events/home.html", context)
python
{ "resource": "" }
q15675
join_event_view
train
def join_event_view(request, id):
    """Join event page.

    If a POST request, actually add or remove the attendance of the current
    user. Otherwise, display a page with confirmation.

    id: event id
    """
    event = get_object_or_404(Event, id=id)
    if request.method == "POST":
        if not event.show_attending:
            return redirect("events")
        if "attending" in request.POST:
            if request.POST.get("attending") == "true":
                event.attending.add(request.user)
            else:
                event.attending.remove(request.user)
            return redirect("events")
    context = {"event": event, "is_events_admin": request.user.has_admin_permission('events')}
    return render(request, "events/join_event.html", context)
python
{ "resource": "" }
q15676
add_event_view
train
def add_event_view(request):
    """Add event page.

    Currently, there is an approval process for events. If a user is an
    events administrator, they can create events directly. Otherwise, their
    event is added in the system but must be approved.

    (Non-admins are redirected to the request_event flow.)
    """
    is_events_admin = request.user.has_admin_permission('events')
    if not is_events_admin:
        return redirect("request_event")
    if request.method == "POST":
        form = EventForm(data=request.POST, all_groups=request.user.has_admin_permission('groups'))
        logger.debug(form)
        if form.is_valid():
            obj = form.save()
            obj.user = request.user
            # SAFE HTML
            obj.description = safe_html(obj.description)
            # auto-approve if admin
            obj.approved = True
            obj.approved_by = request.user
            messages.success(request, "Because you are an administrator, this event was auto-approved.")
            obj.created_hook(request)
            obj.save()
            return redirect("events")
        else:
            messages.error(request, "Error adding event.")
    else:
        form = EventForm(all_groups=request.user.has_admin_permission('groups'))
    context = {"form": form, "action": "add", "action_title": "Add" if is_events_admin else "Submit",
               "is_events_admin": is_events_admin}
    return render(request, "events/add_modify.html", context)
python
{ "resource": "" }
q15677
modify_event_view
train
def modify_event_view(request, id=None):
    """Modify event page.

    You may only modify an event if you were the creator or you are an
    administrator.

    id: event id
    """
    event = get_object_or_404(Event, id=id)
    is_events_admin = request.user.has_admin_permission('events')
    # NOTE(review): despite the docstring, only events admins get past this
    # check -- creators are rejected.  Confirm intended.
    if not is_events_admin:
        raise exceptions.PermissionDenied
    if request.method == "POST":
        if is_events_admin:
            form = AdminEventForm(data=request.POST, instance=event, all_groups=request.user.has_admin_permission('groups'))
        else:
            form = EventForm(data=request.POST, instance=event, all_groups=request.user.has_admin_permission('groups'))
        logger.debug(form)
        if form.is_valid():
            obj = form.save()
            # SAFE HTML
            obj.description = safe_html(obj.description)
            obj.save()
            messages.success(request, "Successfully modified event.")
            # NOTE(review): redirect intentionally(?) disabled -- the page
            # re-renders with the bound form instead.
            # return redirect("events")
        else:
            messages.error(request, "Error modifying event.")
    else:
        if is_events_admin:
            form = AdminEventForm(instance=event, all_groups=request.user.has_admin_permission('groups'))
        else:
            form = EventForm(instance=event, all_groups=request.user.has_admin_permission('groups'))
    context = {"form": form, "action": "modify", "action_title": "Modify", "id": id,
               "is_events_admin": is_events_admin}
    return render(request, "events/add_modify.html", context)
python
{ "resource": "" }
q15678
delete_event_view
train
def delete_event_view(request, id):
    """Delete event page.

    Only events administrators may delete events.  A confirmation page is
    shown unless the request is a POST.

    id: event id
    """
    event = get_object_or_404(Event, id=id)
    if not request.user.has_admin_permission('events'):
        raise exceptions.PermissionDenied
    if request.method != "POST":
        return render(request, "events/delete.html", {"event": event})
    try:
        event.delete()
        messages.success(request, "Successfully deleted event.")
    except Event.DoesNotExist:
        pass
    return redirect("events")
python
{ "resource": "" }
q15679
show_event_view
train
def show_event_view(request):
    """
    Unhide an event that was hidden by the logged-in user.

    events_hidden in the user model is the related_name for "users_hidden"
    in the EventUserMap model.
    """
    if request.method != "POST":
        return http.HttpResponseNotAllowed(["POST"], "HTTP 405: METHOD NOT ALLOWED")
    event_id = request.POST.get("event_id")
    if not event_id:
        raise http.Http404
    event = Event.objects.get(id=event_id)
    event.user_map.users_hidden.remove(request.user)
    event.user_map.save()
    return http.HttpResponse("Unhidden")
python
{ "resource": "" }
q15680
chrome_getdata_view
train
def chrome_getdata_view(request):
    """Get the data of the last notification sent to the current user.

    This is needed because Chrome, as of version 44, doesn't support
    sending a data payload to a notification. Thus, information on what
    the notification is actually for must be manually fetched.
    """
    data = {}
    if request.user.is_authenticated:
        # authenticated session
        notifs = GCMNotification.objects.filter(sent_to__user=request.user).order_by("-time")
        if notifs.count() > 0:
            notif = notifs.first()
            ndata = notif.data
            if "title" in ndata and "text" in ndata:
                # NOTE(review): the inner conditionals are redundant given the
                # check above, but preserved as-is.
                data = {
                    "title": ndata['title'] if 'title' in ndata else '',
                    "text": ndata['text'] if 'text' in ndata else '',
                    "url": ndata['url'] if 'url' in ndata else ''
                }
            else:
                # Incomplete notification payload: fall back to schedule info.
                schedule_chk = chrome_getdata_check(request)
                if schedule_chk:
                    data = schedule_chk
        else:
            # No notifications on record: schedule info or explicit null.
            schedule_chk = chrome_getdata_check(request)
            if schedule_chk:
                data = schedule_chk
            else:
                return HttpResponse("null", content_type="text/json")
    else:
        # Anonymous: schedule info or a generic "check Intranet" message.
        schedule_chk = chrome_getdata_check(request)
        if schedule_chk:
            data = schedule_chk
        else:
            data = {"title": "Check Intranet",
                    "text": "You have a new notification that couldn't be loaded right now."}
    j = json.dumps(data)
    return HttpResponse(j, content_type="text/json")
python
{ "resource": "" }
q15681
lostitem_add_view
train
def lostitem_add_view(request):
    """Add a lostitem."""
    if request.method != "POST":
        return render(request, "lostfound/lostitem_form.html",
                      {"form": LostItemForm(), "action": "add"})
    form = LostItemForm(request.POST)
    logger.debug(form)
    if form.is_valid():
        obj = form.save()
        obj.user = request.user
        # SAFE HTML
        obj.description = safe_html(obj.description)
        obj.save()
        messages.success(request, "Successfully added lost item.")
        return redirect("lostitem_view", obj.id)
    messages.error(request, "Error adding lost item.")
    return render(request, "lostfound/lostitem_form.html", {"form": form, "action": "add"})
python
{ "resource": "" }
q15682
lostitem_modify_view
train
def lostitem_modify_view(request, item_id=None):
    """Modify a lostitem.

    item_id: lostitem id
    """
    # Hoisted: the item was previously fetched separately in both branches.
    lostitem = get_object_or_404(LostItem, id=item_id)
    if request.method == "POST":
        form = LostItemForm(request.POST, instance=lostitem)
        if form.is_valid():
            obj = form.save()
            logger.debug(form.cleaned_data)
            # SAFE HTML
            obj.description = safe_html(obj.description)
            obj.save()
            messages.success(request, "Successfully modified lost item.")
            return redirect("lostitem_view", obj.id)
        else:
            # BUG FIX: the failure message said "adding" on the modify view.
            messages.error(request, "Error modifying lost item.")
    else:
        form = LostItemForm(instance=lostitem)
    context = {"form": form, "action": "modify", "id": item_id, "lostitem": lostitem}
    return render(request, "lostfound/lostitem_form.html", context)
python
{ "resource": "" }
q15683
lostitem_delete_view
train
def lostitem_delete_view(request, item_id):
    """Delete a lostitem (or mark it found when full_delete is not set).

    item_id: lostitem id
    """
    if request.method != "POST":
        lostitem = get_object_or_404(LostItem, id=item_id)
        return render(request, "lostfound/lostitem_delete.html", {"lostitem": lostitem})
    try:
        item = LostItem.objects.get(id=item_id)
    except LostItem.DoesNotExist:
        pass
    else:
        if request.POST.get("full_delete", False):
            item.delete()
            messages.success(request, "Successfully deleted lost item.")
        else:
            item.found = True
            item.save()
            messages.success(request, "Successfully marked lost item as found!")
    return redirect("index")
python
{ "resource": "" }
q15684
lostitem_view
train
def lostitem_view(request, item_id):
    """View a lostitem.

    item_id: lostitem id
    """
    item = get_object_or_404(LostItem, id=item_id)
    context = {"item": item, "type": "lost"}
    return render(request, "itemreg/item_view.html", context)
python
{ "resource": "" }
q15685
founditem_modify_view
train
def founditem_modify_view(request, item_id=None):
    """Modify a founditem.

    item_id: founditem id
    """
    # Hoisted: the item was previously fetched separately in both branches.
    founditem = get_object_or_404(FoundItem, id=item_id)
    if request.method == "POST":
        form = FoundItemForm(request.POST, instance=founditem)
        if form.is_valid():
            obj = form.save()
            logger.debug(form.cleaned_data)
            # SAFE HTML
            obj.description = safe_html(obj.description)
            obj.save()
            messages.success(request, "Successfully modified found item.")
            return redirect("founditem_view", obj.id)
        else:
            # BUG FIX: the failure message said "adding" on the modify view.
            messages.error(request, "Error modifying found item.")
    else:
        form = FoundItemForm(instance=founditem)
    context = {"form": form, "action": "modify", "id": item_id, "founditem": founditem}
    return render(request, "lostfound/founditem_form.html", context)
python
{ "resource": "" }
q15686
founditem_delete_view
train
def founditem_delete_view(request, item_id):
    """Delete a founditem (or mark it claimed when full_delete is not set).

    item_id: founditem id
    """
    if request.method != "POST":
        founditem = get_object_or_404(FoundItem, id=item_id)
        return render(request, "lostfound/founditem_delete.html", {"founditem": founditem})
    try:
        item = FoundItem.objects.get(id=item_id)
    except FoundItem.DoesNotExist:
        pass
    else:
        if request.POST.get("full_delete", False):
            item.delete()
            messages.success(request, "Successfully deleted found item.")
        else:
            item.found = True
            item.save()
            messages.success(request, "Successfully marked found item as found!")
    return redirect("index")
python
{ "resource": "" }
q15687
founditem_view
train
def founditem_view(request, item_id):
    """View a founditem.

    item_id: founditem id
    """
    item = get_object_or_404(FoundItem, id=item_id)
    context = {"item": item, "type": "found"}
    return render(request, "itemreg/item_view.html", context)
python
{ "resource": "" }
q15688
files_view
train
def files_view(request):
    """The main filecenter view: lists the hosts visible to the user."""
    visible_hosts = Host.objects.visible_to_user(request.user)
    return render(request, "files/home.html", {"hosts": visible_hosts})
python
{ "resource": "" }
q15689
files_auth
train
def files_auth(request):
    """Display authentication for filecenter."""
    if "password" in request.POST:
        """
        Encrypt the password with AES mode CFB.
        Create a random 32 char key, stored in a CLIENT-side cookie.
        Create a random 32 char IV, stored in a SERVER-side session.
        Store the encrypted ciphertext in a SERVER-side session.

        This ensures that neither side can decrypt the password without the
        information stored on the other end of the request.

        Both the server-side session variables and the client-side cookies
        are deleted when the user logs out.
        """
        key = Random.new().read(32)
        iv = Random.new().read(16)
        obj = AES.new(key, AES.MODE_CFB, iv)
        message = request.POST.get("password")
        if isinstance(message, str):
            message = message.encode("utf-8")
        ciphertext = obj.encrypt(message)
        # Split the secret: IV + ciphertext server-side, key client-side.
        request.session["files_iv"] = base64.b64encode(iv).decode()
        request.session["files_text"] = base64.b64encode(ciphertext).decode()
        cookie_key = base64.b64encode(key).decode()
        # Only honor in-app ?next targets under /files.
        nexturl = request.GET.get("next", None)
        if nexturl and nexturl.startswith("/files"):
            response = redirect(nexturl)
        else:
            response = redirect("files")
        response.set_cookie(key="files_key", value=cookie_key)
        if "username" in request.POST:
            # Allow authenticating to filecenter as a different user.
            request.session["filecenter_username"] = request.POST.get("username")
        return response
    else:
        return render(request, "files/auth.html", {"is_admin": request.user.member_of("admin_all")})
python
{ "resource": "" }
q15690
get_authinfo
train
def get_authinfo(request):
    """Get authentication info from the encrypted message.

    Returns a ``{"username", "password"}`` dict, or False when any piece of
    the split secret (server-side IV/ciphertext, client-side key) is missing.
    """
    if (("files_iv" not in request.session) or ("files_text" not in request.session) or
            ("files_key" not in request.COOKIES)):
        return False

    """
    Decrypt the password given the SERVER-side IV, SERVER-side ciphertext,
    and CLIENT-side key.

    See note above on why this is done.
    """
    iv = base64.b64decode(request.session["files_iv"])
    text = base64.b64decode(request.session["files_text"])
    key = base64.b64decode(request.COOKIES["files_key"])
    obj = AES.new(key, AES.MODE_CFB, iv)
    password = obj.decrypt(text)
    # Fall back to the logged-in user's username if none was stored at auth time.
    username = request.session["filecenter_username"] if "filecenter_username" in request.session else request.user.username
    return {"username": username, "password": password}
python
{ "resource": "" }
q15691
highlight
train
def highlight(str1, str2):
    """Highlight str1 with the contents of str2.

    str2 is a (possibly empty) sequence whose first element is the term to
    wrap in <b> tags.  Returns str1 unchanged when either side is empty.
    """
    # FIX: removed a leftover debug ``print('---...')`` statement.
    try:
        term = str2[0]
    except IndexError:
        term = None
    if str1 and term:
        return str1.replace(term, "<b>{}</b>".format(term))
    return str1
python
{ "resource": "" }
q15692
Command.handle
train
def handle(self, **options):
    """Import the exported "eighth_absentees" table (CSV of block_id,user_id rows)."""
    with open('eighth_absentees.csv', 'r') as absopen:
        absences = csv.reader(absopen)
        for row in absences:
            bid, uid = row
            try:
                usr = User.objects.get(id=uid)
            except User.DoesNotExist:
                # BUG FIX: the old message formatted ``usr``, which is unbound
                # when the lookup fails (UnboundLocalError); report the id.
                self.stdout.write("User {} doesn't exist, bid {}".format(uid, bid))
            else:
                try:
                    blk = EighthBlock.objects.get(id=bid)
                except EighthBlock.DoesNotExist:
                    self.stdout.write("Block {} doesn't exist, with user {}".format(bid, uid))
                else:
                    usr_signup = EighthSignup.objects.filter(user=usr, scheduled_activity__block=blk)
                    self.stdout.write("{} signup: {}".format(usr, usr_signup))
                    if usr_signup.count() == 0:
                        # No existing signup: record the absence under a
                        # catch-all administrative activity.
                        other_abs_act, _ = EighthActivity.objects.get_or_create(
                            name="z-OTHER ABSENCE (transferred from Iodine)", administrative=True)
                        other_abs_sch, _ = EighthScheduledActivity.objects.get_or_create(
                            block=blk, activity=other_abs_act)
                        other_abs_su = EighthSignup.objects.create(
                            user=usr, scheduled_activity=other_abs_sch, was_absent=True)
                        self.stdout.write("{} Signup on {} created: {}".format(usr, bid, other_abs_su))
                    else:
                        # Existing signup: flag it absent and mark attendance taken.
                        s = usr_signup[0]
                        s.was_absent = True
                        s.save()
                        sa = s.scheduled_activity
                        sa.attendance_taken = True
                        sa.save()
                        self.stdout.write("{} Signup on {} modified: {}".format(usr, bid, usr_signup[0]))
    self.stdout.write("Done.")
python
{ "resource": "" }
q15693
KerberosAuthenticationBackend.kinit_timeout_handle
train
def kinit_timeout_handle(username, realm):
    """Check if the user exists before we throw an error.

    Unknown users get a warning; known users escalate to a critical log
    entry with Sentry user context attached.
    """
    try:
        u = User.objects.get(username__iexact=username)
    except User.DoesNotExist:
        logger.warning("kinit timed out for {}@{} (invalid user)".format(username, realm))
        return
    sentry_context = {
        "stack": True,
        "data": {
            "username": username
        },
        "sentry.interfaces.User": {
            "id": u.id,
            "username": username,
            "ip_address": "127.0.0.1"
        }
    }
    logger.critical("kinit timed out for {}".format(realm), extra=sentry_context)
python
{ "resource": "" }
q15694
KerberosAuthenticationBackend.get_kerberos_ticket
train
def get_kerberos_ticket(username, password):
    """Attempts to create a Kerberos ticket for a user.

    Tries the CSL realm first and, if that kinit fails, falls back to
    the AD realm.

    Args:
        username
            The username.
        password
            The password.

    Returns:
        True on success, False on failure, or the string "reset" when
        the CSL-realm password has expired and must be reset.
    """
    # Per-attempt credential cache so concurrent logins don't clobber
    # each other's tickets.
    cache = "/tmp/ion-%s" % uuid.uuid4()

    logger.debug("Setting KRB5CCNAME to 'FILE:{}'".format(cache))
    os.environ["KRB5CCNAME"] = "FILE:" + cache

    try:
        realm = settings.CSL_REALM
        kinit = pexpect.spawnu("/usr/bin/kinit {}@{}".format(username, realm), timeout=settings.KINIT_TIMEOUT)
        kinit.expect(":")  # the "Password for ...:" prompt
        kinit.sendline(password)
        # A second "password:" prompt (instead of EOF) means kinit is
        # asking for a new password, i.e. the current one has expired.
        returned = kinit.expect([pexpect.EOF, "password:"])
        if returned == 1:
            logger.debug("Password for {}@{} expired, needs reset".format(username, realm))
            # NOTE(review): this early return skips the kdestroy cleanup
            # below and leaves KRB5CCNAME set in the environment —
            # confirm whether that is intentional.
            return "reset"
        kinit.close()
        exitstatus = kinit.exitstatus
    except pexpect.TIMEOUT:
        KerberosAuthenticationBackend.kinit_timeout_handle(username, realm)
        exitstatus = 1

    if exitstatus != 0:
        # CSL realm failed; retry against the AD realm.
        try:
            realm = settings.AD_REALM
            kinit = pexpect.spawnu("/usr/bin/kinit {}@{}".format(username, realm), timeout=settings.KINIT_TIMEOUT)
            kinit.expect(":")
            kinit.sendline(password)
            returned = kinit.expect([pexpect.EOF, "password:"])
            if returned == 1:
                # NOTE(review): also returns without the kdestroy cleanup
                # below — verify the temporary cache/env var shouldn't be
                # cleaned up on this path too.
                return False
            kinit.close()
            exitstatus = kinit.exitstatus
        except pexpect.TIMEOUT:
            KerberosAuthenticationBackend.kinit_timeout_handle(username, realm)
            exitstatus = 1

    # Destroy the temporary credential cache created above.
    if "KRB5CCNAME" in os.environ:
        subprocess.check_call(['kdestroy', '-c', os.environ["KRB5CCNAME"]])
        del os.environ["KRB5CCNAME"]

    if exitstatus == 0:
        logger.debug("Kerberos authorized {}@{}".format(username, realm))
        return True
    else:
        logger.debug("Kerberos failed to authorize {}".format(username))
        return False
python
{ "resource": "" }
q15695
get_fcps_emerg
train
def get_fcps_emerg(request):
    """Return the FCPS emergency banner HTML, or False when there is none.

    The banner is shown when the upstream feed reports an active
    emergency, or when ``show_emerg`` appears in the query string
    (useful for previewing).
    """
    try:
        emerg = get_emerg()
    except Exception:
        # Best-effort: a fetch failure simply means no banner is shown.
        logger.info("Unable to fetch FCPS emergency info")
        emerg = {"status": False}

    show_banner = emerg["status"] or ("show_emerg" in request.GET)
    if not show_banner:
        return False

    return "{} <span style='display: block;text-align: right'>&mdash; FCPS</span>".format(emerg["message"])
python
{ "resource": "" }
q15696
gen_schedule
train
def gen_schedule(user, num_blocks=6, surrounding_blocks=None):
    """Build per-block signup information for a student's upcoming blocks.

    Returns a ``(schedule, no_signup_today)`` pair: ``schedule`` is a
    list of dicts (one per block) describing the block and the student's
    current signup, and ``no_signup_today`` is True when one of today's
    blocks has no signup. Returns ``(None, False)`` when there are no
    upcoming blocks.
    """
    if surrounding_blocks is None:
        surrounding_blocks = EighthBlock.objects.get_upcoming_blocks(num_blocks)
    if len(surrounding_blocks) == 0:
        return None, False

    # One query covers all signups for these blocks; select_related keeps
    # the per-block loop below free of extra queries.
    signups = (EighthSignup.objects.filter(user=user, scheduled_activity__block__in=surrounding_blocks).select_related(
        "scheduled_activity", "scheduled_activity__block", "scheduled_activity__activity"))
    signups_by_block = {s.scheduled_activity.block.id: s.scheduled_activity for s in signups}

    schedule = []
    no_signup_today = None
    for block in surrounding_blocks:
        sched_act = signups_by_block.get(block.id, None)
        if sched_act:
            signup_title = sched_act.title_with_flags
            signup_cancelled = sched_act.cancelled
            signup_sticky = sched_act.activity.sticky
            rooms = sched_act.get_scheduled_rooms()
        else:
            signup_title = None
            signup_cancelled = False
            signup_sticky = False
            rooms = None

        # "warning" renders the block text/signup link in red (no signup
        # today); "cancelled" renders the activity text in red.
        flags = "locked" if block.locked else "open"
        is_today = block.is_today()
        if is_today and not signup_title:
            flags += " warning"
        if signup_cancelled:
            flags += " cancelled warning"
            # The cancelled state is already conveyed via flags; strip the
            # duplicate suffix from the title.
            signup_title = signup_title.replace(" (Cancelled)", "")

        schedule.append({
            "id": block.id,
            "block": block,
            "block_letter": block.block_letter,
            "current_signup": signup_title,
            "current_signup_cancelled": signup_cancelled,
            "current_signup_sticky": signup_sticky,
            "locked": block.locked,
            "date": block.date,
            "flags": flags,
            "is_today": is_today,
            "signup_time": block.signup_time,
            "signup_time_future": block.signup_time_future,
            "rooms": rooms,
        })

        if is_today and not signup_title:
            no_signup_today = True

    return schedule, no_signup_today
python
{ "resource": "" }
q15697
find_birthdays
train
def _birthday_user_info(u, age_offset):
    """Build one user's birthday entry; empty dict for a missing user."""
    if not u:
        return {}
    return {
        "id": u.id,
        "full_name": u.full_name,
        "grade": {"name": u.grade.name},
        "age": (u.age + age_offset) if u.age is not None else -1,
        "public": u.properties.attribute_is_public("show_birthday"),
    }


def find_birthdays(request):
    """Return information on user birthdays for today and tomorrow.

    When ``birthday_month``/``birthday_day`` query parameters are given,
    the lookup uses that custom date instead, rolled over to next year if
    the date has already passed this year. Results are cached for six
    hours per date. Returns None when user data is malformed.
    """
    today = date.today()
    custom = False
    yr_inc = 0  # 1 when a custom date was rolled over to next year

    if "birthday_month" in request.GET and "birthday_day" in request.GET:
        try:
            mon = int(request.GET["birthday_month"])
            day = int(request.GET["birthday_day"])
            yr = today.year
            # If searching a date that already happened this year, skip
            # to the next year so ages are computed correctly.
            if mon < today.month or (mon == today.month and day < today.day):
                yr += 1
                yr_inc = 1
            # date() raises ValueError for an invalid month/day; in that
            # case we silently fall back to the real today.
            # (BUG FIX: the original had an always-true `if today:` with a
            # dead `else` referencing an unbound `real_today`.)
            today = date(yr, mon, day)
            custom = True
        except ValueError:
            pass

    key = "birthdays:{}".format(today)
    cached = cache.get(key)
    if cached:
        logger.debug("Birthdays on {} loaded from cache.".format(today))
        logger.debug(cached)
        return cached

    logger.debug("Loading and caching birthday info for {}".format(today))
    tomorrow = today + timedelta(days=1)
    try:
        data = {
            "custom": custom,
            "today": {
                "date": today,
                "users": [_birthday_user_info(u, yr_inc)
                          for u in User.objects.users_with_birthday(today.month, today.day)],
                "inc": 0,
            },
            "tomorrow": {
                "date": tomorrow,
                # NOTE(review): the original offsets tomorrow's ages by -1
                # (birthday not yet reached); preserved as-is — confirm.
                # CONSISTENCY FIX: tomorrow's list now also guards against
                # falsy users, matching today's behavior.
                "users": [_birthday_user_info(u, -1)
                          for u in User.objects.users_with_birthday(tomorrow.month, tomorrow.day)],
                "inc": 1,
            },
        }
    except AttributeError:
        # A user record is missing an expected attribute (e.g. no grade).
        return None
    else:
        cache.set(key, data, timeout=60 * 60 * 6)
        return data
python
{ "resource": "" }
q15698
find_visible_birthdays
train
def find_visible_birthdays(request, data):
    """Filter birthday data down to entries the requesting user may see.

    Staff-level users (teachers, eighth office, eighth admins) see every
    birthday; everyone else only sees users whose birthday is public.
    """
    user = request.user
    if user and any((user.is_teacher, user.is_eighthoffice, user.is_eighth_admin)):
        return data

    for day in ("today", "tomorrow"):
        data[day]["users"] = [entry for entry in data[day]["users"] if entry["public"]]
    return data
python
{ "resource": "" }
q15699
EighthActivity._name_with_flags
train
def _name_with_flags(self, include_restricted, title=None):
    """Build the activity's display name, decorated with flag suffixes.

    Order: optional "Special: " prefix, the activity name, an optional
    " - <title>" suffix, then (R)/(BB)/(A)/(S)/(Deleted) markers for the
    corresponding status flags. The (R) marker is only appended when
    ``include_restricted`` is true.
    """
    parts = []
    if self.special:
        parts.append("Special: ")
    parts.append(self.name)
    if title:
        parts.append(" - {}".format(title))
    if include_restricted and self.restricted:
        parts.append(" (R)")
    for enabled, marker in ((self.both_blocks, " (BB)"),
                            (self.administrative, " (A)"),
                            (self.sticky, " (S)"),
                            (self.deleted, " (Deleted)")):
        if enabled:
            parts.append(marker)
    return "".join(parts)
python
{ "resource": "" }