INSTRUCTION
stringlengths
1
8.43k
RESPONSE
stringlengths
75
104k
Get the name of the file containing configuration overrides from the provided environment variable.
def get_overrides_filename(variable):
    """
    Get the name of the file containing configuration overrides from the provided
    environment variable.

    Parameters
    ----------
    variable: str
        name of the environment variable to read

    Returns
    -------
    str
        the filename stored in the environment variable

    Raises
    ------
    EnvironmentError
        if the environment variable is not set
    """
    filename = os.environ.get(variable)
    if filename is None:
        raise EnvironmentError('Please set the {} environment variable.'.format(variable))
    return filename
Parameters ---------- output_category: str inputs: epw idf table: summary table other: other
def get_output_files_layout(output_category):
    """
    Return the output files layout matching the current os and eplus version.

    Parameters
    ----------
    output_category: str
        inputs: epw, idf
        table: summary table
        other: other
    """
    # refuse unknown categories
    if output_category not in ("inputs", "table", "other"):
        raise RuntimeError(f"unknown {output_category}")
    # version dict for current os and category, then pick by current eplus version
    version_to_layout = _layouts_matrix[OS_NAME][output_category]
    return get_value_by_version(version_to_layout)
Finds the value depending on the current eplus version.
def get_value_by_version(d):
    """
    Find the value matching the current eplus version.

    Parameters
    ----------
    d: dict
        {(0, 0): value, (x, x): value, ...}
        for current version (cv), current value is the value of version v such as v <= cv < v+1

    Returns
    -------
    the matching value, or None if no version key is <= current version
    """
    from oplus import CONF  # touchy import
    current_version = CONF.eplus_version[:2]
    # iterate versions from newest to oldest: first one not greater than current wins
    for version, value in sorted(d.items(), reverse=True):
        if current_version >= version:
            return value
works inplace
def switch_to_datetime_instants(df, start_year, eplus_frequency):
    """
    works inplace

    Replaces the instant columns (year/month/day/hour/minute, depending on frequency)
    of df by a forced-frequency datetime index. df is mutated; the returned dataframe
    may nonetheless be a new object (asfreq result).

    Parameters
    ----------
    df: pandas.DataFrame
        EnergyPlus output data with tuple-instant columns
    start_year: int
        year of the first row (sub-annual eplus outputs don't carry the year)
    eplus_frequency
        one of TIMESTEP, HOURLY, DAILY, MONTHLY, ANNUAL, RUN_PERIOD

    Returns
    -------
    pandas.DataFrame with a datetime index (df unchanged for RUN_PERIOD)

    Raises
    ------
    ValueError
        if forcing the frequency changed the index (probable leap-year mismatch), or
        if the annual data's first year differs from start_year
    """
    # timestep -> monthly
    if eplus_frequency in (TIMESTEP, DAILY, HOURLY, MONTHLY):
        # prepare year switch: build a cumulative counter of year rollovers so the
        # missing "year" column can be reconstructed from start_year
        if eplus_frequency in (TIMESTEP, HOURLY, DAILY):
            # NOTE(review): rollover is detected by comparing (month, day) row diffs to
            # (month=12, day=-31); the commented print below suggests (-12, -31) was also
            # considered — confirm this matches the actual Dec 31 -> Jan 1 transition.
            # print((df[["month", "day"]] - df[["month", "day"]].shift()) == pd.Series([-12, -31]))
            year_counter = (
                (df[["month", "day"]] - df[["month", "day"]].shift()) == pd.Series(dict(month=12, day=-31))
            ).all(axis=1).cumsum()
        else:
            # monthly data: a year switch is a month diff of -12
            year_counter = ((df["month"] - df["month"].shift()) == -12).cumsum()

        # add year columns
        df["year"] = year_counter + start_year

        # create index: instant columns involved depend on the frequency
        columns = {
            TIMESTEP: ("year", "month", "day", "hour", "minute"),
            HOURLY: ("year", "month", "day", "hour"),
            DAILY: ("year", "month", "day"),
            MONTHLY: ("year", "month")
        }[eplus_frequency]
        if eplus_frequency == MONTHLY:
            df.index = df.apply(
                # apply transforms ints to floats, we need to re-cast
                # (the appended (1,) is the day, required by dt.datetime)
                lambda x: dt.datetime(*(tuple(int(x[k]) for k in columns) + (1,))),
                axis=1
            )
        else:
            # apply transforms ints to floats, we need to re-cast
            df.index = df.apply(lambda x: dt.datetime(*(int(x[k]) for k in columns)), axis=1)

        # drop old columns (now redundant with the datetime index)
        df.drop(columns=list(columns), inplace=True)

        # force frequency
        if eplus_frequency == TIMESTEP:
            # find freq: infer the timestep from the first two instants
            ts = df.index[1] - df.index[0]
            # force
            forced_df = df.asfreq(ts)
        else:
            forced_df = df.asfreq({
                HOURLY: "H",
                DAILY: "D",
                MONTHLY: "MS"
            }[eplus_frequency])

        # if timestep, hourly or daily, check did not change (only those can suffer from leap year problems)
        if eplus_frequency in (TIMESTEP, HOURLY, DAILY):
            try:
                assert_index_equal(df.index, forced_df.index)
            except AssertionError:
                raise ValueError(
                    f"Couldn't convert to datetime instants (frequency: {eplus_frequency}). Probable cause : "
                    f"given start year ({start_year}) is incorrect and data can't match because of leap year issues."
                ) from None
        return forced_df

    # annual
    if eplus_frequency == ANNUAL:
        # check first year
        if df["year"].iloc[0] != start_year:
            raise ValueError(
                f"Given start year ({start_year}) differs from annual output data first year ({df['year'].iloc[0]}),"
                f"can't switch to datetime instants.")
        df.index = df["year"].map(lambda x: dt.datetime(x, 1, 1))
        del df["year"]
        # force freq
        df = df.asfreq("YS")
        return df

    # run period: nothing to do, data has no instant columns to convert
    if eplus_frequency == RUN_PERIOD:
        return df

    raise AssertionError("should not be here")
if _eplus_version is defined => _eplus_version, else most recent available eplus version
def eplus_version(self):
    """
    Current EnergyPlus version.

    Returns
    -------
    the configured _eplus_version if defined, else the most recent available
    EnergyPlus version.

    Raises
    ------
    RuntimeError
        if no EnergyPlus installation was found
    """
    # check energy plus is installed
    if len(self.eplus_available_versions) == 0:
        # fixed grammar of the error message ("is not install" -> "is not installed")
        raise RuntimeError("Energy plus is not installed, can't use oplus package.")
    # an explicitly configured version takes precedence
    if self._eplus_version is not None:
        return self._eplus_version
    # else return most recent available version (max == sorted(..., reverse=True)[0])
    return max(self.eplus_available_versions.keys())
Parameters ---------- df
def _check_and_sanitize_datetime_instants(df): """ Parameters ---------- df Returns ------- sanitized df """ # leave if not relevant if df is None or len(df) == 0: return df # check datetime index if not isinstance(df.index, pd.DatetimeIndex): raise ValueError("df index must be a datetime index.") # force frequency if needed if df.index.freq != "H": forced_df = df.asfreq("H") # check no change try: assert_index_equal(df.index, forced_df.index) except AssertionError: raise ValueError( f"Couldn't convert to hourly datetime instants. Probable cause : " f"given start instant ({df.index[0]}) is incorrect and data can't match because of leap year issues." ) from None # replace old variable df = forced_df # check first minute is 0 if df.index[0].minute != 0: raise ValueError("Minutes must be 0.") return df
Returns ------- ( start end )
def get_bounds(self):
    """
    Returns
    -------
    (start, end)
        Datetime instants of beginning and end of data. If no data, will be:
        (None, None).
    """
    if len(self._weather_series) == 0:
        return None, None

    bounds = []
    for position in (0, -1):
        if self.has_tuple_instants:
            # instant must be rebuilt from the tuple columns
            row = self._weather_series.iloc[position, :]
            bounds.append(dt.datetime(row["year"], row["month"], row["day"], row["hour"], row["minute"]))
        else:
            # datetime index: convert the pandas timestamp to a python datetime
            bounds.append(self._weather_series.index[position].to_pydatetime())
    return tuple(bounds)
Parameters ---------- buffer_or_path: buffer or path containing epw format.
def from_epw(cls, buffer_or_path):
    """
    Create a WeatherData instance from an epw source.

    Parameters
    ----------
    buffer_or_path: buffer or path containing epw format.

    Returns
    -------
    WeatherData instance.
    """
    from .epw_parse import parse_epw  # deferred import
    _, epw_buffer = to_buffer(buffer_or_path)
    with epw_buffer as f:
        return parse_epw(f)
Parameters ---------- buffer_or_path: buffer or path default None Buffer or path to write into. If None will return a string containing epw info.
def to_epw(self, buffer_or_path=None):
    """
    Parameters
    ----------
    buffer_or_path: buffer or path, default None
        Buffer or path to write into. If None, will return a string containing epw info.

    Returns
    -------
    None or a string if buffer_or_path is None.
    """
    # copy and change hours convention [0, 23] -> [1, 24]
    weather_series = self._weather_series.copy()
    weather_series["hour"] += 1

    # NOTE(review): `line_terminator` was renamed `lineterminator` in pandas 2.0 —
    # confirm the supported pandas versions still accept this keyword.
    body = weather_series.to_csv(header=False, index=False, line_terminator="\n")
    epw_content = self._headers_to_epw() + body

    return multi_mode_write(
        lambda buffer: buffer.write(epw_content),
        lambda: epw_content,
        buffer_or_path=buffer_or_path
    )
Records are created from string. They are not attached to idf yet. in idf: header comment chapter comments records in record: head comment field comments tail comment
def parse_idf(file_like):
    """
    Records are created from string. They are not attached to idf yet.

    in idf: header comment, chapter comments, records
    in record: head comment, field comments, tail comment

    Parameters
    ----------
    file_like
        iterable of idf text lines

    Returns
    -------
    dict
        {table_ref: [record_data, ...], ..., "_comment": head_comment}
        where each record_data maps a field index to its raw string value
    """
    tables_data = {}
    head_comment = ""
    record_data = None
    # when True, the next content line starts a new record (previous one ended with ";")
    make_new_record = True

    copyright_list = get_multi_line_copyright_message().split("\n")

    for i, raw_line in enumerate(file_like):
        # manage if copyright: the leading copyright banner is not part of the model
        try:
            copyright_line = copyright_list[i]
            if raw_line.strip() == copyright_line:
                # skip copyright line
                continue
        except IndexError:
            pass

        # GET LINE CONTENT AND COMMENT ("!" starts an idf comment)
        split_line = raw_line.split("!")

        # no "!" in the raw_line
        if len(split_line) == 1:
            # this is an empty line
            if len(split_line[0].strip()) == 0:
                content, comment = None, None
            # this is a record line with no comments
            else:
                content, comment = split_line[0].strip(), None
        # there is at least one "!" in the raw_line
        else:
            # this is a comment line
            if len(split_line[0].strip()) == 0:
                content, comment = None, "!".join(split_line[1:])
            # this is a record line with a comment
            else:
                content, comment = split_line[0].strip(), "!".join(split_line[1:])

        # SKIP CURRENT LINE IF VOID
        if (content, comment) == (None, None):
            continue

        # NO CONTENT: pure comment line
        if not content:
            if record_data is None:
                # we only manage head idf comment (comments inside records are dropped)
                head_comment += comment.strip() + "\n"
            continue

        # CONTENT
        # check if record end and prepare
        record_end = content[-1] == ";"
        content = content[:-1]  # we tear comma or semi-colon
        content_l = [text.strip() for text in content.split(",")]

        # record creation if needed
        if make_new_record:
            # get table ref (first comma-separated token is the table name)
            table_ref = table_name_to_ref(content_l[0].strip())

            # skip if special table
            if table_ref.lower() in (
                    "lead input",
                    "end lead input",
                    "simulation data",
                    "end simulation data"
            ):
                continue

            # declare table if necessary
            if table_ref not in tables_data:
                tables_data[table_ref] = []

            # create and store record
            record_data = dict()
            tables_data[table_ref].append(record_data)

            # prepare in case fields on the same line
            content_l = content_l[1:]
            make_new_record = False

        # fields: append values to the current record, indexed by position
        for value_s in content_l:
            field_index = len(record_data)
            record_data[field_index] = value_s

        # signal that new record must be created
        if record_end:
            make_new_record = True

    # add comment key
    tables_data["_comment"] = head_comment

    return tables_data
Parameters ---------- epm_or_idf_path: weather_data_or_epw_path simulation_dir_path stdout: default sys. stdout stderr: default sys. stderr beat_freq: if not none stdout will be used at least every beat_freq ( in seconds )
def run_eplus(epm_or_idf_path, weather_data_or_epw_path, simulation_dir_path, stdout=None, stderr=None, beat_freq=None):
    """
    Run an EnergyPlus simulation in the given directory.

    Parameters
    ----------
    epm_or_idf_path
        Epm instance, or idf buffer or path
    weather_data_or_epw_path
        WeatherData instance, or epw path
    simulation_dir_path
        existing directory in which the simulation is run
    stdout: default sys.stdout
    stderr: default sys.stderr
    beat_freq: if not none, stdout will be used at least every beat_freq (in seconds)
    """
    # work with absolute paths
    simulation_dir_path = os.path.abspath(simulation_dir_path)

    # check dir path
    if not os.path.isdir(simulation_dir_path):
        raise NotADirectoryError("Simulation directory does not exist: '%s'." % simulation_dir_path)

    # epm
    if not isinstance(epm_or_idf_path, Epm):
        # we don't copy file directly because we want to manage it's external files
        # could be optimized (use _copy_without_read_only)
        epm = Epm.from_idf(epm_or_idf_path)
    else:
        epm = epm_or_idf_path

    # create idf
    simulation_idf_path = os.path.join(simulation_dir_path, CONF.default_model_name + ".idf")
    epm.to_idf(simulation_idf_path)

    # weather data
    simulation_epw_path = os.path.join(simulation_dir_path, CONF.default_model_name + ".epw")
    if isinstance(weather_data_or_epw_path, WeatherData):
        weather_data_or_epw_path.to_epw(simulation_epw_path)
    else:
        # no need to load: we copy directly
        _copy_without_read_only(weather_data_or_epw_path, simulation_epw_path)

    # copy epw if needed (depends on os/eplus version)
    temp_epw_path = get_simulated_epw_path()
    if temp_epw_path is not None:
        _copy_without_read_only(simulation_epw_path, temp_epw_path)

    # prepare command
    eplus_relative_cmd = get_simulation_base_command()
    eplus_cmd = os.path.join(CONF.eplus_base_dir_path, eplus_relative_cmd)

    # idf
    idf_command_style = get_simulation_input_command_style("idf")
    if idf_command_style == SIMULATION_INPUT_COMMAND_STYLES.simu_dir:
        idf_file_cmd = os.path.join(simulation_dir_path, CONF.default_model_name)
    elif idf_command_style == SIMULATION_INPUT_COMMAND_STYLES.file_path:
        idf_file_cmd = simulation_idf_path
    else:
        raise AssertionError("should not be here")

    # epw
    epw_command_style = get_simulation_input_command_style("epw")
    if epw_command_style == SIMULATION_INPUT_COMMAND_STYLES.simu_dir:
        epw_file_cmd = os.path.join(simulation_dir_path, CONF.default_model_name)
    elif epw_command_style == SIMULATION_INPUT_COMMAND_STYLES.file_path:
        epw_file_cmd = simulation_epw_path
    else:
        raise AssertionError("should not be here")

    # command list
    simulation_command_style = get_simulation_command_style()
    if simulation_command_style == SIMULATION_COMMAND_STYLES.args:
        cmd_l = [eplus_cmd, idf_file_cmd, epw_file_cmd]
    elif simulation_command_style == SIMULATION_COMMAND_STYLES.kwargs:
        cmd_l = [eplus_cmd, "-w", epw_file_cmd, "-r", idf_file_cmd]
    else:
        # consistency: the sibling impossible branches above raise AssertionError
        # (this one used to raise RuntimeError)
        raise AssertionError("should not be here")

    # launch calculation
    run_subprocess(
        cmd_l,
        cwd=simulation_dir_path,
        stdout=stdout,
        stderr=stderr,
        beat_freq=beat_freq
    )

    # if needed, we delete temp weather data (only on Windows, see above)
    if (temp_epw_path is not None) and os.path.isfile(temp_epw_path):
        # was os.remove(os.path.join(temp_epw_path)): single-arg join is a no-op
        os.remove(temp_epw_path)
Parameters ---------- epm_or_path weather_data_or_path base_dir_path: simulation dir path simulation_name: str default None if provided simulation will be done in { base_dir_path }/ { simulation_name } else simulation will be done in { base_dir_path } stdout: stream default logger. info stream where EnergyPlus standard output is redirected stderr: stream default logger. error stream where EnergyPlus standard error is redirected beat_freq: float default None if provided subprocess in which EnergyPlus is run will write at given frequency in standard output. May be used to monitor subprocess state.
def simulate(
        cls,
        epm_or_path,
        weather_data_or_path,
        base_dir_path,
        simulation_name=None,
        stdout=None,
        stderr=None,
        beat_freq=None
):
    """
    Parameters
    ----------
    epm_or_path
    weather_data_or_path
    base_dir_path: simulation dir path
    simulation_name: str, default None
        if provided, simulation will be done in {base_dir_path}/{simulation_name}
        else, simulation will be done in {base_dir_path}
    stdout: stream, default logger.info
        stream where EnergyPlus standard output is redirected
    stderr: stream, default logger.error
        stream where EnergyPlus standard error is redirected
    beat_freq: float, default None
        if provided, subprocess in which EnergyPlus is run will write at given frequency in standard output.
        May be used to monitor subprocess state.

    Returns
    -------
    Simulation instance
    """
    # base dir must pre-exist
    if not os.path.isdir(base_dir_path):
        raise NotADirectoryError("Base dir path not found: '%s'" % base_dir_path)

    # resolve simulation dir, creating it if needed
    if simulation_name is None:
        simulation_dir_path = base_dir_path
    else:
        simulation_dir_path = os.path.join(base_dir_path, simulation_name)
    if not os.path.exists(simulation_dir_path):
        os.mkdir(simulation_dir_path)

    # default streams redirect to this module's logger
    if stdout is None:
        stdout = LoggerStreamWriter(logger_name=__name__, level=logging.INFO)
    if stderr is None:
        stderr = LoggerStreamWriter(logger_name=__name__, level=logging.ERROR)

    # run simulation
    run_eplus(
        epm_or_path,
        weather_data_or_path,
        simulation_dir_path,
        stdout=stdout,
        stderr=stderr,
        beat_freq=beat_freq
    )

    # return simulation object
    return cls(base_dir_path, simulation_name=simulation_name)
Defined here so that we can use the class variables in order to subclass in oplusplus
def _file_refs(self):
    """
    Lazily build and cache the {file_ref: FileInfo} mapping.

    Defined here so that we can use the class variables, in order to subclass in oplusplus.
    """
    def _read_text(path):
        # read the whole file while making sure the handle is closed
        # (the previous `open(path).read()` leaked the file object)
        with open(path) as f:
            return f.read()

    if self._prepared_file_refs is None:
        self._prepared_file_refs = {
            FILE_REFS.idf: FileInfo(
                constructor=lambda path: self._epm_cls.from_idf(path, idd_or_buffer_or_path=self._idd),
                get_path=lambda: get_input_file_path(self.dir_path, FILE_REFS.idf)
            ),
            FILE_REFS.epw: FileInfo(
                constructor=lambda path: self._weather_data_cls.from_epw(path),
                get_path=lambda: get_input_file_path(self.dir_path, FILE_REFS.epw)
            ),
            FILE_REFS.eio: FileInfo(
                constructor=lambda path: self._eio_cls(path),
                get_path=lambda: get_output_file_path(self.dir_path, FILE_REFS.eio)
            ),
            FILE_REFS.eso: FileInfo(
                constructor=lambda path: self._standard_output_cls(path),
                get_path=lambda: get_output_file_path(
                    self.dir_path,
                    FILE_REFS.eso
                )
            ),
            FILE_REFS.mtr: FileInfo(
                constructor=lambda path: self._standard_output_cls(path),
                get_path=lambda: get_output_file_path(self.dir_path, FILE_REFS.mtr)
            ),
            FILE_REFS.mtd: FileInfo(
                constructor=lambda path: self._mtd_cls(path),
                get_path=lambda: get_output_file_path(self.dir_path, FILE_REFS.mtd)
            ),
            FILE_REFS.mdd: FileInfo(
                constructor=_read_text,
                get_path=lambda: get_output_file_path(self.dir_path, FILE_REFS.mdd)
            ),
            FILE_REFS.err: FileInfo(
                constructor=lambda path: self._err_cls(path),
                get_path=lambda: get_output_file_path(self.dir_path, FILE_REFS.err)
            ),
            FILE_REFS.summary_table: FileInfo(
                constructor=lambda path: self._summary_table_cls(path),
                get_path=lambda: get_output_file_path(self.dir_path, FILE_REFS.summary_table)
            )
        }
    return self._prepared_file_refs
Parameters ---------- file_ref: str reference of file. Available references: idf epw eio eso mtr mtd mdd err summary_table See EnergyPlus documentation for more information.
def exists(self, file_ref):
    """
    Parameters
    ----------
    file_ref: str
        reference of file. Available references: 'idf', 'epw', 'eio', 'eso', 'mtr', 'mtd', 'mdd', 'err',
        'summary_table'
        See EnergyPlus documentation for more information.

    Returns
    -------
    Boolean

    Raises
    ------
    ValueError
        if file_ref is unknown
    """
    # refuse unknown references rather than silently answering False
    if file_ref in FILE_REFS:
        return os.path.isfile(self._path(file_ref))
    raise ValueError("Unknown file_ref: '%s'. Available: '%s'." % (file_ref, list(sorted(FILE_REFS._fields))))
Parameters ---------- file_ref: str reference of file. Available references: idf epw eio eso mtr mtd mdd err summary_table See EnergyPlus documentation for more information.
def get_file_path(self, file_ref):
    """
    Parameters
    ----------
    file_ref: str
        reference of file. Available references: 'idf', 'epw', 'eio', 'eso', 'mtr', 'mtd', 'mdd', 'err',
        'summary_table'
        See EnergyPlus documentation for more information.

    Returns
    -------
    str
        Path of the required file.

    Raises
    ------
    FileNotFoundError
        if the file does not exist in the simulation directory
    """
    if not self.exists(file_ref):
        raise FileNotFoundError("File '%s' not found in simulation '%s'." % (file_ref, self._path(file_ref)))
    return self._path(file_ref)
Parameters ---------- model_name: with or without extension
def default_external_files_dir_name(model_name):
    """
    Default name of the directory storing a model's external files.

    Parameters
    ----------
    model_name: with or without extension

    Returns
    -------
    str
        model name without its extension, suffixed by CONF.external_files_suffix
    """
    base_name = os.path.splitext(model_name)[0]
    return base_name + CONF.external_files_suffix
!! Must only be called once when empty !!
def _dev_populate_from_json_data(self, json_data): """ !! Must only be called once, when empty !! """ # workflow # -------- # (methods belonging to create/update/delete framework: # epm._dev_populate_from_json_data, table.batch_add, record.update, queryset.delete, record.delete) # 1. add inert # * data is checked # * old links are unregistered # * record is stored in table (=> pk uniqueness is checked) # 2. activate: hooks, links, external files # manage comment if any comment = json_data.pop("_comment", None) if comment is not None: self._comment = comment # populate external files external_files_data = json_data.pop("_external_files", dict()) self._dev_external_files_manager.populate_from_json_data(external_files_data) # manage records added_records = [] for table_ref, json_data_records in json_data.items(): # find table table = getattr(self, table_ref) # create record (inert) records = table._dev_add_inert(json_data_records) # add records (inert) added_records.extend(records) # activate hooks for r in added_records: r._dev_activate_hooks() # activate links and external files for r in added_records: r._dev_activate_links() r._dev_activate_external_files()
An external file manages file paths.
def get_external_files(self):
    """
    Collect the external files of all records of all tables.

    An external file manages file paths.

    Returns
    -------
    list of external files
    """
    external_files = []
    for table in self._tables.values():
        for r in table:
            # extend directly with the record's list
            # (no need to rebuild it through a comprehension)
            external_files.extend(r.get_external_files())
    return external_files
All fields of Epm with a default value and that are null will be set to their default value.
def set_defaults(self):
    """
    All fields of Epm with a default value and that are null will be set to their default value.
    """
    # delegate to each record of each table
    for table in self._tables.values():
        for record in table:
            record.set_defaults()
Parameters ---------- json_data: dict Dictionary of serialized data ( text floats ints... ). For more information on data structure create an Epm and use to_json_data or to_json. check_required: boolean default True If True will raise an exception if a required field is missing. If False not not perform any checks. idd_or_buffer_or_path: ( expert ) to load using a custom idd
def from_json_data(cls, json_data, check_required=True, idd_or_buffer_or_path=None):
    """
    Parameters
    ----------
    json_data: dict
        Dictionary of serialized data (text, floats, ints, ...). For more information on data structure,
        create an Epm and use to_json_data or to_json.
    check_required: boolean, default True
        If True, will raise an exception if a required field is missing. If False, does not perform any checks.
    idd_or_buffer_or_path: (expert) to load using a custom idd

    Returns
    -------
    An Epm instance.
    """
    # create an empty epm, then populate it with the given data
    epm = cls(idd_or_buffer_or_path=idd_or_buffer_or_path, check_required=check_required)
    epm._dev_populate_from_json_data(json_data)
    return epm
Parameters ---------- buffer_or_path: idf buffer or path check_required: boolean default True If True will raise an exception if a required field is missing. If False not not perform any checks. idd_or_buffer_or_path: ( expert ) to load using a custom idd
def from_idf(cls, buffer_or_path, check_required=True, idd_or_buffer_or_path=None):
    """
    Parameters
    ----------
    buffer_or_path: idf buffer or path
    check_required: boolean, default True
        If True, will raise an exception if a required field is missing. If False, does not perform any checks.
    idd_or_buffer_or_path: (expert) to load using a custom idd

    Returns
    -------
    An Epm instance.
    """
    # todo: add geometry only (or equivalent)
    # parse_idf is used as the parser of the buffer
    return cls._create_from_buffer_or_path(
        parse_idf,
        buffer_or_path,
        check_required=check_required,
        idd_or_buffer_or_path=idd_or_buffer_or_path
    )
Parameters ---------- buffer_or_path: json buffer or path check_required: boolean default True If True will raise an exception if a required field is missing. If False not not perform any checks. idd_or_buffer_or_path: ( expert ) to load using a custom idd
def from_json(cls, buffer_or_path, check_required=True, idd_or_buffer_or_path=None):
    """
    Parameters
    ----------
    buffer_or_path: json buffer or path
    check_required: boolean, default True
        If True, will raise an exception if a required field is missing. If False, does not perform any checks.
    idd_or_buffer_or_path: (expert) to load using a custom idd

    Returns
    -------
    An Epm instance.
    """
    # json.load is used as the parser of the buffer
    return cls._create_from_buffer_or_path(
        json.load,
        buffer_or_path,
        check_required=check_required,
        idd_or_buffer_or_path=idd_or_buffer_or_path
    )
Returns ------- A dictionary of serialized data.
def to_json_data(self):
    """
    Returns
    -------
    A dictionary of serialized data.
    """
    # serialize all tables, keyed by table ref
    d = collections.OrderedDict((table.get_ref(), table.to_json_data()) for table in self._tables.values())
    # the comment goes first
    d["_comment"] = self._comment
    d.move_to_end("_comment", last=False)
    # external files go last
    d["_external_files"] = self._dev_external_files_manager
    return d
Parameters ---------- buffer_or_path: buffer or path default None output to write into. If None will return a json string. indent: int default 2 Defines the indentation of the json
def to_json(self, buffer_or_path=None, indent=2):
    """
    Parameters
    ----------
    buffer_or_path: buffer or path, default None
        output to write into. If None, will return a json string.
    indent: int, default 2
        Defines the indentation of the json

    Returns
    -------
    None, or a json string (if buffer_or_path is None).
    """
    # serialize then write (or return a string if no target was given)
    json_data = self.to_json_data()
    return json_data_to_json(json_data, buffer_or_path=buffer_or_path, indent=indent)
Parameters ---------- buffer_or_path: buffer or path default None output to write into. If None will return a json string. dump_external_files: boolean default True if True external files will be dumped in external files directory
def to_idf(self, buffer_or_path=None, dump_external_files=True): """ Parameters ---------- buffer_or_path: buffer or path, default None output to write into. If None, will return a json string. dump_external_files: boolean, default True if True, external files will be dumped in external files directory Returns ------- None, or an idf string (if buffer_or_path is None). """ # prepare comment comment = get_multi_line_copyright_message() if self._comment != "": comment += textwrap.indent(self._comment, "! ", lambda line: True) comment += "\n\n" # prepare external files dir path if file path if isinstance(buffer_or_path, str): dir_path, file_name = os.path.split(buffer_or_path) model_name, _ = os.path.splitext(file_name) else: model_name, dir_path = None, os.path.curdir # dump files if asked if dump_external_files: self.dump_external_files( target_dir_path=os.path.join(dir_path, get_external_files_dir_name(model_name=model_name)) ) # prepare body formatted_records = [] for table_ref, table in self._tables.items(): # self._tables is already sorted formatted_records.extend([r.to_idf(model_name=model_name) for r in sorted(table)]) body = "\n\n".join(formatted_records) # return content = comment + body return multi_mode_write( lambda f: f.write(content), lambda: content, buffer_or_path )
Parameters ---------- filter_by: callable default None Callable must take one argument ( a record of queryset ) and return True to keep record or False to skip it. Example:. select ( lambda x: x. name == my_name ). If None records are not filtered.
def select(self, filter_by=None):
    """
    Parameters
    ----------
    filter_by: callable, default None
        Callable must take one argument (a record of queryset), and return True to keep record, or False to skip it.
        Example : .select(lambda x: x.name == "my_name").
        If None, records are not filtered.

    Returns
    -------
    Queryset instance, containing all selected records.
    """
    if filter_by is None:
        records = self._records
    else:
        records = filter(filter_by, self._records)
    return Queryset(self._table, records)
Parameters ---------- filter_by: callable default None Callable must take one argument ( a record of table ) and return True to keep record or False to skip it. Example:. one ( lambda x: x. name == my_name ). If None records are not filtered.
def one(self, filter_by=None):
    """
    Parameters
    ----------
    filter_by: callable, default None
        Callable must take one argument (a record of table), and return True to keep record, or False to skip it.
        Example : .one(lambda x: x.name == "my_name").
        If None, records are not filtered.

    Returns
    -------
    Record instance if one and only one record is found. Else raises.

    Raises
    ------
    RecordDoesNotExistError if no record is found
    MultipleRecordsReturnedError if multiple records are found
    """
    # filter if needed
    qs = self.select(filter_by=filter_by) if filter_by is not None else self
    # check one and only one
    if len(qs) == 0:
        raise RecordDoesNotExistError("Queryset set contains no value.")
    if len(qs) > 1:
        raise MultipleRecordsReturnedError("Queryset contains more than one value.")
    # return record
    return qs[0]
Returns ------- None if epw can be anywhere
def get_simulated_epw_path():
    """
    Path at which EnergyPlus expects the epw file during simulation.

    Returns
    -------
    None if epw can be anywhere (only the Windows case requires the epw to be placed
    in the install's WeatherData directory)
    """
    from oplus import CONF  # touchy imports
    if OS_NAME != "windows":
        return None
    return os.path.join(CONF.eplus_base_dir_path, "WeatherData", "%s.epw" % CONF.default_model_name)
This function finishes initialization must be called once all field descriptors and tag have been filled.
def prepare_extensible(self): """ This function finishes initialization, must be called once all field descriptors and tag have been filled. """ # see if extensible and store cycle len for k in self._tags: if "extensible" in k: cycle_len = int(k.split(":")[1]) break else: # not extensible return # find cycle start and prepare patterns cycle_start = None cycle_patterns = [] for i, field_descriptor in enumerate(self._field_descriptors): # quit if finished if (cycle_start is not None) and (i >= (cycle_start + cycle_len)): break # set cycle start if not set yet if (cycle_start is None) and ("begin-extensible" in field_descriptor.tags): cycle_start = i # leave if cycle start not reached yet if cycle_start is None: continue # store pattern cycle_patterns.append(field_descriptor.ref.replace("1", r"(\d+)")) else: raise RuntimeError("cycle start not found") # detach unnecessary field descriptors self._field_descriptors = self._field_descriptors[:cycle_start + cycle_len] # store cycle info self.extensible_info = (cycle_start, cycle_len, tuple(cycle_patterns)) # set field descriptor cycle_start index (for error messages while serialization) for i, fd in enumerate(self._field_descriptors[cycle_start:]): fd.set_extensible_info(cycle_start, cycle_len, cycle_patterns[i])
reduced index: modulo of extensible has been applied
def get_field_reduced_index(self, index):
    """
    reduced index: modulo of extensible has been applied

    Parameters
    ----------
    index: int

    Returns
    -------
    int
        index itself for non-extensible tables and base fields, else the matching
        index within the first extensible cycle
    """
    # non-extensible tables: nothing to reduce
    if self.extensible_info is None:
        return index

    cycle_start, cycle_len, _ = self.extensible_info

    # base fields (before the cycle) are returned as-is
    if index < cycle_start:
        return index

    # wrap extensible fields back onto the first cycle
    offset = index - cycle_start
    return cycle_start + offset % cycle_len
manages extensible names
def get_extended_name(self, index):
    """
    manages extensible names

    Returns the field descriptor's name with the cycle number substituted in for
    extensible tables (None if the descriptor has no name).
    """
    field_descriptor = self.get_field_descriptor(index)

    # non-extensible tables: name is used as-is
    if self.extensible_info is None:
        return field_descriptor.name

    cycle_start, cycle_len, _ = self.extensible_info
    cycle_num = (index - cycle_start) // cycle_len
    if field_descriptor.name is None:
        return None
    # NOTE(review): replaces every "1" in the name, not only the field number —
    # confirm names never contain other 1s.
    return field_descriptor.name.replace("1", str(cycle_num))
index is used for extensible fields error messages ( if given )
def deserialize(self, value, index):
    """
    Deserialize a raw field value into its final type.

    index is used for extensible fields error messages (if given)

    Parameters
    ----------
    value
        raw value (str, None, Record, ExternalFile, number, ...)
    index: int
        field index

    Returns
    -------
    None, str, int, float, ExternalFile, RecordHook or Link depending on the
    field's detailed type

    Raises
    ------
    FieldValidationError
    """
    # -- serialize if not raw type
    # transform to string if external file
    if isinstance(value, ExternalFile):
        value = value.pointer

    # transform to string if record
    if isinstance(value, Record):
        try:
            value = value[0]
        except IndexError:
            raise ValueError("can't set given record because it does not have a name field")

    # -- prepare if string
    if isinstance(value, str):
        # change multiple spaces to mono spaces
        value = re.sub(spaces_and_newlines_pattern, lambda x: " ", value.strip())

        # see if still not empty
        if value == "":
            return None

        # make ASCII compatible
        value = unidecode.unidecode(value)

        # make lower case if not retaincase
        if "retaincase" not in self.tags:
            value = value.lower()

        # check not too big
        if len(value) >= 100:
            raise FieldValidationError(
                f"Field has more than 100 characters which is the limit. "
                f"{self.get_error_location_message(value, index=index)}"
            )

        # transform to external file if relevant
        if self.is_file_name:
            value = ExternalFile.deserialize(value)

    # -- deserialize
    # numeric types
    if self.detailed_type in ("integer", "real"):
        # manage none
        if value is None:
            return None

        # special values: auto-calculate, auto-size, use-weather-file
        if value in ("autocalculate", "autosize", "useweatherfile"):
            return value

        if self.detailed_type == "integer":
            try:
                return int(value)
            except (TypeError, ValueError):
                # was a bare except, which also swallowed KeyboardInterrupt/SystemExit
                raise FieldValidationError(
                    f"Couldn't parse to integer. {self.get_error_location_message(value, index=index)}"
                )
        try:
            return float(value)
        except (TypeError, ValueError):
            # was a bare except
            raise FieldValidationError(
                f"Couldn't parse to float. {self.get_error_location_message(value, index=index)}"
            )

    # simple string types
    if self.detailed_type in ("alpha", "choice", "node", "external-list"):
        # manage none
        if value is None:
            return None

        # ensure it was str
        if not isinstance_str(value):
            raise FieldValidationError(
                f"Value must be a string. {self.get_error_location_message(value, index=index)}"
            )
        return value

    # manage hooks (eplus reference)
    if self.detailed_type == "reference":
        # manage None
        if value is None:
            return NONE_RECORD_HOOK
        # reference class name appears in v9.0.1
        references = self.tags.get("reference", [])
        # NOTE(review): old comment listed (table_name, index, value, references,
        # class_references) — confirm RecordHook's signature matches these 3 args.
        return RecordHook(references, index, value)

    # manage links (eplus object-list)
    if self.detailed_type == "object-list":
        # manage None
        if value is None:
            return NONE_LINK
        return Link(self.tags["object-list"], value, index)

    raise RuntimeError("should not be here")
Uses EPlus double approach of type ( type tag and/ or key object - list external - list reference tags ) to determine detailed type. Returns ------- integer real alpha choice reference object - list external - list node
def detailed_type(self):
    """
    Resolve the field's detailed type from its idd tags, memoizing the result.

    EPlus describes types through a combination of the 'type' tag and the
    'key', 'object-list', 'external-list' and 'reference' tags; this property
    combines both approaches.

    Returns
    -------
    one of: "integer", "real", "alpha", "choice", "reference",
    "object-list", "external-list", "node"
    """
    if self._detailed_type is not None:
        return self._detailed_type

    tags = self.tags
    if ("reference" in tags) or ("reference-class-name" in tags):
        detailed = "reference"
    elif "type" in tags:
        # idd is not very rigorous on case
        detailed = tags["type"][0].lower()
    elif "key" in tags:
        detailed = "choice"
    elif "object-list" in tags:
        detailed = "object-list"
    elif "external-list" in tags:
        detailed = "external-list"
    elif self.basic_type == "A":
        detailed = "alpha"
    elif self.basic_type == "N":
        detailed = "real"
    else:
        raise ValueError("Can't find detailed type.")

    self._detailed_type = detailed
    return detailed
we calculate on the fly to avoid managing registrations and un - registrations
def short_refs(self):
    """
    Map each external file ref to a short, collision-free file name.

    Computed on the fly to avoid managing registrations and un-registrations.

    Returns
    -------
    dict: {ref: short_ref, ...}
    """
    # group refs by their naive short ref to detect name collisions
    naive_short_refs_d = dict()  # naive_short_ref: {refs, ...}
    for ef in self._external_files:
        if ef.naive_short_ref not in naive_short_refs_d:
            naive_short_refs_d[ef.naive_short_ref] = set()
        naive_short_refs_d[ef.naive_short_ref].add(ef.ref)

    short_refs = dict()
    for naive_short_ref, refs in naive_short_refs_d.items():
        # no collision: keep the naive short ref as is
        if len(refs) == 1:
            short_refs[refs.pop()] = naive_short_ref
            continue
        # collision: de-duplicate with a numeric suffix before the extension
        base, ext = os.path.splitext(naive_short_ref)
        for i, ref in enumerate(sorted(refs)):
            # fixed: ext already contains the leading dot (os.path.splitext),
            # previous code produced a double dot ("name-0..csv")
            short_refs[ref] = f"{base}-{i}{ext}"
    return short_refs
Returns the value column of the first row whose filter column matches the filter criterion.
def get_value(self, column_name_or_i, filter_column_name_or_i, filter_criterion):
    """
    Return the value of the first row whose filter column matches the
    filter criterion.

    Raises
    ------
    ValueError
        if no row matches
    """
    target_i = self._get_column_index(column_name_or_i)
    filter_i = self._get_column_index(filter_column_name_or_i)

    # build a matcher adapted to the criterion type (case-insensitive for str)
    matchers = {
        float: lambda cell: float(cell) == filter_criterion,
        int: lambda cell: int(cell) == filter_criterion,
        str: lambda cell: cell.lower() == filter_criterion.lower(),
    }
    matches = matchers[type(filter_criterion)]

    for row in self._data:
        if matches(row[filter_i]):
            return row[target_i]
    raise ValueError("Filter did not return any values.")
is only called by _update_inert
def _update_value_inert(self, index, value): """ is only called by _update_inert """ # get field descriptor field_descriptor = self._table._dev_descriptor.get_field_descriptor(index) # prepare value value = field_descriptor.deserialize(value, index) # unregister previous link if relevant if isinstance(value, Link): # de-activate current link if any current_link = self._data.get(index) if current_link is not None: current_link.unregister() # unregister previous hook if relevant if isinstance(value, RecordHook): current_record_hook = self._data.get(index) if current_record_hook is not None: current_record_hook.unregister() # unregister previous external file if relevant if isinstance(value, ExternalFile): current_external_file = self._data.get(index) if current_external_file is not None: current_external_file._dev_unregister() # if None remove and leave if value in (None, NONE_RECORD_HOOK, NONE_LINK, NONE_EXTERNAL_FILE): # we don't check required, because this method is called by _update_inert which does the job self._dev_set_none_without_unregistering(index, check_not_required=False) return # if relevant, store current pk to signal table old_hook = None if index == 0 and not self._table._dev_auto_pk: old_hook = self._data.get(0) # we use get, because record may not have a pk yet if it is being created # set value self._data[index] = value # signal pk update if relevant if old_hook is not None: self._table._dev_record_pk_was_updated(old_hook.target_value)
Parameters
----------
ref_or_index: str or int
    field lowercase name, or field position
model_name: str, default None
    if given, will be used as the external files directory base name
def get_serialized_value(self, ref_or_index, model_name=None):
    """
    Parameters
    ----------
    ref_or_index: str or int
        field lowercase name, or field position
    model_name: str, default None
        if given, used as the external files directory base name when
        serializing ExternalFile values

    Returns
    -------
    serialized value (only basic types: string, int, float, None, ...)
    """
    # resolve a field name to its position if needed
    index = (
        self._table._dev_descriptor.get_field_index(ref_or_index) if isinstance(ref_or_index, str)
        else ref_or_index
    )

    # get value
    value = self._data.get(index)

    # serialize
    value = value.serialize() if isinstance(value, (Link, RecordHook)) else value

    # manage file names: external files serialize to a path inside the
    # external files directory
    if isinstance(value, ExternalFile):
        value = os.path.join(get_external_files_dir_name(model_name=model_name), value.naive_short_ref)

    return value
Returns
-------
List of ExternalFile instances contained by the record.
def get_external_files(self):
    """
    Returns
    -------
    List of the ExternalFile instances contained by this record's fields.
    """
    is_external_file = lambda field_value: isinstance(field_value, ExternalFile)
    return list(filter(is_external_file, self._data.values()))
Updates simultaneously all given fields.
def update(self, data=None, **or_data):
    """
    Update several fields of the record at once.

    Parameters
    ----------
    data: dictionary containing field lowercase names or index as keys, and field values as values (dict syntax)
    or_data: keyword arguments containing field names as keys (kwargs syntax)
    """
    # create/update/delete framework:
    # 1. add inert: data is checked, old links are unregistered, record is
    #    stored in table (=> pk uniqueness is checked)
    # 2. activate: hooks, links, external files
    if data is None:
        data = or_data
    self._update_inert(data)
    self._dev_activate_hooks()
    self._dev_activate_links()
    self._dev_activate_external_files()
Parameters
----------
new_name: str, default None
    record's new name (if the table has a name). If None although the record has a name, a random uuid will be given.
def copy(self, new_name=None): """ Parameters ---------- new_name: str, default None record's new name (if table has a name). If None although record has a name, a random uuid will be given. Returns ------- Copied record. """ # todo: check this really works, !! must not use same link, hook, external_file, ... for different records !! # auto pk tables can just be copied if self._table._dev_auto_pk: return self._table.add(self._data) # for ref pk tables, must manage name name = str(uuid.uuid4()) if new_name is None else new_name new_data = dict((k, name if k == 0 else v) for (k, v) in self._data.items()) return self._table.add(new_data)
sets all empty fields for which a default value is defined to default value
def set_defaults(self):
    """
    Set every empty field that declares a default value to that default.
    """
    defaults = {}
    for field_index in range(len(self)):
        # skip fields that already hold a value
        if field_index in self._data:
            continue
        tags = self.get_field_descriptor(field_index).tags
        default_values = tags.get("default", [None])
        if default_values[0] is not None:
            defaults[field_index] = default_values[0]
    self.update(defaults)
This method only works for extensible fields. It allows adding values without specifying their fields' names or indexes.
def add_fields(self, *args):
    """
    Append values to an extensible record without naming the fields.

    Values are added after the last current field, in the order given.

    Parameters
    ----------
    args: field values

    Raises
    ------
    TypeError
        if the record is not extensible
    """
    if not self.is_extensible():
        raise TypeError("Can't use add_fields on a non extensible record.")
    # map each new value to the next free index
    start = len(self)
    data = {start + i: value for i, value in enumerate(args)}
    self.update(data)
This method only works for extensible fields. It allows to remove a value and shift all other values to fill the gap.
def pop(self, index=None): """ This method only works for extensible fields. It allows to remove a value and shift all other values to fill the gap. Parameters ---------- index: int, default None index of field to remove. Returns ------- serialize value of popped field """ # prepare index (will check for extensible) index = self._prepare_pop_insert_index(index=index) # get extensible info cycle_start, cycle_len, patterns = self.get_extensible_info() # remove extensible fields fields = self.clear_extensible_fields() # pop serialized_value = fields.pop(index-cycle_start) # add remaining self.add_fields(*fields) return serialized_value
This method only works for extensible fields. It allows to insert a value and shifts all other following values.
def insert(self, index, value):
    """
    This method only works for extensible fields. It allows to insert a value,
    and shifts all other following values.

    Parameters
    ----------
    index: position of insertion (absolute field index)
    value: value to insert
    """
    # prepare index (will check for extensible)
    index = self._prepare_pop_insert_index(index=index)

    # get extensible info (needed to translate the absolute index)
    cycle_start, cycle_len, patterns = self.get_extensible_info()

    # remove extensible fields
    fields = self.clear_extensible_fields()

    # insert — fixed: like pop, the absolute index must be made relative to
    # the extensible cycle start before operating on the cleared fields list
    fields.insert(index - cycle_start, value)

    # add new list
    self.add_fields(*fields)
Returns
-------
list of cleared fields (serialized)
def clear_extensible_fields(self):
    """
    Return the serialized values of all extensible fields.

    NOTE(review): despite its name, this method does not remove the fields
    from the record — it only reads and returns them; callers (pop, insert)
    rely on add_fields re-setting values afterwards. Confirm intended.

    Returns
    -------
    list of cleared fields (serialized)

    Raises
    ------
    TypeError
        if the record is not extensible
    """
    if not self.is_extensible():
        # fixed: error message previously referred to add_fields
        raise TypeError("Can't use clear_extensible_fields on a non extensible record.")
    cycle_start, cycle_len, patterns = self.get_extensible_info()
    return [self.get_serialized_value(i) for i in range(cycle_start, len(self))]
Deletes record and removes it from database.
def delete(self): """ Deletes record, and removes it from database. """ # workflow # -------- # (methods belonging to create/update/delete framework: # epm._dev_populate_from_json_data, table.batch_add, record.update, queryset.delete, record.delete) # 1. unregister: links, hooks and external files # 3. remove from table without unregistering # unregister links self._unregister_links() # unregister hooks self._unregister_hooks() # unregister external files self._unregister_external_files() # tell table to remove without unregistering self.get_table()._dev_remove_record_without_unregistering(self) # make stale self._table = None self._data = None
Parameters
----------
ref_or_index: str or int
    field lowercase name, or field position
def get_field_descriptor(self, ref_or_index):
    """
    Parameters
    ----------
    ref_or_index: str or int
        field lowercase name, or field position

    Returns
    -------
    Field descriptor (info contained in Idd)
    """
    descriptor = self._table._dev_descriptor
    # a string is a field name: translate it to its position first
    if isinstance(ref_or_index, str):
        index = descriptor.get_field_index(ref_or_index)
    else:
        index = ref_or_index
    return descriptor.get_field_descriptor(index)
Parameters
----------
model_name: str, default None
    if given, will be used as the external file directory base name
def to_json_data(self, model_name=None):
    """
    Parameters
    ----------
    model_name: str, default None
        if given, will be used as external file directory base name

    Returns
    -------
    An OrderedDict of serialized data, keyed by field index.
    """
    return collections.OrderedDict(
        (index, self.get_serialized_value(index, model_name=model_name))
        for index in self._data
    )
Parameters
----------
model_name: str, default None
    if given, will be used as the external file directory base name
def to_idf(self, model_name=None):
    """
    Parameters
    ----------
    model_name: str, default None
        if given, will be used as external file directory base name

    Returns
    -------
    idf string
    """
    json_data = self.to_json_data(model_name=model_name)

    # record descriptor ref
    s = f"{self._table._dev_descriptor.table_name},\n"

    # fields
    # fields_nb: we don't use len(self) but max(self). We want to stop if no more values (even base fields)
    # because some idd records are defined without extensibles (although they should used them), for example
    # construction, and eplus does not know what to do...
    fields_nb = max(self._data)+1
    for i in range(fields_nb):
        # value (semicolon terminates the record, comma separates fields)
        tab = " " * TAB_LEN
        raw_value = json_data.get(i, "")
        content = f"{tab}{raw_value}{';' if i == fields_nb-1 else ','}"

        # comment: align to COMMENT_COLUMN_START when possible
        spaces_nb = COMMENT_COLUMN_START - len(content)
        if spaces_nb < 0:
            spaces_nb = TAB_LEN

        # comment
        name = self._table._dev_descriptor.get_extended_name(i)
        comment = "" if name is None else " " * spaces_nb + f"! {name}"

        # store
        s += f"{content}{comment}\n"

    return s
Tested under EPlus 8.1.0 on Windows (Geoffroy).
def check(): """ Tested under EPlus 8.1.0 on Windows (Geoffroy). """ # !! CAN BE VERY LONG epw_path = os.path.join(CONF.eplus_base_dir_path, "WeatherData", "USA_VA_Sterling-Washington.Dulles.Intl.AP.724030_TMY3.epw") idf_dir_path = os.path.join(CONF.eplus_base_dir_path, "ExampleFiles") test_num = 0 for file_num, file_name in enumerate(os.listdir(idf_dir_path)): if file_num < START_FILE_NUM: continue base, ext = os.path.splitext(file_name) if ext == ".idf": with tempfile.TemporaryDirectory() as simulation_dir_path: s = simulate(os.path.join(idf_dir_path, file_name), epw_path, simulation_dir_path if DEBUG_SIMUL_DIR_PATH is None else DEBUG_SIMUL_DIR_PATH) if s.exists("eio"): eio = Eio(s.get_file_path("eio")) # raise error if problem test_num += 1 if test_num == MAX_TESTS_NB: break
Parameters
----------
environment_title_or_num
frequency: str, default None
    'timestep', 'hourly', 'daily', 'monthly', 'annual', 'run_period'. If None, will look for the smallest frequency of the environment.
def get_data(self, environment_title_or_num=-1, frequency=None): """ Parameters ---------- environment_title_or_num frequency: 'str', default None 'timestep', 'hourly', 'daily', 'monthly', 'annual', 'run_period' If None, will look for the smallest frequency of environment. """ # manage environment num if isinstance(environment_title_or_num, int): environment_title = tuple(self._raw_environments.keys())[environment_title_or_num] else: environment_title = environment_title_or_num if environment_title not in self._dfs: raise ValueError(f"No environment named {environment_title}. Available environments: {tuple(self._dfs)}.") # get environment dataframes environment_dfs = self._dfs[environment_title] # find first non null frequency if not given if frequency is None: for frequency in FREQUENCIES: if environment_dfs[frequency] is not None: break # check frequency if frequency not in FREQUENCIES: raise ValueError(f"Unknown frequency: {frequency}. Available frequencies: {FREQUENCIES}") return self._dfs[environment_title][frequency]
This hack is used to document the add function. A method's __doc__ attribute is read-only (or one must use metaclasses, which I certainly don't want to do...). We therefore create a function (whose __doc__ attribute is read/write), and bind it to Table in __init__.
def get_documented_add(self, record_descriptors):
    """
    Build an `add` function whose docstring can be customized.

    This hack is used to document the add function: a method's __doc__
    attribute is read-only (or must use metaclasses, what I certainly don't
    want to do...). We therefore create a function (whose __doc__ attribute
    is read/write), and will bind it to Table in __init__.
    """
    def add(data=None, **or_data):
        """
        Parameters
        ----------
        data: dictionary containing field lowercase names or index as keys, and field values as values (dict syntax)
        or_data: keyword arguments containing field names as keys (kwargs syntax)

        A lowercase name is the lowercase EnergyPlus name, for which all non alpha-numeric characters have been replaced
        by underscores. All multiple consecutive underscores are then replaced by one unique underscore.

        The two syntaxes are not meant to cohabit. The kwargs syntax is nicer, but does not enable to use indexes
        instead of names.

        Examples
        --------
        for Schedule:Compact table:

        schedule = table.add(  # kwarg syntax
            name="Heating Setpoint Schedule - new[1]",
            schedule_type_limits_name="Any Number",
            field_1="Through: 12/31",
            field_2="For: AllDays",
            field_3="Until: 24:00,20.0"
        )

        schedule = table.add({  # dict syntax, mixing names and index keys
            name="Heating Setpoint Schedule - new[1]",
            schedule_type_limits_name="Any Number",
            2="Through: 12/31",
            3="For: AllDays",
            4="Until: 24:00,20.0"
        })

        Returns
        -------
        Created Record instance
        """
        return self.batch_add([or_data if data is None else data])[0]

    # NOTE(review): this assignment replaces the docstring above with the
    # field name list only — confirm it shouldn't be appended instead
    add.__doc__ = "\n".join([fd.ref.lower() for fd in record_descriptors if fd.ref is not None])

    return add
inert: hooks and links are not activated
def _dev_add_inert(self, records_data):
    """
    Create records without activating their hooks and links ("inert" step of
    the create/update/delete framework).

    Returns
    -------
    list of created Record instances
    """
    added_records = []
    for record_data in records_data:
        # create record
        record = Record(self, data=record_data)
        # store — uniqueness is not checked here: it will be done while
        # checking hooks
        self._records[record.get_pk()] = record
        added_records.append(record)
    return added_records
Parameters
----------
filter_by: callable, default None
    Callable must take one argument (a record of the table), and return True to keep the record, or False to skip it. Example: .select(lambda x: x.name == "my_name"). If None, records are not filtered.
def select(self, filter_by=None):
    """
    Parameters
    ----------
    filter_by: callable, default None
        Callable taking one record and returning True to keep it, or False
        to skip it. Example : .select(lambda x: x.name == "my_name").
        If None, no filtering is applied.

    Returns
    -------
    Queryset instance, containing all selected records.
    """
    all_records = self._records.values()
    if filter_by is None:
        selected = all_records
    else:
        selected = filter(filter_by, all_records)
    return Queryset(self, records=selected)
Parameters
----------
filter_by: callable, default None
    Callable must take one argument (a record of the table), and return True to keep the record, or False to skip it. Example: .one(lambda x: x.name == "my_name"). If None, records are not filtered.
def one(self, filter_by=None):
    """
    Parameters
    ----------
    filter_by: callable, default None
        Callable taking one record and returning True to keep it, or False
        to skip it. Example : .one(lambda x: x.name == "my_name").
        If None, records are not filtered.

    Returns
    -------
    Record instance if one and only one record is found. Else raises.

    Raises
    ------
    RecordDoesNotExistError
        if no record is found
    MultipleRecordsReturnedError
        if multiple records are found
    """
    queryset = Queryset(self, records=self._records.values())
    return queryset.one(filter_by=filter_by)
Parameters
----------
records_data: list of dictionaries containing records data. Keys of each dictionary may be field names and/or field indexes.
def batch_add(self, records_data): """ Parameters ---------- records_data: list of dictionaries containing records data. Keys of dictionary may be field names and/or field indexes Returns ------- Queryset instance of added records """ # workflow # -------- # (methods belonging to create/update/delete framework: # epm._dev_populate_from_json_data, table.batch_add, record.update, queryset.delete, record.delete) # 1. add inert # * data is checked # * old links are unregistered # * record is stored in table (=> pk uniqueness is checked) # 2. activate: hooks, links, external files # add inert added_records = self._dev_add_inert(records_data) # activate hooks for r in added_records: r._dev_activate_hooks() # activate links and external files for r in added_records: r._dev_activate_links() r._dev_activate_external_files() return Queryset(self, records=added_records)
target record must have been set
def register_record_hook(self, hook):
    """
    Register a record hook under each of its reference keys.

    The hook's target record must have been set beforehand.

    Raises
    ------
    FieldValidationError
        if one of the keys is already registered
    """
    for key in hook.keys:
        if key not in self._record_hooks:
            self._record_hooks[key] = hook
            continue
        field_descriptor = hook.target_record.get_field_descriptor(hook.target_index)
        raise FieldValidationError(
            f"Reference key already exists, can't create: {key}. "
            f"{field_descriptor.get_error_location_message(hook.target_value, hook.target_index)}"
        )
source record and index must have been set
def register_link(self, link):
    """
    Register a link: find its target (record hook first, then table hook)
    and index the link by source and by target.

    The link's source record and index must have been set.

    Raises
    ------
    FieldValidationError
        if no record nor table hook matches any of the link's references
    """
    # candidate keys: one per hook reference, all sharing the initial value
    keys = tuple((ref, link.initial_hook_value) for ref in link.hook_references)

    # look for a record hook
    for k in keys:
        if k in self._record_hooks:
            # set link target
            link.set_target(target_record=self._record_hooks[k].target_record)
            break
    else:
        # no record hook matched: look for a table hook
        for k in keys:
            if k in self._table_hooks:
                # set link target
                link.set_target(target_table=self._table_hooks[k])
                break
        else:
            # nothing matched at all
            field_descriptor = link.source_record.get_field_descriptor(link.source_index)
            raise FieldValidationError(
                f"No object found with any of given references : {keys}. "
                f"{field_descriptor.get_error_location_message(link.initial_hook_value)}"
            )

    # store by source
    if link.source_record not in self._links_by_source:
        self._links_by_source[link.source_record] = set()
    self._links_by_source[link.source_record].add(link)

    # store by target
    if link.target not in self._links_by_target:
        self._links_by_target[link.target] = set()
    self._links_by_target[link.target].add(link)
Parameters
----------
command: command
cwd: current working directory
stdout: output info stream (must have a 'write' method)
stderr: output error stream (must have a 'write' method)
shell: see subprocess.Popen
beat_freq: if not None, stdout will be written to at least every beat_freq seconds
def run_subprocess(command, cwd=None, stdout=None, stderr=None, shell=False, beat_freq=None):
    """
    Parameters
    ----------
    command: command
    cwd: current working directory
    stdout: output info stream (must have 'write' method)
    stderr: output error stream (must have 'write' method)
    shell: see subprocess.Popen
    beat_freq: if not none, stdout will be used at least every beat_freq (in seconds)

    Returns
    -------
    subprocess return code
    """
    # NOTE(review): sys has no standard 'encoding' attribute — confirm
    # something actually reads sys.encoding, else this line is a no-op
    sys.encoding = CONF.encoding

    # prepare variables
    stdout = sys.stdout if stdout is None else stdout
    stderr = sys.stderr if stderr is None else stderr

    # run subprocess
    with subprocess.Popen(
            command,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            cwd=cwd,
            shell=shell,
            universal_newlines=True
    ) as sub_p:
        # link output streams (redirect_stream pipes the subprocess streams
        # into the given ones)
        with redirect_stream(sub_p.stdout, stdout), redirect_stream(sub_p.stderr, stderr):
            # poll the subprocess, emitting a heartbeat message on timeout
            while True:
                try:
                    sub_p.wait(timeout=beat_freq)
                    break
                except subprocess.TimeoutExpired:
                    stdout.write("subprocess is still running\n")
                    if hasattr(sys.stdout, "flush"):
                        sys.stdout.flush()
        return sub_p.returncode
path_or_content: path, content str, content bytes, string io or bytes io
def get_string_buffer(path_or_content, expected_extension):
    """
    Normalize various input forms into a string buffer.

    path_or_content: path or content_str or content_bts or string_io or bytes_io

    Returns
    -------
    (string_buffer, path) — path is None if the input was not a path
    """
    suffix = ".%s" % expected_extension

    # a str is either a file path (recognized by its extension) or raw content
    if isinstance(path_or_content, str):
        if path_or_content.endswith(suffix):
            if not os.path.isfile(path_or_content):
                raise FileNotFoundError("No file at given path: '%s'." % path_or_content)
            return open(path_or_content, encoding=CONF.encoding), path_or_content
        return io.StringIO(path_or_content), None

    # text io: already usable as is
    if isinstance(path_or_content, io.TextIOBase):
        return path_or_content, None

    # bytes: decode into a string buffer
    if isinstance(path_or_content, bytes):
        return io.StringIO(path_or_content.decode(encoding=CONF.encoding)), None

    # buffered binary io: read then decode
    if isinstance(path_or_content, io.BufferedIOBase):
        return io.StringIO(path_or_content.read().decode(encoding=CONF.encoding)), None

    raise ValueError("path_or_content type could not be identified")
Parameters
----------
simulation_step: if not given, returns a raw report
error_category: if only one argument is specified, swaps the dataframe report
def get_data(self, simulation_step=None, error_category=None):
    """
    Parameters
    ----------
    simulation_step: if not given, returns a raw report
    error_category: if only one argument is specified, swaps dataframe report
    """
    # no filter: return the raw report (all-NaN rows dropped)
    if simulation_step is None and error_category is None:
        return self._df.dropna(axis="rows", how="all")
    if simulation_step is not None:
        if simulation_step not in self._simulation_step_list:
            raise RuntimeError("The simulation_step '%s' is not referred in the error file." % simulation_step)
        if error_category is not None:
            if error_category not in self.CATEGORIES:
                raise RuntimeError("The error_cat '%s' is wrong." % error_category)
            # both filters given: build a single-column multi-indexed frame
            iterables = [simulation_step, error_category]
            columns = pd.MultiIndex.from_product(iterables)
            series = self._df[simulation_step][error_category].dropna(axis="rows", how="all")
            df = pd.DataFrame(index=series.index, columns=columns)
            df[simulation_step] = series
            return df
        # simulation_step only
        return self._df[simulation_step].dropna(axis="rows", how="all")
    # error_category only: swap column levels so the category becomes top level
    if error_category is not None:
        if error_category not in self.CATEGORIES:
            raise RuntimeError("The error_category '%s' is wrong." % error_category)
        df = self._df.copy()
        df.columns = df.columns.swaplevel(0, 1)
        return df[error_category].dropna(axis="rows", how="all")
Create a regex and return it. Returns None if an error occurs.
def _create_regex(self, line, intent_name): """ Create regex and return. If error occurs returns None. """ try: return re.compile(self._create_intent_pattern(line, intent_name), re.IGNORECASE) except sre_constants.error as e: LOG.warning('Failed to parse the line "{}" ' 'for {}'.format(line, intent_name)) return None
Convert a status (id) to its string name.
def str(cls, value):
    '''Convert a status id to its human-readable lowercase name.

    Returns None if no status matches the given value.
    '''
    for name, member_value in cls.__dict__.items():
        # only class-level constants (uppercase first letter) are statuses
        if name[0] in string.ascii_uppercase and member_value == value:
            return name.lower().replace('_', ' ')
Returns the remaining duration for a recording.
def remaining_duration(self, time):
    '''Return the remaining duration of the recording at the given time.

    Zero if the recording is already over; the full duration if it has not
    started yet.
    '''
    effective_start = self.start if self.start > time else time
    remaining = self.end - effective_start
    return remaining if remaining > 0 else 0
Serialize this object as dictionary usable for conversion to JSON.
def serialize(self):
    '''Serialize this object as dictionary usable for conversion to JSON.

    :return: Dictionary representing this object.
    '''
    attributes = {
        'start': self.start,
        'end': self.end,
        'uid': self.uid,
        'title': self.title,
        'data': self.get_data(),
        'status': Status.str(self.status),
    }
    return {
        'type': 'event',
        'id': self.uid,
        'attributes': attributes,
    }
Make an HTTP request to a given URL with optional parameters.
def http_request(url, post_data=None):
    '''Make an HTTP request to a given URL with optional parameters.

    Uses digest authentication with the configured server credentials.
    Raises pycurl.error on HTTP error codes (FAILONERROR is set).

    :param url: URL to request
    :param post_data: optional POST parameters (list of (name, value) tuples)
    :return: response body as bytes
    '''
    logger.debug('Requesting URL: %s' % url)
    buf = bio()
    curl = pycurl.Curl()
    curl.setopt(curl.URL, url.encode('ascii', 'ignore'))

    # Disable HTTPS verification methods if insecure is set
    if config()['server']['insecure']:
        curl.setopt(curl.SSL_VERIFYPEER, 0)
        curl.setopt(curl.SSL_VERIFYHOST, 0)

    if config()['server']['certificate']:
        # Make sure verification methods are turned on
        curl.setopt(curl.SSL_VERIFYPEER, 1)
        curl.setopt(curl.SSL_VERIFYHOST, 2)
        # Import your certificates
        curl.setopt(pycurl.CAINFO, config()['server']['certificate'])

    if post_data:
        curl.setopt(curl.HTTPPOST, post_data)
    curl.setopt(curl.WRITEFUNCTION, buf.write)
    # digest auth with configured credentials
    curl.setopt(pycurl.HTTPAUTH, pycurl.HTTPAUTH_DIGEST)
    curl.setopt(pycurl.USERPWD, "%s:%s" % (config()['server']['username'],
                                           config()['server']['password']))
    curl.setopt(curl.HTTPHEADER, ['X-Requested-Auth: Digest'])
    # raise on HTTP errors, follow redirects
    curl.setopt(curl.FAILONERROR, True)
    curl.setopt(curl.FOLLOWLOCATION, True)
    curl.perform()
    curl.close()
    result = buf.getvalue()
    buf.close()
    return result
Get available service endpoints for a given service type from the Opencast ServiceRegistry.
def get_service(service_type):
    '''Get available service endpoints for a given service type from the
    Opencast ServiceRegistry.
    '''
    endpoint = '/services/available.json?serviceType=' + str(service_type)
    url = '%s%s' % (config()['server']['url'], endpoint)
    response = http_request(url).decode('utf-8')

    services = (json.loads(response).get('services') or {}).get('service', [])
    services = ensurelist(services)

    # keep only services that are both online and active
    endpoints = [srv['host'] + srv['path']
                 for srv in services
                 if srv['online'] and srv['active']]

    # distinct loop variable: don't shadow the `endpoint` path above
    for endpoint_url in endpoints:
        logger.info(u'Endpoint for %s: %s', service_type, endpoint_url)

    return endpoints
Convert a datetime into a unix timestamp. This is the equivalent of Python 3's int(datetime.timestamp()).
def unix_ts(dtval):
    '''Convert datetime into a unix timestamp (whole seconds).

    This is the equivalent to Python 3's int(datetime.timestamp()).

    :param dtval: timezone-aware datetime to convert
    '''
    # stdlib timezone.utc replaces the third-party dateutil tzutc();
    # local import keeps the module's dependency list unchanged
    from datetime import timezone
    epoch = datetime(1970, 1, 1, 0, 0, tzinfo=timezone.utc)
    delta = (dtval - epoch)
    return delta.days * 24 * 3600 + delta.seconds
Try to create a directory. Pass without error if it already exists.
def try_mkdir(directory):
    '''Try to create a directory. Pass without error if it already exists.
    '''
    try:
        os.mkdir(directory)
    except FileExistsError:
        # Python 3 maps errno.EEXIST to FileExistsError (PEP 3151):
        # no manual errno comparison needed; other OSErrors still propagate
        pass
Get the location of a given service from Opencast and add it to the current configuration.
def configure_service(service):
    '''Get the location of a given service from Opencast and add it to the
    current configuration.

    Retries every 5 seconds until the endpoint is resolved or termination is
    requested.

    :param service: Opencast service name (without the org.opencastproject.
        prefix)
    '''
    while not config().get('service-' + service) and not terminate():
        try:
            config()['service-' + service] = \
                get_service('org.opencastproject.' + service)
        except pycurl.error as e:
            logger.error('Could not get %s endpoint: %s. Retrying in 5s' %
                         (service, e))
            time.sleep(5.0)
Register this capture agent at the Matterhorn admin server so that it shows up in the admin interface.
def register_ca(status='idle'): '''Register this capture agent at the Matterhorn admin server so that it shows up in the admin interface. :param address: Address of the capture agent web ui :param status: Current status of the capture agent ''' # If this is a backup CA we don't tell the Matterhorn core that we are # here. We will just run silently in the background: if config()['agent']['backup_mode']: return params = [('address', config()['ui']['url']), ('state', status)] name = urlquote(config()['agent']['name'].encode('utf-8'), safe='') url = '%s/agents/%s' % (config()['service-capture.admin'][0], name) try: response = http_request(url, params).decode('utf-8') if response: logger.info(response) except pycurl.error as e: logger.warning('Could not set agent state to %s: %s' % (status, e))
Send the state of the current recording to the Matterhorn core.
def recording_state(recording_id, status): '''Send the state of the current recording to the Matterhorn core. :param recording_id: ID of the current recording :param status: Status of the recording ''' # If this is a backup CA we do not update the recording state since the # actual CA does that and we want to interfere. We will just run silently # in the background: if config()['agent']['backup_mode']: return params = [('state', status)] url = config()['service-capture.admin'][0] url += '/recordings/%s' % recording_id try: result = http_request(url, params) logger.info(result) except pycurl.error as e: logger.warning('Could not set recording state to %s: %s' % (status, e))
Update the status of a particular event in the database.
def update_event_status(event, status):
    '''Update the status of a particular event in the database.

    The in-memory event object is updated as well.

    :param event: event whose status should change (matched by start time)
    :param status: new status value
    '''
    dbs = db.get_session()
    dbs.query(db.RecordedEvent).filter(db.RecordedEvent.start == event.start)\
       .update({'status': status})
    event.status = status
    dbs.commit()
    # NOTE(review): the session is not closed here, unlike in
    # set_service_status — confirm whether that is intentional
Update the status of a particular service in the database.
def set_service_status(service, status):
    '''Update the status of a particular service in the database.

    :param service: service type identifier
    :param status: new status value
    '''
    service_state = db.ServiceStates()
    service_state.type = service
    service_state.status = status

    session = db.get_session()
    # merge: insert or update the row for this service type
    session.merge(service_state)
    session.commit()
    session.close()
Get the status of a particular service from the database.
def get_service_status(service):
    '''Get the status of a particular service from the database.

    :param service: service type identifier
    :return: stored status, or db.ServiceStatus.STOPPED if the service has no
        stored state yet
    '''
    dbs = db.get_session()
    srvs = dbs.query(db.ServiceStates).filter(db.ServiceStates.type == service)

    if srvs.count():
        return srvs[0].status

    return db.ServiceStatus.STOPPED
Update the current agent state in opencast.
def update_agent_state():
    '''Update the current agent state in opencast.

    The reported state is derived from the local service states, by priority:
    offline > capturing > uploading > idle.
    '''
    configure_service('capture.admin')

    # determine reported agent state with priority list
    if get_service_status(db.Service.SCHEDULE) == db.ServiceStatus.STOPPED:
        status = 'offline'
    elif get_service_status(db.Service.CAPTURE) == db.ServiceStatus.BUSY:
        status = 'capturing'
    elif get_service_status(db.Service.INGEST) == db.ServiceStatus.BUSY:
        status = 'uploading'
    else:
        status = 'idle'

    register_ca(status=status)
Find the best match for the configuration file.
def configuration_file(cfgfile):
    '''Determine which configuration file to use.

    An explicitly specified file always wins. Otherwise probe for a
    configuration file in the working directory and fall back to the
    system-wide location.

    :param cfgfile: Explicitly requested configuration file or None
    :return: Path of the configuration file to load
    '''
    if cfgfile is not None:
        return cfgfile
    local_cfg = './etc/pyca.conf'
    if os.path.isfile(local_cfg):
        return local_cfg
    return '/etc/pyca.conf'
Update configuration from file.
def update_configuration(cfgfile=None):
    '''Update configuration from file.

    Loads and validates the configuration, stores it in the module-level
    `__config` global, and (re)initializes logging.

    :param cfgfile: Configuration file to load.
    :raises ValueError: if the configuration fails validation or the lists
        of capture files and flavors differ in length.
    :return: The loaded configobj configuration object.
    '''
    # Use template-style ($var) interpolation for configobj values
    configobj.DEFAULT_INTERPOLATION = 'template'
    cfgfile = configuration_file(cfgfile)
    cfg = configobj.ConfigObj(cfgfile, configspec=cfgspec, encoding='utf-8')
    validator = Validator()
    val = cfg.validate(validator)
    if val is not True:
        raise ValueError('Invalid configuration: %s' % val)
    # Every capture file needs exactly one matching flavor
    if len(cfg['capture']['files']) != len(cfg['capture']['flavors']):
        raise ValueError('List of files and flavors do not match')
    # Publish the configuration via the module-level global read by config()
    globals()['__config'] = cfg
    logger_init()
    if cfg['server'].get('url', '').endswith('/'):
        logger.warning('Base URL ends with /. This is most likely a '
                       'configuration error. The URL should contain nothing '
                       'of the service paths.')
    logger.info('Configuration loaded from %s' % cfgfile)
    check()
    return cfg
Check configuration for sanity.
def check():
    '''Run sanity checks on the loaded configuration and warn about
    potentially dangerous settings.
    '''
    server = config('server')
    if server['insecure']:
        logger.warning('HTTPS CHECKS ARE TURNED OFF. A SECURE CONNECTION IS '
                       'NOT GUARANTEED')
    certificate = server['certificate']
    if certificate:
        # Ensure certificate exists and is readable; fails loudly otherwise
        open(certificate, 'rb').close()
    if config('agent')['backup_mode']:
        logger.info('Agent runs in backup mode. No data will be sent to '
                    'Opencast')
Initialize logger based on configuration
def logger_init():
    '''Set up logging handlers and the log level according to the
    configuration.
    '''
    logconf = config('logging')
    handlers = []
    if logconf['syslog']:
        handlers.append(logging.handlers.SysLogHandler(address='/dev/log'))
    if logconf['stderr']:
        handlers.append(logging.StreamHandler(sys.stderr))
    if logconf['file']:
        handlers.append(logging.handlers.WatchedFileHandler(logconf['file']))
    for handler in handlers:
        # Each handler gets its own formatter built from the configured format
        handler.setFormatter(logging.Formatter(logconf['format']))
        logging.root.addHandler(handler)

    logging.root.setLevel(logconf['level'].upper())
    logger.info('Log level set to %s' % logconf['level'])
Serve the status page of the capture agent.
def home(): '''Serve the status page of the capture agent. ''' # Get IDs of existing preview images preview = config()['capture']['preview'] previewdir = config()['capture']['preview_dir'] preview = [p.replace('{{previewdir}}', previewdir) for p in preview] preview = zip(preview, range(len(preview))) preview = [p[1] for p in preview if os.path.isfile(p[0])] # Get limits for recording table try: limit_upcoming = int(request.args.get('limit_upcoming', 5)) limit_processed = int(request.args.get('limit_processed', 15)) except ValueError: limit_upcoming = 5 limit_processed = 15 db = get_session() upcoming_events = db.query(UpcomingEvent)\ .order_by(UpcomingEvent.start)\ .limit(limit_upcoming) recorded_events = db.query(RecordedEvent)\ .order_by(RecordedEvent.start.desc())\ .limit(limit_processed) recording = get_service_status(Service.CAPTURE) == ServiceStatus.BUSY uploading = get_service_status(Service.INGEST) == ServiceStatus.BUSY processed = db.query(RecordedEvent).count() upcoming = db.query(UpcomingEvent).count() return render_template('home.html', preview=preview, config=config(), recorded_events=recorded_events, upcoming_events=upcoming_events, recording=recording, uploading=uploading, processed=processed, upcoming=upcoming, limit_upcoming=limit_upcoming, limit_processed=limit_processed, dtfmt=dtfmt)
Serve the preview image with the given id
def serve_image(image_id):
    '''Serve the preview image with the given id.

    Responds with 404 if the id is out of range, the configuration keys are
    missing or the file does not exist.
    '''
    try:
        capture_conf = config()['capture']
        path = capture_conf['preview'][image_id]
        path = path.replace('{{previewdir}}', capture_conf['preview_dir'])
        path = os.path.abspath(path)
        if os.path.isfile(path):
            directory, filename = path.rsplit('/', 1)
            return send_from_directory(directory, filename)
    except (IndexError, KeyError):
        # Invalid image id or incomplete configuration: fall through to 404
        pass
    return '', 404
Intercept sigterm and terminate all processes.
def sigterm_handler(signum, frame):
    '''Handle SIGTERM: run the SIGINT handler, stop all child processes
    and exit.
    '''
    sigint_handler(signum, frame)
    for child in multiprocessing.active_children():
        child.terminate()
    sys.exit(0)
Start all services.
def run_all(*modules):
    '''Run the given service modules, each in its own process, and wait
    for all of them to finish.

    :param modules: Modules exposing a `run` callable to execute
    '''
    workers = [multiprocessing.Process(target=mod.run) for mod in modules]
    for worker in workers:
        worker.start()
    # Block until every service process has terminated
    for worker in workers:
        worker.join()
Parse Opencast schedule iCalendar file and return events as dict
def parse_ical(vcal):
    '''Parse an Opencast schedule iCalendar string and return its events
    as a list of dictionaries.

    :param vcal: iCalendar data as string
    :return: List of event dictionaries; attachments are collected under
        the 'attach' key, dt* properties are converted to Unix timestamps
    '''
    # Unfold folded lines and collapse blank lines
    vcal = vcal.replace('\r\n ', '').replace('\r\n\r\n', '\r\n')
    # Everything before the first VEVENT is calendar header -> drop it
    chunks = vcal.split('\r\nBEGIN:VEVENT\r\n')[1:]
    events = []
    for chunk in chunks:
        event = {}
        for raw in chunk.split('\r\n'):
            parts = raw.split(':', 1)
            key = parts[0].lower()
            if len(parts) <= 1 or key == 'end':
                continue
            if key.startswith('dt'):
                # Date/time properties become Unix timestamps
                event[key] = unix_ts(dateutil.parser.parse(parts[1]))
            elif not key.startswith('attach'):
                event[key] = parts[1]
            else:
                # Attachments: collect name/type parameters and decode data
                attachments = event.setdefault('attach', [])
                attachment = {}
                for prop in (p.split('=') for p in parts[0].split(';')):
                    if prop[0].lower() in ('fmttype', 'x-apple-filename'):
                        attachment[prop[0].lower()] = prop[1]
                attachment['data'] = b64decode(parts[1]).decode('utf-8')
                attachments.append(attachment)
        events.append(event)
    return events
Try to load schedule from the Matterhorn core. Returns a valid schedule or None on failure.
def get_schedule():
    '''Try to load schedule from the Matterhorn core. Returns a valid
    schedule or None on failure.

    On success the cached upcoming events in the database are replaced by
    the freshly fetched ones. Network or parse errors are logged and leave
    the existing cache untouched.
    '''
    params = {'agentid': config()['agent']['name'].encode('utf8')}
    lookahead = config()['agent']['cal_lookahead'] * 24 * 60 * 60
    if lookahead:
        # Core expects the cutoff timestamp in milliseconds
        params['cutoff'] = str((timestamp() + lookahead) * 1000)
    uri = '%s/calendars?%s' % (config()['service-scheduler'][0],
                               urlencode(params))
    try:
        vcal = http_request(uri)
    except pycurl.error as e:
        logger.error('Could not get schedule: %s' % e)
        return
    try:
        cal = parse_ical(vcal.decode('utf-8'))
    except Exception:
        logger.error('Could not parse ical')
        logger.error(traceback.format_exc())
        return
    # Replace the cached upcoming events with the fetched schedule
    db = get_session()
    db.query(UpcomingEvent).delete()
    for event in cal:
        # Ignore events that have already ended
        if event['dtend'] <= timestamp():
            continue
        e = UpcomingEvent()
        e.start = event['dtstart']
        e.end = event['dtend']
        e.uid = event.get('uid')
        e.title = event.get('summary')
        e.set_data(event)
        db.add(e)
    db.commit()
Main loop retrieving the schedule.
def control_loop():
    '''Main loop, retrieving the schedule.

    Periodically refreshes the schedule from the core, logs the next
    scheduled recording and reports liveness to systemd until termination
    is requested.
    '''
    set_service_status(Service.SCHEDULE, ServiceStatus.BUSY)
    notify.notify('READY=1')
    while not terminate():
        notify.notify('WATCHDOG=1')
        # Try getting an updated schedule
        get_schedule()
        # Report the next recording that has not ended yet
        session = get_session()
        next_event = session.query(UpcomingEvent)\
                            .filter(UpcomingEvent.end > timestamp())\
                            .order_by(UpcomingEvent.start)\
                            .first()
        if next_event:
            logger.info('Next scheduled recording: %s',
                        datetime.fromtimestamp(next_event.start))
            notify.notify('STATUS=Next scheduled recording: %s' %
                          datetime.fromtimestamp(next_event.start))
        else:
            logger.info('No scheduled recording')
            notify.notify('STATUS=No scheduled recording')
        session.close()
        # Sleep in short intervals so a termination request is seen quickly
        next_update = timestamp() + config()['agent']['update_frequency']
        while not terminate() and timestamp() < next_update:
            time.sleep(0.1)
    logger.info('Shutting down schedule service')
    set_service_status(Service.SCHEDULE, ServiceStatus.STOPPED)
Main loop updating the capture agent state.
def control_loop():
    '''Main loop, updating the capture agent state.

    Reports the agent state to Opencast at the configured frequency until
    termination is requested, notifying systemd about liveness on the way.
    '''
    set_service_status(Service.AGENTSTATE, ServiceStatus.BUSY)
    notify.notify('READY=1')
    notify.notify('STATUS=Running')

    while not terminate():
        notify.notify('WATCHDOG=1')
        update_agent_state()
        # Sleep in small steps so a termination request is noticed quickly
        deadline = timestamp() + config()['agent']['update_frequency']
        while not terminate() and timestamp() < deadline:
            time.sleep(0.1)

    logger.info('Shutting down agentstate service')
    set_service_status(Service.AGENTSTATE, ServiceStatus.STOPPED)
Return a response with a jsonapi error object
def make_error_response(error, status=500):
    '''Return a response with a jsonapi error object.

    :param error: Error title to report
    :param status: HTTP status code of the response (default: 500)
    '''
    body = {
        'errors': [{
            'status': status,
            'title': error
        }]
    }
    return make_response(jsonify(body), status)
Return a response with a list of jsonapi data objects
def make_data_response(data, status=200):
    '''Return a response with a list of jsonapi data objects.

    :param data: Single data object or list of data objects
    :param status: HTTP status code of the response (default: 200)
    '''
    return make_response(jsonify({'data': ensurelist(data)}), status)
Serve a json representation of internal agentstate as meta data
def internal_state():
    '''Serve a json representation of internal agentstate as meta data.
    '''
    services = {}
    for name, service in (('capture', Service.CAPTURE),
                          ('ingest', Service.INGEST),
                          ('schedule', Service.SCHEDULE),
                          ('agentstate', Service.AGENTSTATE)):
        services[name] = ServiceStatus.str(get_service_status(service))
    return make_response(jsonify({'meta': {'services': services}}))
Serve a JSON representation of events
def events():
    '''Serve a JSON representation of events, upcoming ones first,
    followed by recorded ones (newest first).
    '''
    session = get_session()
    upcoming = session.query(UpcomingEvent)\
                      .order_by(UpcomingEvent.start)
    recorded = session.query(RecordedEvent)\
                      .order_by(RecordedEvent.start.desc())
    serialized = [e.serialize() for e in upcoming]
    serialized.extend(e.serialize() for e in recorded)
    return make_data_response(serialized)
Return a specific events JSON
def event(uid):
    '''Return a specific events JSON.

    Recorded events take precedence over upcoming ones with the same uid.
    Responds with 404 if no event matches.
    '''
    session = get_session()
    match = session.query(RecordedEvent)\
                   .filter(RecordedEvent.uid == uid).first()
    if match is None:
        match = session.query(UpcomingEvent)\
                       .filter(UpcomingEvent.uid == uid).first()
    if match:
        return make_data_response(match.serialize())
    return make_error_response('No event with specified uid', 404)
Delete a specific event identified by its uid. Note that only recorded events can be deleted. Events in the buffer for upcoming events are regularly replaced anyway and a manual removal could have unpredictable effects.
def delete_event(uid):
    '''Delete a specific event identified by its uid. Note that only
    recorded events can be deleted. Events in the buffer for upcoming
    events are regularly replaced anyway and a manual removal could have
    unpredictable effects.

    Use ?hard=true parameter to delete the recorded files on disk as well.

    Returns 204 if the action was successful.
    Returns 404 if event does not exist.
    '''
    logger.info('deleting event %s via api', uid)
    session = get_session()
    query = session.query(RecordedEvent).filter(RecordedEvent.uid == uid)
    if not query.count():
        return make_error_response('No event with specified uid', 404)
    if request.args.get('hard', 'false') == 'true':
        # Hard delete: remove the recording directory from disk as well
        logger.info('deleting recorded files at %s', query[0].directory())
        shutil.rmtree(query[0].directory())
    query.delete()
    session.commit()
    return make_response('', 204)
Modify an event specified by its uid. The modifications for the event are expected as JSON with the content type correctly set in the request.
def modify_event(uid):
    '''Modify an event specified by its uid. The modifications for the event
    are expected as JSON with the content type correctly set in the request.

    Note that this method works for recorded events only. Upcoming events
    part of the scheduler cache cannot be modified.

    Only the `status`, `start` and `end` attributes may be changed; any
    other attribute or malformed payload yields a 400 response.
    '''
    try:
        # jsonapi payload: single event resource whose id must match the URL
        data = request.get_json()['data'][0]
        if data['type'] != 'event' or data['id'] != uid:
            return make_error_response('Invalid data', 400)
        # Check attributes
        for key in data['attributes'].keys():
            if key not in ('status', 'start', 'end'):
                return make_error_response('Invalid data', 400)
        # Check new status: translate human readable name (e.g. "finished
        # uploading") into the numeric Status constant; unknown names raise
        # and are answered with 400 via the except below
        new_status = data['attributes'].get('status')
        if new_status:
            new_status = new_status.upper().replace(' ', '_')
            data['attributes']['status'] = int(getattr(Status, new_status))
    except Exception:
        # Any malformed input (missing keys, bad JSON, unknown status)
        return make_error_response('Invalid data', 400)
    db = get_session()
    event = db.query(RecordedEvent).filter(RecordedEvent.uid == uid).first()
    if not event:
        return make_error_response('No event with specified uid', 404)
    # Apply only the provided attributes, keeping current values otherwise
    event.start = data['attributes'].get('start', event.start)
    event.end = data['attributes'].get('end', event.end)
    event.status = data['attributes'].get('status', event.status)
    logger.debug('Updating event %s via api', uid)
    db.commit()
    return make_data_response(event.serialize())
Extract the set of configuration parameters from the properties attached to the schedule
def get_config_params(properties):
    '''Extract the workflow definition and the set of workflow configuration
    parameters from the properties attached to the schedule.

    :param properties: Newline separated key=value property string
    :return: Tuple of workflow definition id and list of (key, value)
        configuration pairs
    '''
    workflow_config = []
    workflow_def = ''
    config_prefix = 'org.opencastproject.workflow.config'
    definition_prefix = 'org.opencastproject.workflow.definition'
    for line in properties.split('\n'):
        if line.startswith(config_prefix):
            # Configuration keys keep only their last dotted component
            name, value = line.split('=', 1)
            workflow_config.append((name.split('.')[-1], value))
        elif line.startswith(definition_prefix):
            workflow_def = line.split('=', 1)[-1]
    return workflow_def, workflow_config
Ingest a finished recording to the Opencast server.
def ingest(event):
    '''Ingest a finished recording to the Opencast server.

    :param event: The recorded event to upload.
    '''
    # Update status
    set_service_status(Service.INGEST, ServiceStatus.BUSY)
    notify.notify('STATUS=Uploading')
    recording_state(event.uid, 'uploading')
    update_event_status(event, Status.UPLOADING)

    # Select ingest service
    # The ingest service to use is selected at random from the available
    # ingest services to ensure that not every capture agent uses the same
    # service at the same time
    service = config('service-ingest')
    service = service[randrange(0, len(service))]
    logger.info('Selecting ingest service to use: ' + service)

    # create mediapackage
    logger.info('Creating new mediapackage')
    mediapackage = http_request(service + '/createMediaPackage')

    # extract workflow_def, workflow_config and add DC catalogs
    # BUGFIX: initialize with safe defaults so a schedule without an agent
    # properties attachment no longer raises a NameError further down.
    workflow_def = ''
    workflow_config = []
    prop = 'org.opencastproject.capture.agent.properties'
    dcns = 'http://www.opencastproject.org/xsd/1.0/dublincore/'
    # BUGFIX: an event without any attachments previously crashed with a
    # TypeError (iterating None); treat a missing 'attach' key as empty.
    for attachment in event.get_data().get('attach') or []:
        data = attachment.get('data')
        if attachment.get('x-apple-filename') == prop:
            workflow_def, workflow_config = get_config_params(data)

        # Check for dublincore catalogs
        elif attachment.get('fmttype') == 'application/xml' and dcns in data:
            name = attachment.get('x-apple-filename', '').rsplit('.', 1)[0]
            logger.info('Adding %s DC catalog' % name)
            fields = [('mediaPackage', mediapackage),
                      ('flavor', 'dublincore/%s' % name),
                      ('dublinCore', data.encode('utf-8'))]
            mediapackage = http_request(service + '/addDCCatalog', fields)

    # add track
    for (flavor, track) in event.get_tracks():
        logger.info('Adding track ({0} -> {1})'.format(flavor, track))
        track = track.encode('ascii', 'ignore')
        fields = [('mediaPackage', mediapackage),
                  ('flavor', flavor),
                  ('BODY1', (pycurl.FORM_FILE, track))]
        mediapackage = http_request(service + '/addTrack', fields)

    # ingest
    logger.info('Ingest recording')
    fields = [('mediaPackage', mediapackage)]
    if workflow_def:
        fields.append(('workflowDefinitionId', workflow_def))
    if event.uid:
        fields.append(('workflowInstanceId',
                       event.uid.encode('ascii', 'ignore')))
    fields += workflow_config
    mediapackage = http_request(service + '/ingest', fields)

    # Update status
    recording_state(event.uid, 'upload_finished')
    update_event_status(event, Status.FINISHED_UPLOADING)
    notify.notify('STATUS=Running')
    set_service_status_immediate(Service.INGEST, ServiceStatus.IDLE)
    logger.info('Finished ingest')
Start an ingest process but make sure to catch any errors during this process, log them but otherwise ignore them.
def safe_start_ingest(event):
    '''Start ingesting a recorded event but make sure to catch any errors
    during this process, log them but otherwise ignore them so the ingest
    service keeps running.

    :param event: The recorded event to upload.
    '''
    try:
        ingest(event)
    except Exception:
        logger.error('Something went wrong during the upload')
        logger.error(traceback.format_exc())
        # Update state if something went wrong
        recording_state(event.uid, 'upload_error')
        update_event_status(event, Status.FAILED_UPLOADING)
        set_service_status_immediate(Service.INGEST, ServiceStatus.IDLE)
Main loop of the ingest service, checking the database for recordings that have finished and starting the upload process if necessary.
def control_loop():
    '''Main loop of the ingest service: watch the database for recordings
    which have finished capturing and hand them over to the ingest routine.
    '''
    set_service_status(Service.INGEST, ServiceStatus.IDLE)
    notify.notify('READY=1')
    notify.notify('STATUS=Running')
    while not terminate():
        notify.notify('WATCHDOG=1')
        # Get next recording
        event = get_session().query(RecordedEvent)\
            .filter(RecordedEvent.status == Status.FINISHED_RECORDING).first()
        if event:
            safe_start_ingest(event)
        time.sleep(1.0)
    logger.info('Shutting down ingest service')
    set_service_status(Service.INGEST, ServiceStatus.STOPPED)
Intercept sigterm and terminate all processes.
def sigterm_handler(signum, frame):
    '''Intercept SIGTERM: stop a running capture process, flag termination
    and exit.
    '''
    proc = captureproc
    if proc is not None and proc.poll() is None:
        # Capture subprocess is still running; ask it to stop
        proc.terminate()
    terminate(True)
    sys.exit(0)