partition
stringclasses 3
values | func_name
stringlengths 1
134
| docstring
stringlengths 1
46.9k
| path
stringlengths 4
223
| original_string
stringlengths 75
104k
| code
stringlengths 75
104k
| docstring_tokens
listlengths 1
1.97k
| repo
stringlengths 7
55
| language
stringclasses 1
value | url
stringlengths 87
315
| code_tokens
listlengths 19
28.4k
| sha
stringlengths 40
40
|
|---|---|---|---|---|---|---|---|---|---|---|---|
test
|
get_overrides_filename
|
Get the name of the file containing configuration overrides
from the provided environment variable.
|
ecommerce_worker/configuration/__init__.py
|
def get_overrides_filename(variable):
"""
Get the name of the file containing configuration overrides
from the provided environment variable.
"""
filename = os.environ.get(variable)
if filename is None:
msg = 'Please set the {} environment variable.'.format(variable)
raise EnvironmentError(msg)
return filename
|
def get_overrides_filename(variable):
"""
Get the name of the file containing configuration overrides
from the provided environment variable.
"""
filename = os.environ.get(variable)
if filename is None:
msg = 'Please set the {} environment variable.'.format(variable)
raise EnvironmentError(msg)
return filename
|
[
"Get",
"the",
"name",
"of",
"the",
"file",
"containing",
"configuration",
"overrides",
"from",
"the",
"provided",
"environment",
"variable",
"."
] |
edx/ecommerce-worker
|
python
|
https://github.com/edx/ecommerce-worker/blob/55246961d805b1f64d661a5c0bae0a216589401f/ecommerce_worker/configuration/__init__.py#L9-L20
|
[
"def",
"get_overrides_filename",
"(",
"variable",
")",
":",
"filename",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"variable",
")",
"if",
"filename",
"is",
"None",
":",
"msg",
"=",
"'Please set the {} environment variable.'",
".",
"format",
"(",
"variable",
")",
"raise",
"EnvironmentError",
"(",
"msg",
")",
"return",
"filename"
] |
55246961d805b1f64d661a5c0bae0a216589401f
|
test
|
get_output_files_layout
|
Parameters
----------
output_category: str
inputs: epw, idf
table: summary table
other: other
|
oplus/compatibility/outputs.py
|
def get_output_files_layout(output_category):
"""
Parameters
----------
output_category: str
inputs: epw, idf
table: summary table
other: other
"""
# check category
if output_category not in ("inputs", "table", "other"):
raise RuntimeError(f"unknown {output_category}")
# get version dict
layouts = _layouts_matrix[OS_NAME][output_category]
# get version
return get_value_by_version(layouts)
|
def get_output_files_layout(output_category):
"""
Parameters
----------
output_category: str
inputs: epw, idf
table: summary table
other: other
"""
# check category
if output_category not in ("inputs", "table", "other"):
raise RuntimeError(f"unknown {output_category}")
# get version dict
layouts = _layouts_matrix[OS_NAME][output_category]
# get version
return get_value_by_version(layouts)
|
[
"Parameters",
"----------",
"output_category",
":",
"str",
"inputs",
":",
"epw",
"idf",
"table",
":",
"summary",
"table",
"other",
":",
"other"
] |
openergy/oplus
|
python
|
https://github.com/openergy/oplus/blob/f095868d1990c1d126e906ada6acbab26348b3d3/oplus/compatibility/outputs.py#L60-L77
|
[
"def",
"get_output_files_layout",
"(",
"output_category",
")",
":",
"# check category",
"if",
"output_category",
"not",
"in",
"(",
"\"inputs\"",
",",
"\"table\"",
",",
"\"other\"",
")",
":",
"raise",
"RuntimeError",
"(",
"f\"unknown {output_category}\"",
")",
"# get version dict",
"layouts",
"=",
"_layouts_matrix",
"[",
"OS_NAME",
"]",
"[",
"output_category",
"]",
"# get version",
"return",
"get_value_by_version",
"(",
"layouts",
")"
] |
f095868d1990c1d126e906ada6acbab26348b3d3
|
test
|
get_value_by_version
|
Finds the value depending in current eplus version.
Parameters
----------
d: dict
{(0, 0): value, (x, x): value, ...}
for current version (cv), current value is the value of version v such as v <= cv < v+1
|
oplus/compatibility/util.py
|
def get_value_by_version(d):
"""
Finds the value depending in current eplus version.
Parameters
----------
d: dict
{(0, 0): value, (x, x): value, ...}
for current version (cv), current value is the value of version v such as v <= cv < v+1
"""
from oplus import CONF # touchy import
cv = CONF.eplus_version[:2]
for v, value in sorted(d.items(), reverse=True):
if cv >= v:
return value
|
def get_value_by_version(d):
"""
Finds the value depending in current eplus version.
Parameters
----------
d: dict
{(0, 0): value, (x, x): value, ...}
for current version (cv), current value is the value of version v such as v <= cv < v+1
"""
from oplus import CONF # touchy import
cv = CONF.eplus_version[:2]
for v, value in sorted(d.items(), reverse=True):
if cv >= v:
return value
|
[
"Finds",
"the",
"value",
"depending",
"in",
"current",
"eplus",
"version",
"."
] |
openergy/oplus
|
python
|
https://github.com/openergy/oplus/blob/f095868d1990c1d126e906ada6acbab26348b3d3/oplus/compatibility/util.py#L27-L43
|
[
"def",
"get_value_by_version",
"(",
"d",
")",
":",
"from",
"oplus",
"import",
"CONF",
"# touchy import",
"cv",
"=",
"CONF",
".",
"eplus_version",
"[",
":",
"2",
"]",
"for",
"v",
",",
"value",
"in",
"sorted",
"(",
"d",
".",
"items",
"(",
")",
",",
"reverse",
"=",
"True",
")",
":",
"if",
"cv",
">=",
"v",
":",
"return",
"value"
] |
f095868d1990c1d126e906ada6acbab26348b3d3
|
test
|
switch_to_datetime_instants
|
works inplace
|
oplus/standard_output/switch_instants.py
|
def switch_to_datetime_instants(df, start_year, eplus_frequency):
"""
works inplace
"""
# timestep -> monthly
if eplus_frequency in (TIMESTEP, DAILY, HOURLY, MONTHLY):
# prepare year switch
if eplus_frequency in (TIMESTEP, HOURLY, DAILY):
# print((df[["month", "day"]] - df[["month", "day"]].shift()) == pd.Series([-12, -31]))
year_counter = (
(df[["month", "day"]] - df[["month", "day"]].shift()) ==
pd.Series(dict(month=12, day=-31))
).all(axis=1).cumsum()
else:
year_counter = ((df["month"] - df["month"].shift()) == -12).cumsum()
# add year columns
df["year"] = year_counter + start_year
# create index
columns = {
TIMESTEP: ("year", "month", "day", "hour", "minute"),
HOURLY: ("year", "month", "day", "hour"),
DAILY: ("year", "month", "day"),
MONTHLY: ("year", "month")
}[eplus_frequency]
if eplus_frequency == MONTHLY:
df.index = df.apply( # apply transforms ints to floats, we need to re-cast
lambda x: dt.datetime(*(tuple(int(x[k]) for k in columns) + (1,))),
axis=1
)
else:
df.index = df.apply(lambda x: dt.datetime(*(int(x[k]) for k in columns)), axis=1)
# drop old columns
df.drop(columns=list(columns), inplace=True)
# force frequency
if eplus_frequency == TIMESTEP:
# find freq
ts = df.index[1] - df.index[0]
# force
forced_df = df.asfreq(ts)
else:
forced_df = df.asfreq({
HOURLY: "H",
DAILY: "D",
MONTHLY: "MS"
}[eplus_frequency])
# if timestep, hourly or daily, check did not change (only those can suffer from leap year problems)
if eplus_frequency in (TIMESTEP, HOURLY, DAILY):
try:
assert_index_equal(df.index, forced_df.index)
except AssertionError:
raise ValueError(
f"Couldn't convert to datetime instants (frequency: {eplus_frequency}). Probable cause : "
f"given start year ({start_year}) is incorrect and data can't match because of leap year issues."
) from None
return forced_df
# annual
if eplus_frequency == ANNUAL:
# check first year
if df["year"].iloc[0] != start_year:
raise ValueError(
f"Given start year ({start_year}) differs from annual output data first year ({df['year'].iloc[0]}),"
f"can't switch to datetime instants.")
df.index = df["year"].map(lambda x: dt.datetime(x, 1, 1))
del df["year"]
# force freq
df = df.asfreq("YS")
return df
# run period
if eplus_frequency == RUN_PERIOD:
return df
raise AssertionError("should not be here")
|
def switch_to_datetime_instants(df, start_year, eplus_frequency):
"""
works inplace
"""
# timestep -> monthly
if eplus_frequency in (TIMESTEP, DAILY, HOURLY, MONTHLY):
# prepare year switch
if eplus_frequency in (TIMESTEP, HOURLY, DAILY):
# print((df[["month", "day"]] - df[["month", "day"]].shift()) == pd.Series([-12, -31]))
year_counter = (
(df[["month", "day"]] - df[["month", "day"]].shift()) ==
pd.Series(dict(month=12, day=-31))
).all(axis=1).cumsum()
else:
year_counter = ((df["month"] - df["month"].shift()) == -12).cumsum()
# add year columns
df["year"] = year_counter + start_year
# create index
columns = {
TIMESTEP: ("year", "month", "day", "hour", "minute"),
HOURLY: ("year", "month", "day", "hour"),
DAILY: ("year", "month", "day"),
MONTHLY: ("year", "month")
}[eplus_frequency]
if eplus_frequency == MONTHLY:
df.index = df.apply( # apply transforms ints to floats, we need to re-cast
lambda x: dt.datetime(*(tuple(int(x[k]) for k in columns) + (1,))),
axis=1
)
else:
df.index = df.apply(lambda x: dt.datetime(*(int(x[k]) for k in columns)), axis=1)
# drop old columns
df.drop(columns=list(columns), inplace=True)
# force frequency
if eplus_frequency == TIMESTEP:
# find freq
ts = df.index[1] - df.index[0]
# force
forced_df = df.asfreq(ts)
else:
forced_df = df.asfreq({
HOURLY: "H",
DAILY: "D",
MONTHLY: "MS"
}[eplus_frequency])
# if timestep, hourly or daily, check did not change (only those can suffer from leap year problems)
if eplus_frequency in (TIMESTEP, HOURLY, DAILY):
try:
assert_index_equal(df.index, forced_df.index)
except AssertionError:
raise ValueError(
f"Couldn't convert to datetime instants (frequency: {eplus_frequency}). Probable cause : "
f"given start year ({start_year}) is incorrect and data can't match because of leap year issues."
) from None
return forced_df
# annual
if eplus_frequency == ANNUAL:
# check first year
if df["year"].iloc[0] != start_year:
raise ValueError(
f"Given start year ({start_year}) differs from annual output data first year ({df['year'].iloc[0]}),"
f"can't switch to datetime instants.")
df.index = df["year"].map(lambda x: dt.datetime(x, 1, 1))
del df["year"]
# force freq
df = df.asfreq("YS")
return df
# run period
if eplus_frequency == RUN_PERIOD:
return df
raise AssertionError("should not be here")
|
[
"works",
"inplace"
] |
openergy/oplus
|
python
|
https://github.com/openergy/oplus/blob/f095868d1990c1d126e906ada6acbab26348b3d3/oplus/standard_output/switch_instants.py#L8-L89
|
[
"def",
"switch_to_datetime_instants",
"(",
"df",
",",
"start_year",
",",
"eplus_frequency",
")",
":",
"# timestep -> monthly",
"if",
"eplus_frequency",
"in",
"(",
"TIMESTEP",
",",
"DAILY",
",",
"HOURLY",
",",
"MONTHLY",
")",
":",
"# prepare year switch",
"if",
"eplus_frequency",
"in",
"(",
"TIMESTEP",
",",
"HOURLY",
",",
"DAILY",
")",
":",
"# print((df[[\"month\", \"day\"]] - df[[\"month\", \"day\"]].shift()) == pd.Series([-12, -31]))",
"year_counter",
"=",
"(",
"(",
"df",
"[",
"[",
"\"month\"",
",",
"\"day\"",
"]",
"]",
"-",
"df",
"[",
"[",
"\"month\"",
",",
"\"day\"",
"]",
"]",
".",
"shift",
"(",
")",
")",
"==",
"pd",
".",
"Series",
"(",
"dict",
"(",
"month",
"=",
"12",
",",
"day",
"=",
"-",
"31",
")",
")",
")",
".",
"all",
"(",
"axis",
"=",
"1",
")",
".",
"cumsum",
"(",
")",
"else",
":",
"year_counter",
"=",
"(",
"(",
"df",
"[",
"\"month\"",
"]",
"-",
"df",
"[",
"\"month\"",
"]",
".",
"shift",
"(",
")",
")",
"==",
"-",
"12",
")",
".",
"cumsum",
"(",
")",
"# add year columns",
"df",
"[",
"\"year\"",
"]",
"=",
"year_counter",
"+",
"start_year",
"# create index",
"columns",
"=",
"{",
"TIMESTEP",
":",
"(",
"\"year\"",
",",
"\"month\"",
",",
"\"day\"",
",",
"\"hour\"",
",",
"\"minute\"",
")",
",",
"HOURLY",
":",
"(",
"\"year\"",
",",
"\"month\"",
",",
"\"day\"",
",",
"\"hour\"",
")",
",",
"DAILY",
":",
"(",
"\"year\"",
",",
"\"month\"",
",",
"\"day\"",
")",
",",
"MONTHLY",
":",
"(",
"\"year\"",
",",
"\"month\"",
")",
"}",
"[",
"eplus_frequency",
"]",
"if",
"eplus_frequency",
"==",
"MONTHLY",
":",
"df",
".",
"index",
"=",
"df",
".",
"apply",
"(",
"# apply transforms ints to floats, we need to re-cast",
"lambda",
"x",
":",
"dt",
".",
"datetime",
"(",
"*",
"(",
"tuple",
"(",
"int",
"(",
"x",
"[",
"k",
"]",
")",
"for",
"k",
"in",
"columns",
")",
"+",
"(",
"1",
",",
")",
")",
")",
",",
"axis",
"=",
"1",
")",
"else",
":",
"df",
".",
"index",
"=",
"df",
".",
"apply",
"(",
"lambda",
"x",
":",
"dt",
".",
"datetime",
"(",
"*",
"(",
"int",
"(",
"x",
"[",
"k",
"]",
")",
"for",
"k",
"in",
"columns",
")",
")",
",",
"axis",
"=",
"1",
")",
"# drop old columns",
"df",
".",
"drop",
"(",
"columns",
"=",
"list",
"(",
"columns",
")",
",",
"inplace",
"=",
"True",
")",
"# force frequency",
"if",
"eplus_frequency",
"==",
"TIMESTEP",
":",
"# find freq",
"ts",
"=",
"df",
".",
"index",
"[",
"1",
"]",
"-",
"df",
".",
"index",
"[",
"0",
"]",
"# force",
"forced_df",
"=",
"df",
".",
"asfreq",
"(",
"ts",
")",
"else",
":",
"forced_df",
"=",
"df",
".",
"asfreq",
"(",
"{",
"HOURLY",
":",
"\"H\"",
",",
"DAILY",
":",
"\"D\"",
",",
"MONTHLY",
":",
"\"MS\"",
"}",
"[",
"eplus_frequency",
"]",
")",
"# if timestep, hourly or daily, check did not change (only those can suffer from leap year problems)",
"if",
"eplus_frequency",
"in",
"(",
"TIMESTEP",
",",
"HOURLY",
",",
"DAILY",
")",
":",
"try",
":",
"assert_index_equal",
"(",
"df",
".",
"index",
",",
"forced_df",
".",
"index",
")",
"except",
"AssertionError",
":",
"raise",
"ValueError",
"(",
"f\"Couldn't convert to datetime instants (frequency: {eplus_frequency}). Probable cause : \"",
"f\"given start year ({start_year}) is incorrect and data can't match because of leap year issues.\"",
")",
"from",
"None",
"return",
"forced_df",
"# annual",
"if",
"eplus_frequency",
"==",
"ANNUAL",
":",
"# check first year",
"if",
"df",
"[",
"\"year\"",
"]",
".",
"iloc",
"[",
"0",
"]",
"!=",
"start_year",
":",
"raise",
"ValueError",
"(",
"f\"Given start year ({start_year}) differs from annual output data first year ({df['year'].iloc[0]}),\"",
"f\"can't switch to datetime instants.\"",
")",
"df",
".",
"index",
"=",
"df",
"[",
"\"year\"",
"]",
".",
"map",
"(",
"lambda",
"x",
":",
"dt",
".",
"datetime",
"(",
"x",
",",
"1",
",",
"1",
")",
")",
"del",
"df",
"[",
"\"year\"",
"]",
"# force freq",
"df",
"=",
"df",
".",
"asfreq",
"(",
"\"YS\"",
")",
"return",
"df",
"# run period",
"if",
"eplus_frequency",
"==",
"RUN_PERIOD",
":",
"return",
"df",
"raise",
"AssertionError",
"(",
"\"should not be here\"",
")"
] |
f095868d1990c1d126e906ada6acbab26348b3d3
|
test
|
_Conf.eplus_version
|
if _eplus_version is defined => _eplus_version
else most recent eplus available version
|
oplus/configuration.py
|
def eplus_version(self):
"""
if _eplus_version is defined => _eplus_version
else most recent eplus available version
"""
# check energy plus is installed
if len(self.eplus_available_versions) == 0:
raise RuntimeError("Energy plus is not install, can't use oplus package.")
# see if version is defined
if self._eplus_version is not None:
return self._eplus_version
# return most recent version
return sorted(self.eplus_available_versions.keys(), reverse=True)[0]
|
def eplus_version(self):
"""
if _eplus_version is defined => _eplus_version
else most recent eplus available version
"""
# check energy plus is installed
if len(self.eplus_available_versions) == 0:
raise RuntimeError("Energy plus is not install, can't use oplus package.")
# see if version is defined
if self._eplus_version is not None:
return self._eplus_version
# return most recent version
return sorted(self.eplus_available_versions.keys(), reverse=True)[0]
|
[
"if",
"_eplus_version",
"is",
"defined",
"=",
">",
"_eplus_version",
"else",
"most",
"recent",
"eplus",
"available",
"version"
] |
openergy/oplus
|
python
|
https://github.com/openergy/oplus/blob/f095868d1990c1d126e906ada6acbab26348b3d3/oplus/configuration.py#L28-L42
|
[
"def",
"eplus_version",
"(",
"self",
")",
":",
"# check energy plus is installed",
"if",
"len",
"(",
"self",
".",
"eplus_available_versions",
")",
"==",
"0",
":",
"raise",
"RuntimeError",
"(",
"\"Energy plus is not install, can't use oplus package.\"",
")",
"# see if version is defined",
"if",
"self",
".",
"_eplus_version",
"is",
"not",
"None",
":",
"return",
"self",
".",
"_eplus_version",
"# return most recent version",
"return",
"sorted",
"(",
"self",
".",
"eplus_available_versions",
".",
"keys",
"(",
")",
",",
"reverse",
"=",
"True",
")",
"[",
"0",
"]"
] |
f095868d1990c1d126e906ada6acbab26348b3d3
|
test
|
_check_and_sanitize_datetime_instants
|
Parameters
----------
df
Returns
-------
sanitized df
|
oplus/weather_data/weather_data.py
|
def _check_and_sanitize_datetime_instants(df):
"""
Parameters
----------
df
Returns
-------
sanitized df
"""
# leave if not relevant
if df is None or len(df) == 0:
return df
# check datetime index
if not isinstance(df.index, pd.DatetimeIndex):
raise ValueError("df index must be a datetime index.")
# force frequency if needed
if df.index.freq != "H":
forced_df = df.asfreq("H")
# check no change
try:
assert_index_equal(df.index, forced_df.index)
except AssertionError:
raise ValueError(
f"Couldn't convert to hourly datetime instants. Probable cause : "
f"given start instant ({df.index[0]}) is incorrect and data can't match because of leap year issues."
) from None
# replace old variable
df = forced_df
# check first minute is 0
if df.index[0].minute != 0:
raise ValueError("Minutes must be 0.")
return df
|
def _check_and_sanitize_datetime_instants(df):
"""
Parameters
----------
df
Returns
-------
sanitized df
"""
# leave if not relevant
if df is None or len(df) == 0:
return df
# check datetime index
if not isinstance(df.index, pd.DatetimeIndex):
raise ValueError("df index must be a datetime index.")
# force frequency if needed
if df.index.freq != "H":
forced_df = df.asfreq("H")
# check no change
try:
assert_index_equal(df.index, forced_df.index)
except AssertionError:
raise ValueError(
f"Couldn't convert to hourly datetime instants. Probable cause : "
f"given start instant ({df.index[0]}) is incorrect and data can't match because of leap year issues."
) from None
# replace old variable
df = forced_df
# check first minute is 0
if df.index[0].minute != 0:
raise ValueError("Minutes must be 0.")
return df
|
[
"Parameters",
"----------",
"df"
] |
openergy/oplus
|
python
|
https://github.com/openergy/oplus/blob/f095868d1990c1d126e906ada6acbab26348b3d3/oplus/weather_data/weather_data.py#L451-L487
|
[
"def",
"_check_and_sanitize_datetime_instants",
"(",
"df",
")",
":",
"# leave if not relevant",
"if",
"df",
"is",
"None",
"or",
"len",
"(",
"df",
")",
"==",
"0",
":",
"return",
"df",
"# check datetime index",
"if",
"not",
"isinstance",
"(",
"df",
".",
"index",
",",
"pd",
".",
"DatetimeIndex",
")",
":",
"raise",
"ValueError",
"(",
"\"df index must be a datetime index.\"",
")",
"# force frequency if needed",
"if",
"df",
".",
"index",
".",
"freq",
"!=",
"\"H\"",
":",
"forced_df",
"=",
"df",
".",
"asfreq",
"(",
"\"H\"",
")",
"# check no change",
"try",
":",
"assert_index_equal",
"(",
"df",
".",
"index",
",",
"forced_df",
".",
"index",
")",
"except",
"AssertionError",
":",
"raise",
"ValueError",
"(",
"f\"Couldn't convert to hourly datetime instants. Probable cause : \"",
"f\"given start instant ({df.index[0]}) is incorrect and data can't match because of leap year issues.\"",
")",
"from",
"None",
"# replace old variable",
"df",
"=",
"forced_df",
"# check first minute is 0",
"if",
"df",
".",
"index",
"[",
"0",
"]",
".",
"minute",
"!=",
"0",
":",
"raise",
"ValueError",
"(",
"\"Minutes must be 0.\"",
")",
"return",
"df"
] |
f095868d1990c1d126e906ada6acbab26348b3d3
|
test
|
WeatherData.get_bounds
|
Returns
-------
(start, end)
Datetime instants of beginning and end of data. If no data, will be: (None, None).
|
oplus/weather_data/weather_data.py
|
def get_bounds(self):
"""
Returns
-------
(start, end)
Datetime instants of beginning and end of data. If no data, will be: (None, None).
"""
start, end = None, None
if len(self._weather_series) == 0:
return start, end
for i in (0, -1):
# create or find instant
if self.has_tuple_instants:
row = self._weather_series.iloc[i, :]
instant = dt.datetime(row["year"], row["month"], row["day"], row["hour"], row["minute"])
else:
instant = self._weather_series.index[i].to_pydatetime()
# store
if i == 0:
start = instant
else:
end = instant
return start, end
|
def get_bounds(self):
"""
Returns
-------
(start, end)
Datetime instants of beginning and end of data. If no data, will be: (None, None).
"""
start, end = None, None
if len(self._weather_series) == 0:
return start, end
for i in (0, -1):
# create or find instant
if self.has_tuple_instants:
row = self._weather_series.iloc[i, :]
instant = dt.datetime(row["year"], row["month"], row["day"], row["hour"], row["minute"])
else:
instant = self._weather_series.index[i].to_pydatetime()
# store
if i == 0:
start = instant
else:
end = instant
return start, end
|
[
"Returns",
"-------",
"(",
"start",
"end",
")"
] |
openergy/oplus
|
python
|
https://github.com/openergy/oplus/blob/f095868d1990c1d126e906ada6acbab26348b3d3/oplus/weather_data/weather_data.py#L314-L340
|
[
"def",
"get_bounds",
"(",
"self",
")",
":",
"start",
",",
"end",
"=",
"None",
",",
"None",
"if",
"len",
"(",
"self",
".",
"_weather_series",
")",
"==",
"0",
":",
"return",
"start",
",",
"end",
"for",
"i",
"in",
"(",
"0",
",",
"-",
"1",
")",
":",
"# create or find instant",
"if",
"self",
".",
"has_tuple_instants",
":",
"row",
"=",
"self",
".",
"_weather_series",
".",
"iloc",
"[",
"i",
",",
":",
"]",
"instant",
"=",
"dt",
".",
"datetime",
"(",
"row",
"[",
"\"year\"",
"]",
",",
"row",
"[",
"\"month\"",
"]",
",",
"row",
"[",
"\"day\"",
"]",
",",
"row",
"[",
"\"hour\"",
"]",
",",
"row",
"[",
"\"minute\"",
"]",
")",
"else",
":",
"instant",
"=",
"self",
".",
"_weather_series",
".",
"index",
"[",
"i",
"]",
".",
"to_pydatetime",
"(",
")",
"# store",
"if",
"i",
"==",
"0",
":",
"start",
"=",
"instant",
"else",
":",
"end",
"=",
"instant",
"return",
"start",
",",
"end"
] |
f095868d1990c1d126e906ada6acbab26348b3d3
|
test
|
WeatherData.from_epw
|
Parameters
----------
buffer_or_path: buffer or path containing epw format.
Returns
-------
WeatherData instance.
|
oplus/weather_data/weather_data.py
|
def from_epw(cls, buffer_or_path):
"""
Parameters
----------
buffer_or_path: buffer or path containing epw format.
Returns
-------
WeatherData instance.
"""
from .epw_parse import parse_epw
_, buffer = to_buffer(buffer_or_path)
with buffer as f:
return parse_epw(f)
|
def from_epw(cls, buffer_or_path):
"""
Parameters
----------
buffer_or_path: buffer or path containing epw format.
Returns
-------
WeatherData instance.
"""
from .epw_parse import parse_epw
_, buffer = to_buffer(buffer_or_path)
with buffer as f:
return parse_epw(f)
|
[
"Parameters",
"----------",
"buffer_or_path",
":",
"buffer",
"or",
"path",
"containing",
"epw",
"format",
"."
] |
openergy/oplus
|
python
|
https://github.com/openergy/oplus/blob/f095868d1990c1d126e906ada6acbab26348b3d3/oplus/weather_data/weather_data.py#L356-L369
|
[
"def",
"from_epw",
"(",
"cls",
",",
"buffer_or_path",
")",
":",
"from",
".",
"epw_parse",
"import",
"parse_epw",
"_",
",",
"buffer",
"=",
"to_buffer",
"(",
"buffer_or_path",
")",
"with",
"buffer",
"as",
"f",
":",
"return",
"parse_epw",
"(",
"f",
")"
] |
f095868d1990c1d126e906ada6acbab26348b3d3
|
test
|
WeatherData.to_epw
|
Parameters
----------
buffer_or_path: buffer or path, default None
Buffer or path to write into. If None, will return a string containing epw info.
Returns
-------
None or a string if buffer_or_path is None.
|
oplus/weather_data/weather_data.py
|
def to_epw(self, buffer_or_path=None):
"""
Parameters
----------
buffer_or_path: buffer or path, default None
Buffer or path to write into. If None, will return a string containing epw info.
Returns
-------
None or a string if buffer_or_path is None.
"""
# copy and change hours convention [0, 23] -> [1, 24]
df = self._weather_series.copy()
df["hour"] += 1
epw_content = self._headers_to_epw() + df.to_csv(header=False, index=False, line_terminator="\n")
return multi_mode_write(
lambda buffer: buffer.write(epw_content),
lambda: epw_content,
buffer_or_path=buffer_or_path
)
|
def to_epw(self, buffer_or_path=None):
"""
Parameters
----------
buffer_or_path: buffer or path, default None
Buffer or path to write into. If None, will return a string containing epw info.
Returns
-------
None or a string if buffer_or_path is None.
"""
# copy and change hours convention [0, 23] -> [1, 24]
df = self._weather_series.copy()
df["hour"] += 1
epw_content = self._headers_to_epw() + df.to_csv(header=False, index=False, line_terminator="\n")
return multi_mode_write(
lambda buffer: buffer.write(epw_content),
lambda: epw_content,
buffer_or_path=buffer_or_path
)
|
[
"Parameters",
"----------",
"buffer_or_path",
":",
"buffer",
"or",
"path",
"default",
"None",
"Buffer",
"or",
"path",
"to",
"write",
"into",
".",
"If",
"None",
"will",
"return",
"a",
"string",
"containing",
"epw",
"info",
"."
] |
openergy/oplus
|
python
|
https://github.com/openergy/oplus/blob/f095868d1990c1d126e906ada6acbab26348b3d3/oplus/weather_data/weather_data.py#L372-L391
|
[
"def",
"to_epw",
"(",
"self",
",",
"buffer_or_path",
"=",
"None",
")",
":",
"# copy and change hours convention [0, 23] -> [1, 24]",
"df",
"=",
"self",
".",
"_weather_series",
".",
"copy",
"(",
")",
"df",
"[",
"\"hour\"",
"]",
"+=",
"1",
"epw_content",
"=",
"self",
".",
"_headers_to_epw",
"(",
")",
"+",
"df",
".",
"to_csv",
"(",
"header",
"=",
"False",
",",
"index",
"=",
"False",
",",
"line_terminator",
"=",
"\"\\n\"",
")",
"return",
"multi_mode_write",
"(",
"lambda",
"buffer",
":",
"buffer",
".",
"write",
"(",
"epw_content",
")",
",",
"lambda",
":",
"epw_content",
",",
"buffer_or_path",
"=",
"buffer_or_path",
")"
] |
f095868d1990c1d126e906ada6acbab26348b3d3
|
test
|
parse_idf
|
Records are created from string.
They are not attached to idf yet.
in idf: header comment, chapter comments, records
in record: head comment, field comments, tail comment
|
oplus/epm/parse_idf.py
|
def parse_idf(file_like):
"""
Records are created from string.
They are not attached to idf yet.
in idf: header comment, chapter comments, records
in record: head comment, field comments, tail comment
"""
tables_data = {}
head_comment = ""
record_data = None
make_new_record = True
copyright_list = get_multi_line_copyright_message().split("\n")
for i, raw_line in enumerate(file_like):
# manage if copyright
try:
copyright_line = copyright_list[i]
if raw_line.strip() == copyright_line:
# skip copyright line
continue
except IndexError:
pass
# GET LINE CONTENT AND COMMENT
split_line = raw_line.split("!")
# no "!" in the raw_line
if len(split_line) == 1:
# this is an empty line
if len(split_line[0].strip()) == 0:
content, comment = None, None
# this is a record line with no comments
else:
content, comment = split_line[0].strip(), None
# there is at least one "!" in the raw_line
else:
# this is a comment line
if len(split_line[0].strip()) == 0:
content, comment = None, "!".join(split_line[1:])
# this is a record line with a comment
else:
content, comment = split_line[0].strip(), "!".join(split_line[1:])
# SKIP CURRENT LINE IF VOID
if (content, comment) == (None, None):
continue
# NO CONTENT
if not content:
if record_data is None: # we only manage head idf comment
head_comment += comment.strip() + "\n"
continue
# CONTENT
# check if record end and prepare
record_end = content[-1] == ";"
content = content[:-1] # we tear comma or semi-colon
content_l = [text.strip() for text in content.split(",")]
# record creation if needed
if make_new_record:
# get table ref
table_ref = table_name_to_ref(content_l[0].strip())
# skip if special table
if table_ref.lower() in (
"lead input",
"end lead input",
"simulation data",
"end simulation data"
):
continue
# declare table if necessary
if table_ref not in tables_data:
tables_data[table_ref] = []
# create and store record
record_data = dict()
tables_data[table_ref].append(record_data)
# prepare in case fields on the same line
content_l = content_l[1:]
make_new_record = False
# fields
for value_s in content_l:
field_index = len(record_data)
record_data[field_index] = value_s
# signal that new record must be created
if record_end:
make_new_record = True
# add comment key
tables_data["_comment"] = head_comment
return tables_data
|
def parse_idf(file_like):
"""
Records are created from string.
They are not attached to idf yet.
in idf: header comment, chapter comments, records
in record: head comment, field comments, tail comment
"""
tables_data = {}
head_comment = ""
record_data = None
make_new_record = True
copyright_list = get_multi_line_copyright_message().split("\n")
for i, raw_line in enumerate(file_like):
# manage if copyright
try:
copyright_line = copyright_list[i]
if raw_line.strip() == copyright_line:
# skip copyright line
continue
except IndexError:
pass
# GET LINE CONTENT AND COMMENT
split_line = raw_line.split("!")
# no "!" in the raw_line
if len(split_line) == 1:
# this is an empty line
if len(split_line[0].strip()) == 0:
content, comment = None, None
# this is a record line with no comments
else:
content, comment = split_line[0].strip(), None
# there is at least one "!" in the raw_line
else:
# this is a comment line
if len(split_line[0].strip()) == 0:
content, comment = None, "!".join(split_line[1:])
# this is a record line with a comment
else:
content, comment = split_line[0].strip(), "!".join(split_line[1:])
# SKIP CURRENT LINE IF VOID
if (content, comment) == (None, None):
continue
# NO CONTENT
if not content:
if record_data is None: # we only manage head idf comment
head_comment += comment.strip() + "\n"
continue
# CONTENT
# check if record end and prepare
record_end = content[-1] == ";"
content = content[:-1] # we tear comma or semi-colon
content_l = [text.strip() for text in content.split(",")]
# record creation if needed
if make_new_record:
# get table ref
table_ref = table_name_to_ref(content_l[0].strip())
# skip if special table
if table_ref.lower() in (
"lead input",
"end lead input",
"simulation data",
"end simulation data"
):
continue
# declare table if necessary
if table_ref not in tables_data:
tables_data[table_ref] = []
# create and store record
record_data = dict()
tables_data[table_ref].append(record_data)
# prepare in case fields on the same line
content_l = content_l[1:]
make_new_record = False
# fields
for value_s in content_l:
field_index = len(record_data)
record_data[field_index] = value_s
# signal that new record must be created
if record_end:
make_new_record = True
# add comment key
tables_data["_comment"] = head_comment
return tables_data
|
[
"Records",
"are",
"created",
"from",
"string",
".",
"They",
"are",
"not",
"attached",
"to",
"idf",
"yet",
".",
"in",
"idf",
":",
"header",
"comment",
"chapter",
"comments",
"records",
"in",
"record",
":",
"head",
"comment",
"field",
"comments",
"tail",
"comment"
] |
openergy/oplus
|
python
|
https://github.com/openergy/oplus/blob/f095868d1990c1d126e906ada6acbab26348b3d3/oplus/epm/parse_idf.py#L5-L102
|
[
"def",
"parse_idf",
"(",
"file_like",
")",
":",
"tables_data",
"=",
"{",
"}",
"head_comment",
"=",
"\"\"",
"record_data",
"=",
"None",
"make_new_record",
"=",
"True",
"copyright_list",
"=",
"get_multi_line_copyright_message",
"(",
")",
".",
"split",
"(",
"\"\\n\"",
")",
"for",
"i",
",",
"raw_line",
"in",
"enumerate",
"(",
"file_like",
")",
":",
"# manage if copyright",
"try",
":",
"copyright_line",
"=",
"copyright_list",
"[",
"i",
"]",
"if",
"raw_line",
".",
"strip",
"(",
")",
"==",
"copyright_line",
":",
"# skip copyright line",
"continue",
"except",
"IndexError",
":",
"pass",
"# GET LINE CONTENT AND COMMENT",
"split_line",
"=",
"raw_line",
".",
"split",
"(",
"\"!\"",
")",
"# no \"!\" in the raw_line",
"if",
"len",
"(",
"split_line",
")",
"==",
"1",
":",
"# this is an empty line",
"if",
"len",
"(",
"split_line",
"[",
"0",
"]",
".",
"strip",
"(",
")",
")",
"==",
"0",
":",
"content",
",",
"comment",
"=",
"None",
",",
"None",
"# this is a record line with no comments",
"else",
":",
"content",
",",
"comment",
"=",
"split_line",
"[",
"0",
"]",
".",
"strip",
"(",
")",
",",
"None",
"# there is at least one \"!\" in the raw_line",
"else",
":",
"# this is a comment line",
"if",
"len",
"(",
"split_line",
"[",
"0",
"]",
".",
"strip",
"(",
")",
")",
"==",
"0",
":",
"content",
",",
"comment",
"=",
"None",
",",
"\"!\"",
".",
"join",
"(",
"split_line",
"[",
"1",
":",
"]",
")",
"# this is a record line with a comment",
"else",
":",
"content",
",",
"comment",
"=",
"split_line",
"[",
"0",
"]",
".",
"strip",
"(",
")",
",",
"\"!\"",
".",
"join",
"(",
"split_line",
"[",
"1",
":",
"]",
")",
"# SKIP CURRENT LINE IF VOID",
"if",
"(",
"content",
",",
"comment",
")",
"==",
"(",
"None",
",",
"None",
")",
":",
"continue",
"# NO CONTENT",
"if",
"not",
"content",
":",
"if",
"record_data",
"is",
"None",
":",
"# we only manage head idf comment",
"head_comment",
"+=",
"comment",
".",
"strip",
"(",
")",
"+",
"\"\\n\"",
"continue",
"# CONTENT",
"# check if record end and prepare",
"record_end",
"=",
"content",
"[",
"-",
"1",
"]",
"==",
"\";\"",
"content",
"=",
"content",
"[",
":",
"-",
"1",
"]",
"# we tear comma or semi-colon",
"content_l",
"=",
"[",
"text",
".",
"strip",
"(",
")",
"for",
"text",
"in",
"content",
".",
"split",
"(",
"\",\"",
")",
"]",
"# record creation if needed",
"if",
"make_new_record",
":",
"# get table ref",
"table_ref",
"=",
"table_name_to_ref",
"(",
"content_l",
"[",
"0",
"]",
".",
"strip",
"(",
")",
")",
"# skip if special table",
"if",
"table_ref",
".",
"lower",
"(",
")",
"in",
"(",
"\"lead input\"",
",",
"\"end lead input\"",
",",
"\"simulation data\"",
",",
"\"end simulation data\"",
")",
":",
"continue",
"# declare table if necessary",
"if",
"table_ref",
"not",
"in",
"tables_data",
":",
"tables_data",
"[",
"table_ref",
"]",
"=",
"[",
"]",
"# create and store record",
"record_data",
"=",
"dict",
"(",
")",
"tables_data",
"[",
"table_ref",
"]",
".",
"append",
"(",
"record_data",
")",
"# prepare in case fields on the same line",
"content_l",
"=",
"content_l",
"[",
"1",
":",
"]",
"make_new_record",
"=",
"False",
"# fields",
"for",
"value_s",
"in",
"content_l",
":",
"field_index",
"=",
"len",
"(",
"record_data",
")",
"record_data",
"[",
"field_index",
"]",
"=",
"value_s",
"# signal that new record must be created",
"if",
"record_end",
":",
"make_new_record",
"=",
"True",
"# add comment key",
"tables_data",
"[",
"\"_comment\"",
"]",
"=",
"head_comment",
"return",
"tables_data"
] |
f095868d1990c1d126e906ada6acbab26348b3d3
|
test
|
run_eplus
|
Parameters
----------
epm_or_idf_path:
weather_data_or_epw_path
simulation_dir_path
stdout: default sys.stdout
stderr: default sys.stderr
beat_freq: if not none, stdout will be used at least every beat_freq (in seconds)
|
oplus/simulation.py
|
def run_eplus(epm_or_idf_path, weather_data_or_epw_path, simulation_dir_path, stdout=None, stderr=None, beat_freq=None):
"""
Parameters
----------
epm_or_idf_path:
weather_data_or_epw_path
simulation_dir_path
stdout: default sys.stdout
stderr: default sys.stderr
beat_freq: if not none, stdout will be used at least every beat_freq (in seconds)
"""
# work with absolute paths
simulation_dir_path = os.path.abspath(simulation_dir_path)
# check dir path
if not os.path.isdir(simulation_dir_path):
raise NotADirectoryError("Simulation directory does not exist: '%s'." % simulation_dir_path)
# epm
if not isinstance(epm_or_idf_path, Epm):
# we don't copy file directly because we want to manage it's external files
# could be optimized (use _copy_without_read_only)
epm = Epm.from_idf(epm_or_idf_path)
else:
epm = epm_or_idf_path
# create idf
simulation_idf_path = os.path.join(simulation_dir_path, CONF.default_model_name + ".idf")
epm.to_idf(simulation_idf_path)
# weather data
simulation_epw_path = os.path.join(simulation_dir_path, CONF.default_model_name + ".epw")
if isinstance(weather_data_or_epw_path, WeatherData):
weather_data_or_epw_path.to_epw(simulation_epw_path)
else:
# no need to load: we copy directly
_copy_without_read_only(weather_data_or_epw_path, simulation_epw_path)
# copy epw if needed (depends on os/eplus version)
temp_epw_path = get_simulated_epw_path()
if temp_epw_path is not None:
_copy_without_read_only(simulation_epw_path, temp_epw_path)
# prepare command
eplus_relative_cmd = get_simulation_base_command()
eplus_cmd = os.path.join(CONF.eplus_base_dir_path, eplus_relative_cmd)
# idf
idf_command_style = get_simulation_input_command_style("idf")
if idf_command_style == SIMULATION_INPUT_COMMAND_STYLES.simu_dir:
idf_file_cmd = os.path.join(simulation_dir_path, CONF.default_model_name)
elif idf_command_style == SIMULATION_INPUT_COMMAND_STYLES.file_path:
idf_file_cmd = simulation_idf_path
else:
raise AssertionError("should not be here")
# epw
epw_command_style = get_simulation_input_command_style("epw")
if epw_command_style == SIMULATION_INPUT_COMMAND_STYLES.simu_dir:
epw_file_cmd = os.path.join(simulation_dir_path, CONF.default_model_name)
elif epw_command_style == SIMULATION_INPUT_COMMAND_STYLES.file_path:
epw_file_cmd = simulation_epw_path
else:
raise AssertionError("should not be here")
# command list
simulation_command_style = get_simulation_command_style()
if simulation_command_style == SIMULATION_COMMAND_STYLES.args:
cmd_l = [eplus_cmd, idf_file_cmd, epw_file_cmd]
elif simulation_command_style == SIMULATION_COMMAND_STYLES.kwargs:
cmd_l = [eplus_cmd, "-w", epw_file_cmd, "-r", idf_file_cmd]
else:
raise RuntimeError("should not be here")
# launch calculation
run_subprocess(
cmd_l,
cwd=simulation_dir_path,
stdout=stdout,
stderr=stderr,
beat_freq=beat_freq
)
# if needed, we delete temp weather data (only on Windows, see above)
if (temp_epw_path is not None) and os.path.isfile(temp_epw_path):
os.remove(os.path.join(temp_epw_path))
|
def run_eplus(epm_or_idf_path, weather_data_or_epw_path, simulation_dir_path, stdout=None, stderr=None, beat_freq=None):
"""
Parameters
----------
epm_or_idf_path:
weather_data_or_epw_path
simulation_dir_path
stdout: default sys.stdout
stderr: default sys.stderr
beat_freq: if not none, stdout will be used at least every beat_freq (in seconds)
"""
# work with absolute paths
simulation_dir_path = os.path.abspath(simulation_dir_path)
# check dir path
if not os.path.isdir(simulation_dir_path):
raise NotADirectoryError("Simulation directory does not exist: '%s'." % simulation_dir_path)
# epm
if not isinstance(epm_or_idf_path, Epm):
# we don't copy file directly because we want to manage it's external files
# could be optimized (use _copy_without_read_only)
epm = Epm.from_idf(epm_or_idf_path)
else:
epm = epm_or_idf_path
# create idf
simulation_idf_path = os.path.join(simulation_dir_path, CONF.default_model_name + ".idf")
epm.to_idf(simulation_idf_path)
# weather data
simulation_epw_path = os.path.join(simulation_dir_path, CONF.default_model_name + ".epw")
if isinstance(weather_data_or_epw_path, WeatherData):
weather_data_or_epw_path.to_epw(simulation_epw_path)
else:
# no need to load: we copy directly
_copy_without_read_only(weather_data_or_epw_path, simulation_epw_path)
# copy epw if needed (depends on os/eplus version)
temp_epw_path = get_simulated_epw_path()
if temp_epw_path is not None:
_copy_without_read_only(simulation_epw_path, temp_epw_path)
# prepare command
eplus_relative_cmd = get_simulation_base_command()
eplus_cmd = os.path.join(CONF.eplus_base_dir_path, eplus_relative_cmd)
# idf
idf_command_style = get_simulation_input_command_style("idf")
if idf_command_style == SIMULATION_INPUT_COMMAND_STYLES.simu_dir:
idf_file_cmd = os.path.join(simulation_dir_path, CONF.default_model_name)
elif idf_command_style == SIMULATION_INPUT_COMMAND_STYLES.file_path:
idf_file_cmd = simulation_idf_path
else:
raise AssertionError("should not be here")
# epw
epw_command_style = get_simulation_input_command_style("epw")
if epw_command_style == SIMULATION_INPUT_COMMAND_STYLES.simu_dir:
epw_file_cmd = os.path.join(simulation_dir_path, CONF.default_model_name)
elif epw_command_style == SIMULATION_INPUT_COMMAND_STYLES.file_path:
epw_file_cmd = simulation_epw_path
else:
raise AssertionError("should not be here")
# command list
simulation_command_style = get_simulation_command_style()
if simulation_command_style == SIMULATION_COMMAND_STYLES.args:
cmd_l = [eplus_cmd, idf_file_cmd, epw_file_cmd]
elif simulation_command_style == SIMULATION_COMMAND_STYLES.kwargs:
cmd_l = [eplus_cmd, "-w", epw_file_cmd, "-r", idf_file_cmd]
else:
raise RuntimeError("should not be here")
# launch calculation
run_subprocess(
cmd_l,
cwd=simulation_dir_path,
stdout=stdout,
stderr=stderr,
beat_freq=beat_freq
)
# if needed, we delete temp weather data (only on Windows, see above)
if (temp_epw_path is not None) and os.path.isfile(temp_epw_path):
os.remove(os.path.join(temp_epw_path))
|
[
"Parameters",
"----------",
"epm_or_idf_path",
":",
"weather_data_or_epw_path",
"simulation_dir_path",
"stdout",
":",
"default",
"sys",
".",
"stdout",
"stderr",
":",
"default",
"sys",
".",
"stderr",
"beat_freq",
":",
"if",
"not",
"none",
"stdout",
"will",
"be",
"used",
"at",
"least",
"every",
"beat_freq",
"(",
"in",
"seconds",
")"
] |
openergy/oplus
|
python
|
https://github.com/openergy/oplus/blob/f095868d1990c1d126e906ada6acbab26348b3d3/oplus/simulation.py#L313-L398
|
[
"def",
"run_eplus",
"(",
"epm_or_idf_path",
",",
"weather_data_or_epw_path",
",",
"simulation_dir_path",
",",
"stdout",
"=",
"None",
",",
"stderr",
"=",
"None",
",",
"beat_freq",
"=",
"None",
")",
":",
"# work with absolute paths",
"simulation_dir_path",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"simulation_dir_path",
")",
"# check dir path",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"simulation_dir_path",
")",
":",
"raise",
"NotADirectoryError",
"(",
"\"Simulation directory does not exist: '%s'.\"",
"%",
"simulation_dir_path",
")",
"# epm",
"if",
"not",
"isinstance",
"(",
"epm_or_idf_path",
",",
"Epm",
")",
":",
"# we don't copy file directly because we want to manage it's external files",
"# could be optimized (use _copy_without_read_only)",
"epm",
"=",
"Epm",
".",
"from_idf",
"(",
"epm_or_idf_path",
")",
"else",
":",
"epm",
"=",
"epm_or_idf_path",
"# create idf",
"simulation_idf_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"simulation_dir_path",
",",
"CONF",
".",
"default_model_name",
"+",
"\".idf\"",
")",
"epm",
".",
"to_idf",
"(",
"simulation_idf_path",
")",
"# weather data",
"simulation_epw_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"simulation_dir_path",
",",
"CONF",
".",
"default_model_name",
"+",
"\".epw\"",
")",
"if",
"isinstance",
"(",
"weather_data_or_epw_path",
",",
"WeatherData",
")",
":",
"weather_data_or_epw_path",
".",
"to_epw",
"(",
"simulation_epw_path",
")",
"else",
":",
"# no need to load: we copy directly",
"_copy_without_read_only",
"(",
"weather_data_or_epw_path",
",",
"simulation_epw_path",
")",
"# copy epw if needed (depends on os/eplus version)",
"temp_epw_path",
"=",
"get_simulated_epw_path",
"(",
")",
"if",
"temp_epw_path",
"is",
"not",
"None",
":",
"_copy_without_read_only",
"(",
"simulation_epw_path",
",",
"temp_epw_path",
")",
"# prepare command",
"eplus_relative_cmd",
"=",
"get_simulation_base_command",
"(",
")",
"eplus_cmd",
"=",
"os",
".",
"path",
".",
"join",
"(",
"CONF",
".",
"eplus_base_dir_path",
",",
"eplus_relative_cmd",
")",
"# idf",
"idf_command_style",
"=",
"get_simulation_input_command_style",
"(",
"\"idf\"",
")",
"if",
"idf_command_style",
"==",
"SIMULATION_INPUT_COMMAND_STYLES",
".",
"simu_dir",
":",
"idf_file_cmd",
"=",
"os",
".",
"path",
".",
"join",
"(",
"simulation_dir_path",
",",
"CONF",
".",
"default_model_name",
")",
"elif",
"idf_command_style",
"==",
"SIMULATION_INPUT_COMMAND_STYLES",
".",
"file_path",
":",
"idf_file_cmd",
"=",
"simulation_idf_path",
"else",
":",
"raise",
"AssertionError",
"(",
"\"should not be here\"",
")",
"# epw",
"epw_command_style",
"=",
"get_simulation_input_command_style",
"(",
"\"epw\"",
")",
"if",
"epw_command_style",
"==",
"SIMULATION_INPUT_COMMAND_STYLES",
".",
"simu_dir",
":",
"epw_file_cmd",
"=",
"os",
".",
"path",
".",
"join",
"(",
"simulation_dir_path",
",",
"CONF",
".",
"default_model_name",
")",
"elif",
"epw_command_style",
"==",
"SIMULATION_INPUT_COMMAND_STYLES",
".",
"file_path",
":",
"epw_file_cmd",
"=",
"simulation_epw_path",
"else",
":",
"raise",
"AssertionError",
"(",
"\"should not be here\"",
")",
"# command list",
"simulation_command_style",
"=",
"get_simulation_command_style",
"(",
")",
"if",
"simulation_command_style",
"==",
"SIMULATION_COMMAND_STYLES",
".",
"args",
":",
"cmd_l",
"=",
"[",
"eplus_cmd",
",",
"idf_file_cmd",
",",
"epw_file_cmd",
"]",
"elif",
"simulation_command_style",
"==",
"SIMULATION_COMMAND_STYLES",
".",
"kwargs",
":",
"cmd_l",
"=",
"[",
"eplus_cmd",
",",
"\"-w\"",
",",
"epw_file_cmd",
",",
"\"-r\"",
",",
"idf_file_cmd",
"]",
"else",
":",
"raise",
"RuntimeError",
"(",
"\"should not be here\"",
")",
"# launch calculation",
"run_subprocess",
"(",
"cmd_l",
",",
"cwd",
"=",
"simulation_dir_path",
",",
"stdout",
"=",
"stdout",
",",
"stderr",
"=",
"stderr",
",",
"beat_freq",
"=",
"beat_freq",
")",
"# if needed, we delete temp weather data (only on Windows, see above)",
"if",
"(",
"temp_epw_path",
"is",
"not",
"None",
")",
"and",
"os",
".",
"path",
".",
"isfile",
"(",
"temp_epw_path",
")",
":",
"os",
".",
"remove",
"(",
"os",
".",
"path",
".",
"join",
"(",
"temp_epw_path",
")",
")"
] |
f095868d1990c1d126e906ada6acbab26348b3d3
|
test
|
Simulation.simulate
|
Parameters
----------
epm_or_path
weather_data_or_path
base_dir_path: simulation dir path
simulation_name: str, default None
if provided, simulation will be done in {base_dir_path}/{simulation_name}
else, simulation will be done in {base_dir_path}
stdout: stream, default logger.info
stream where EnergyPlus standard output is redirected
stderr: stream, default logger.error
stream where EnergyPlus standard error is redirected
beat_freq: float, default None
if provided, subprocess in which EnergyPlus is run will write at given frequency in standard output. May
be used to monitor subprocess state.
Returns
-------
Simulation instance
|
oplus/simulation.py
|
def simulate(
cls,
epm_or_path,
weather_data_or_path,
base_dir_path,
simulation_name=None,
stdout=None,
stderr=None,
beat_freq=None
):
"""
Parameters
----------
epm_or_path
weather_data_or_path
base_dir_path: simulation dir path
simulation_name: str, default None
if provided, simulation will be done in {base_dir_path}/{simulation_name}
else, simulation will be done in {base_dir_path}
stdout: stream, default logger.info
stream where EnergyPlus standard output is redirected
stderr: stream, default logger.error
stream where EnergyPlus standard error is redirected
beat_freq: float, default None
if provided, subprocess in which EnergyPlus is run will write at given frequency in standard output. May
be used to monitor subprocess state.
Returns
-------
Simulation instance
"""
# manage simulation dir path
if not os.path.isdir(base_dir_path):
raise NotADirectoryError("Base dir path not found: '%s'" % base_dir_path)
simulation_dir_path = base_dir_path if simulation_name is None else os.path.join(base_dir_path, simulation_name)
# make directory if does not exist
if not os.path.exists(simulation_dir_path):
os.mkdir(simulation_dir_path)
# run simulation
stdout = LoggerStreamWriter(logger_name=__name__, level=logging.INFO) if stdout is None else stdout
stderr = LoggerStreamWriter(logger_name=__name__, level=logging.ERROR) if stderr is None else stderr
run_eplus(
epm_or_path,
weather_data_or_path,
simulation_dir_path,
stdout=stdout,
stderr=stderr,
beat_freq=beat_freq
)
# return simulation object
return cls(
base_dir_path,
simulation_name=simulation_name
)
|
def simulate(
cls,
epm_or_path,
weather_data_or_path,
base_dir_path,
simulation_name=None,
stdout=None,
stderr=None,
beat_freq=None
):
"""
Parameters
----------
epm_or_path
weather_data_or_path
base_dir_path: simulation dir path
simulation_name: str, default None
if provided, simulation will be done in {base_dir_path}/{simulation_name}
else, simulation will be done in {base_dir_path}
stdout: stream, default logger.info
stream where EnergyPlus standard output is redirected
stderr: stream, default logger.error
stream where EnergyPlus standard error is redirected
beat_freq: float, default None
if provided, subprocess in which EnergyPlus is run will write at given frequency in standard output. May
be used to monitor subprocess state.
Returns
-------
Simulation instance
"""
# manage simulation dir path
if not os.path.isdir(base_dir_path):
raise NotADirectoryError("Base dir path not found: '%s'" % base_dir_path)
simulation_dir_path = base_dir_path if simulation_name is None else os.path.join(base_dir_path, simulation_name)
# make directory if does not exist
if not os.path.exists(simulation_dir_path):
os.mkdir(simulation_dir_path)
# run simulation
stdout = LoggerStreamWriter(logger_name=__name__, level=logging.INFO) if stdout is None else stdout
stderr = LoggerStreamWriter(logger_name=__name__, level=logging.ERROR) if stderr is None else stderr
run_eplus(
epm_or_path,
weather_data_or_path,
simulation_dir_path,
stdout=stdout,
stderr=stderr,
beat_freq=beat_freq
)
# return simulation object
return cls(
base_dir_path,
simulation_name=simulation_name
)
|
[
"Parameters",
"----------",
"epm_or_path",
"weather_data_or_path",
"base_dir_path",
":",
"simulation",
"dir",
"path",
"simulation_name",
":",
"str",
"default",
"None",
"if",
"provided",
"simulation",
"will",
"be",
"done",
"in",
"{",
"base_dir_path",
"}",
"/",
"{",
"simulation_name",
"}",
"else",
"simulation",
"will",
"be",
"done",
"in",
"{",
"base_dir_path",
"}",
"stdout",
":",
"stream",
"default",
"logger",
".",
"info",
"stream",
"where",
"EnergyPlus",
"standard",
"output",
"is",
"redirected",
"stderr",
":",
"stream",
"default",
"logger",
".",
"error",
"stream",
"where",
"EnergyPlus",
"standard",
"error",
"is",
"redirected",
"beat_freq",
":",
"float",
"default",
"None",
"if",
"provided",
"subprocess",
"in",
"which",
"EnergyPlus",
"is",
"run",
"will",
"write",
"at",
"given",
"frequency",
"in",
"standard",
"output",
".",
"May",
"be",
"used",
"to",
"monitor",
"subprocess",
"state",
"."
] |
openergy/oplus
|
python
|
https://github.com/openergy/oplus/blob/f095868d1990c1d126e906ada6acbab26348b3d3/oplus/simulation.py#L112-L168
|
[
"def",
"simulate",
"(",
"cls",
",",
"epm_or_path",
",",
"weather_data_or_path",
",",
"base_dir_path",
",",
"simulation_name",
"=",
"None",
",",
"stdout",
"=",
"None",
",",
"stderr",
"=",
"None",
",",
"beat_freq",
"=",
"None",
")",
":",
"# manage simulation dir path",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"base_dir_path",
")",
":",
"raise",
"NotADirectoryError",
"(",
"\"Base dir path not found: '%s'\"",
"%",
"base_dir_path",
")",
"simulation_dir_path",
"=",
"base_dir_path",
"if",
"simulation_name",
"is",
"None",
"else",
"os",
".",
"path",
".",
"join",
"(",
"base_dir_path",
",",
"simulation_name",
")",
"# make directory if does not exist",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"simulation_dir_path",
")",
":",
"os",
".",
"mkdir",
"(",
"simulation_dir_path",
")",
"# run simulation",
"stdout",
"=",
"LoggerStreamWriter",
"(",
"logger_name",
"=",
"__name__",
",",
"level",
"=",
"logging",
".",
"INFO",
")",
"if",
"stdout",
"is",
"None",
"else",
"stdout",
"stderr",
"=",
"LoggerStreamWriter",
"(",
"logger_name",
"=",
"__name__",
",",
"level",
"=",
"logging",
".",
"ERROR",
")",
"if",
"stderr",
"is",
"None",
"else",
"stderr",
"run_eplus",
"(",
"epm_or_path",
",",
"weather_data_or_path",
",",
"simulation_dir_path",
",",
"stdout",
"=",
"stdout",
",",
"stderr",
"=",
"stderr",
",",
"beat_freq",
"=",
"beat_freq",
")",
"# return simulation object",
"return",
"cls",
"(",
"base_dir_path",
",",
"simulation_name",
"=",
"simulation_name",
")"
] |
f095868d1990c1d126e906ada6acbab26348b3d3
|
test
|
Simulation._file_refs
|
Defined here so that we can use the class variables, in order to subclass in oplusplus
|
oplus/simulation.py
|
def _file_refs(self):
"""
Defined here so that we can use the class variables, in order to subclass in oplusplus
"""
if self._prepared_file_refs is None:
self._prepared_file_refs = {
FILE_REFS.idf: FileInfo(
constructor=lambda path: self._epm_cls.from_idf(path, idd_or_buffer_or_path=self._idd),
get_path=lambda: get_input_file_path(self.dir_path, FILE_REFS.idf)
),
FILE_REFS.epw: FileInfo(
constructor=lambda path: self._weather_data_cls.from_epw(path),
get_path=lambda: get_input_file_path(self.dir_path, FILE_REFS.epw)
),
FILE_REFS.eio: FileInfo(
constructor=lambda path: self._eio_cls(path),
get_path=lambda: get_output_file_path(self.dir_path, FILE_REFS.eio)
),
FILE_REFS.eso: FileInfo(
constructor=lambda path: self._standard_output_cls(path),
get_path=lambda: get_output_file_path(
self.dir_path,
FILE_REFS.eso
)
),
FILE_REFS.mtr: FileInfo(
constructor=lambda path: self._standard_output_cls(path),
get_path=lambda: get_output_file_path(self.dir_path, FILE_REFS.mtr)
),
FILE_REFS.mtd: FileInfo(
constructor=lambda path: self._mtd_cls(path),
get_path=lambda: get_output_file_path(self.dir_path, FILE_REFS.mtd)
),
FILE_REFS.mdd: FileInfo(
constructor=lambda path: open(path).read(),
get_path=lambda: get_output_file_path(self.dir_path, FILE_REFS.mdd)
),
FILE_REFS.err: FileInfo(
constructor=lambda path: self._err_cls(path),
get_path=lambda: get_output_file_path(self.dir_path, FILE_REFS.err)
),
FILE_REFS.summary_table: FileInfo(
constructor=lambda path: self._summary_table_cls(path),
get_path=lambda: get_output_file_path(self.dir_path, FILE_REFS.summary_table)
)
}
return self._prepared_file_refs
|
def _file_refs(self):
"""
Defined here so that we can use the class variables, in order to subclass in oplusplus
"""
if self._prepared_file_refs is None:
self._prepared_file_refs = {
FILE_REFS.idf: FileInfo(
constructor=lambda path: self._epm_cls.from_idf(path, idd_or_buffer_or_path=self._idd),
get_path=lambda: get_input_file_path(self.dir_path, FILE_REFS.idf)
),
FILE_REFS.epw: FileInfo(
constructor=lambda path: self._weather_data_cls.from_epw(path),
get_path=lambda: get_input_file_path(self.dir_path, FILE_REFS.epw)
),
FILE_REFS.eio: FileInfo(
constructor=lambda path: self._eio_cls(path),
get_path=lambda: get_output_file_path(self.dir_path, FILE_REFS.eio)
),
FILE_REFS.eso: FileInfo(
constructor=lambda path: self._standard_output_cls(path),
get_path=lambda: get_output_file_path(
self.dir_path,
FILE_REFS.eso
)
),
FILE_REFS.mtr: FileInfo(
constructor=lambda path: self._standard_output_cls(path),
get_path=lambda: get_output_file_path(self.dir_path, FILE_REFS.mtr)
),
FILE_REFS.mtd: FileInfo(
constructor=lambda path: self._mtd_cls(path),
get_path=lambda: get_output_file_path(self.dir_path, FILE_REFS.mtd)
),
FILE_REFS.mdd: FileInfo(
constructor=lambda path: open(path).read(),
get_path=lambda: get_output_file_path(self.dir_path, FILE_REFS.mdd)
),
FILE_REFS.err: FileInfo(
constructor=lambda path: self._err_cls(path),
get_path=lambda: get_output_file_path(self.dir_path, FILE_REFS.err)
),
FILE_REFS.summary_table: FileInfo(
constructor=lambda path: self._summary_table_cls(path),
get_path=lambda: get_output_file_path(self.dir_path, FILE_REFS.summary_table)
)
}
return self._prepared_file_refs
|
[
"Defined",
"here",
"so",
"that",
"we",
"can",
"use",
"the",
"class",
"variables",
"in",
"order",
"to",
"subclass",
"in",
"oplusplus"
] |
openergy/oplus
|
python
|
https://github.com/openergy/oplus/blob/f095868d1990c1d126e906ada6acbab26348b3d3/oplus/simulation.py#L203-L250
|
[
"def",
"_file_refs",
"(",
"self",
")",
":",
"if",
"self",
".",
"_prepared_file_refs",
"is",
"None",
":",
"self",
".",
"_prepared_file_refs",
"=",
"{",
"FILE_REFS",
".",
"idf",
":",
"FileInfo",
"(",
"constructor",
"=",
"lambda",
"path",
":",
"self",
".",
"_epm_cls",
".",
"from_idf",
"(",
"path",
",",
"idd_or_buffer_or_path",
"=",
"self",
".",
"_idd",
")",
",",
"get_path",
"=",
"lambda",
":",
"get_input_file_path",
"(",
"self",
".",
"dir_path",
",",
"FILE_REFS",
".",
"idf",
")",
")",
",",
"FILE_REFS",
".",
"epw",
":",
"FileInfo",
"(",
"constructor",
"=",
"lambda",
"path",
":",
"self",
".",
"_weather_data_cls",
".",
"from_epw",
"(",
"path",
")",
",",
"get_path",
"=",
"lambda",
":",
"get_input_file_path",
"(",
"self",
".",
"dir_path",
",",
"FILE_REFS",
".",
"epw",
")",
")",
",",
"FILE_REFS",
".",
"eio",
":",
"FileInfo",
"(",
"constructor",
"=",
"lambda",
"path",
":",
"self",
".",
"_eio_cls",
"(",
"path",
")",
",",
"get_path",
"=",
"lambda",
":",
"get_output_file_path",
"(",
"self",
".",
"dir_path",
",",
"FILE_REFS",
".",
"eio",
")",
")",
",",
"FILE_REFS",
".",
"eso",
":",
"FileInfo",
"(",
"constructor",
"=",
"lambda",
"path",
":",
"self",
".",
"_standard_output_cls",
"(",
"path",
")",
",",
"get_path",
"=",
"lambda",
":",
"get_output_file_path",
"(",
"self",
".",
"dir_path",
",",
"FILE_REFS",
".",
"eso",
")",
")",
",",
"FILE_REFS",
".",
"mtr",
":",
"FileInfo",
"(",
"constructor",
"=",
"lambda",
"path",
":",
"self",
".",
"_standard_output_cls",
"(",
"path",
")",
",",
"get_path",
"=",
"lambda",
":",
"get_output_file_path",
"(",
"self",
".",
"dir_path",
",",
"FILE_REFS",
".",
"mtr",
")",
")",
",",
"FILE_REFS",
".",
"mtd",
":",
"FileInfo",
"(",
"constructor",
"=",
"lambda",
"path",
":",
"self",
".",
"_mtd_cls",
"(",
"path",
")",
",",
"get_path",
"=",
"lambda",
":",
"get_output_file_path",
"(",
"self",
".",
"dir_path",
",",
"FILE_REFS",
".",
"mtd",
")",
")",
",",
"FILE_REFS",
".",
"mdd",
":",
"FileInfo",
"(",
"constructor",
"=",
"lambda",
"path",
":",
"open",
"(",
"path",
")",
".",
"read",
"(",
")",
",",
"get_path",
"=",
"lambda",
":",
"get_output_file_path",
"(",
"self",
".",
"dir_path",
",",
"FILE_REFS",
".",
"mdd",
")",
")",
",",
"FILE_REFS",
".",
"err",
":",
"FileInfo",
"(",
"constructor",
"=",
"lambda",
"path",
":",
"self",
".",
"_err_cls",
"(",
"path",
")",
",",
"get_path",
"=",
"lambda",
":",
"get_output_file_path",
"(",
"self",
".",
"dir_path",
",",
"FILE_REFS",
".",
"err",
")",
")",
",",
"FILE_REFS",
".",
"summary_table",
":",
"FileInfo",
"(",
"constructor",
"=",
"lambda",
"path",
":",
"self",
".",
"_summary_table_cls",
"(",
"path",
")",
",",
"get_path",
"=",
"lambda",
":",
"get_output_file_path",
"(",
"self",
".",
"dir_path",
",",
"FILE_REFS",
".",
"summary_table",
")",
")",
"}",
"return",
"self",
".",
"_prepared_file_refs"
] |
f095868d1990c1d126e906ada6acbab26348b3d3
|
test
|
Simulation.exists
|
Parameters
----------
file_ref: str
reference of file.
Available references: 'idf', 'epw', 'eio', 'eso', 'mtr', 'mtd', 'mdd', 'err', 'summary_table'
See EnergyPlus documentation for more information.
Returns
-------
Boolean
|
oplus/simulation.py
|
def exists(self, file_ref):
"""
Parameters
----------
file_ref: str
reference of file.
Available references: 'idf', 'epw', 'eio', 'eso', 'mtr', 'mtd', 'mdd', 'err', 'summary_table'
See EnergyPlus documentation for more information.
Returns
-------
Boolean
"""
if file_ref not in FILE_REFS:
raise ValueError("Unknown file_ref: '%s'. Available: '%s'." % (file_ref, list(sorted(FILE_REFS._fields))))
return os.path.isfile(self._path(file_ref))
|
def exists(self, file_ref):
"""
Parameters
----------
file_ref: str
reference of file.
Available references: 'idf', 'epw', 'eio', 'eso', 'mtr', 'mtd', 'mdd', 'err', 'summary_table'
See EnergyPlus documentation for more information.
Returns
-------
Boolean
"""
if file_ref not in FILE_REFS:
raise ValueError("Unknown file_ref: '%s'. Available: '%s'." % (file_ref, list(sorted(FILE_REFS._fields))))
return os.path.isfile(self._path(file_ref))
|
[
"Parameters",
"----------",
"file_ref",
":",
"str",
"reference",
"of",
"file",
".",
"Available",
"references",
":",
"idf",
"epw",
"eio",
"eso",
"mtr",
"mtd",
"mdd",
"err",
"summary_table",
"See",
"EnergyPlus",
"documentation",
"for",
"more",
"information",
"."
] |
openergy/oplus
|
python
|
https://github.com/openergy/oplus/blob/f095868d1990c1d126e906ada6acbab26348b3d3/oplus/simulation.py#L275-L290
|
[
"def",
"exists",
"(",
"self",
",",
"file_ref",
")",
":",
"if",
"file_ref",
"not",
"in",
"FILE_REFS",
":",
"raise",
"ValueError",
"(",
"\"Unknown file_ref: '%s'. Available: '%s'.\"",
"%",
"(",
"file_ref",
",",
"list",
"(",
"sorted",
"(",
"FILE_REFS",
".",
"_fields",
")",
")",
")",
")",
"return",
"os",
".",
"path",
".",
"isfile",
"(",
"self",
".",
"_path",
"(",
"file_ref",
")",
")"
] |
f095868d1990c1d126e906ada6acbab26348b3d3
|
test
|
Simulation.get_file_path
|
Parameters
----------
file_ref: str
reference of file.
Available references: 'idf', 'epw', 'eio', 'eso', 'mtr', 'mtd', 'mdd', 'err', 'summary_table'
See EnergyPlus documentation for more information.
Returns
-------
Instance of required output.
|
oplus/simulation.py
|
def get_file_path(self, file_ref):
"""
Parameters
----------
file_ref: str
reference of file.
Available references: 'idf', 'epw', 'eio', 'eso', 'mtr', 'mtd', 'mdd', 'err', 'summary_table'
See EnergyPlus documentation for more information.
Returns
-------
Instance of required output.
"""
if not self.exists(file_ref):
raise FileNotFoundError("File '%s' not found in simulation '%s'." % (file_ref, self._path(file_ref)))
return self._path(file_ref)
|
def get_file_path(self, file_ref):
"""
Parameters
----------
file_ref: str
reference of file.
Available references: 'idf', 'epw', 'eio', 'eso', 'mtr', 'mtd', 'mdd', 'err', 'summary_table'
See EnergyPlus documentation for more information.
Returns
-------
Instance of required output.
"""
if not self.exists(file_ref):
raise FileNotFoundError("File '%s' not found in simulation '%s'." % (file_ref, self._path(file_ref)))
return self._path(file_ref)
|
[
"Parameters",
"----------",
"file_ref",
":",
"str",
"reference",
"of",
"file",
".",
"Available",
"references",
":",
"idf",
"epw",
"eio",
"eso",
"mtr",
"mtd",
"mdd",
"err",
"summary_table",
"See",
"EnergyPlus",
"documentation",
"for",
"more",
"information",
"."
] |
openergy/oplus
|
python
|
https://github.com/openergy/oplus/blob/f095868d1990c1d126e906ada6acbab26348b3d3/oplus/simulation.py#L292-L307
|
[
"def",
"get_file_path",
"(",
"self",
",",
"file_ref",
")",
":",
"if",
"not",
"self",
".",
"exists",
"(",
"file_ref",
")",
":",
"raise",
"FileNotFoundError",
"(",
"\"File '%s' not found in simulation '%s'.\"",
"%",
"(",
"file_ref",
",",
"self",
".",
"_path",
"(",
"file_ref",
")",
")",
")",
"return",
"self",
".",
"_path",
"(",
"file_ref",
")"
] |
f095868d1990c1d126e906ada6acbab26348b3d3
|
test
|
default_external_files_dir_name
|
Parameters
----------
model_name: with or without extension
|
oplus/epm/epm.py
|
def default_external_files_dir_name(model_name):
"""
Parameters
----------
model_name: with or without extension
"""
name, ext = os.path.splitext(model_name)
return name + CONF.external_files_suffix
|
def default_external_files_dir_name(model_name):
"""
Parameters
----------
model_name: with or without extension
"""
name, ext = os.path.splitext(model_name)
return name + CONF.external_files_suffix
|
[
"Parameters",
"----------",
"model_name",
":",
"with",
"or",
"without",
"extension"
] |
openergy/oplus
|
python
|
https://github.com/openergy/oplus/blob/f095868d1990c1d126e906ada6acbab26348b3d3/oplus/epm/epm.py#L27-L34
|
[
"def",
"default_external_files_dir_name",
"(",
"model_name",
")",
":",
"name",
",",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"model_name",
")",
"return",
"name",
"+",
"CONF",
".",
"external_files_suffix"
] |
f095868d1990c1d126e906ada6acbab26348b3d3
|
test
|
Epm._dev_populate_from_json_data
|
!! Must only be called once, when empty !!
|
oplus/epm/epm.py
|
def _dev_populate_from_json_data(self, json_data):
"""
!! Must only be called once, when empty !!
"""
# workflow
# --------
# (methods belonging to create/update/delete framework:
# epm._dev_populate_from_json_data, table.batch_add, record.update, queryset.delete, record.delete)
# 1. add inert
# * data is checked
# * old links are unregistered
# * record is stored in table (=> pk uniqueness is checked)
# 2. activate: hooks, links, external files
# manage comment if any
comment = json_data.pop("_comment", None)
if comment is not None:
self._comment = comment
# populate external files
external_files_data = json_data.pop("_external_files", dict())
self._dev_external_files_manager.populate_from_json_data(external_files_data)
# manage records
added_records = []
for table_ref, json_data_records in json_data.items():
# find table
table = getattr(self, table_ref)
# create record (inert)
records = table._dev_add_inert(json_data_records)
# add records (inert)
added_records.extend(records)
# activate hooks
for r in added_records:
r._dev_activate_hooks()
# activate links and external files
for r in added_records:
r._dev_activate_links()
r._dev_activate_external_files()
|
def _dev_populate_from_json_data(self, json_data):
"""
!! Must only be called once, when empty !!
"""
# workflow
# --------
# (methods belonging to create/update/delete framework:
# epm._dev_populate_from_json_data, table.batch_add, record.update, queryset.delete, record.delete)
# 1. add inert
# * data is checked
# * old links are unregistered
# * record is stored in table (=> pk uniqueness is checked)
# 2. activate: hooks, links, external files
# manage comment if any
comment = json_data.pop("_comment", None)
if comment is not None:
self._comment = comment
# populate external files
external_files_data = json_data.pop("_external_files", dict())
self._dev_external_files_manager.populate_from_json_data(external_files_data)
# manage records
added_records = []
for table_ref, json_data_records in json_data.items():
# find table
table = getattr(self, table_ref)
# create record (inert)
records = table._dev_add_inert(json_data_records)
# add records (inert)
added_records.extend(records)
# activate hooks
for r in added_records:
r._dev_activate_hooks()
# activate links and external files
for r in added_records:
r._dev_activate_links()
r._dev_activate_external_files()
|
[
"!!",
"Must",
"only",
"be",
"called",
"once",
"when",
"empty",
"!!"
] |
openergy/oplus
|
python
|
https://github.com/openergy/oplus/blob/f095868d1990c1d126e906ada6acbab26348b3d3/oplus/epm/epm.py#L100-L143
|
[
"def",
"_dev_populate_from_json_data",
"(",
"self",
",",
"json_data",
")",
":",
"# workflow",
"# --------",
"# (methods belonging to create/update/delete framework:",
"# epm._dev_populate_from_json_data, table.batch_add, record.update, queryset.delete, record.delete)",
"# 1. add inert",
"# * data is checked",
"# * old links are unregistered",
"# * record is stored in table (=> pk uniqueness is checked)",
"# 2. activate: hooks, links, external files",
"# manage comment if any",
"comment",
"=",
"json_data",
".",
"pop",
"(",
"\"_comment\"",
",",
"None",
")",
"if",
"comment",
"is",
"not",
"None",
":",
"self",
".",
"_comment",
"=",
"comment",
"# populate external files",
"external_files_data",
"=",
"json_data",
".",
"pop",
"(",
"\"_external_files\"",
",",
"dict",
"(",
")",
")",
"self",
".",
"_dev_external_files_manager",
".",
"populate_from_json_data",
"(",
"external_files_data",
")",
"# manage records",
"added_records",
"=",
"[",
"]",
"for",
"table_ref",
",",
"json_data_records",
"in",
"json_data",
".",
"items",
"(",
")",
":",
"# find table",
"table",
"=",
"getattr",
"(",
"self",
",",
"table_ref",
")",
"# create record (inert)",
"records",
"=",
"table",
".",
"_dev_add_inert",
"(",
"json_data_records",
")",
"# add records (inert)",
"added_records",
".",
"extend",
"(",
"records",
")",
"# activate hooks",
"for",
"r",
"in",
"added_records",
":",
"r",
".",
"_dev_activate_hooks",
"(",
")",
"# activate links and external files",
"for",
"r",
"in",
"added_records",
":",
"r",
".",
"_dev_activate_links",
"(",
")",
"r",
".",
"_dev_activate_external_files",
"(",
")"
] |
f095868d1990c1d126e906ada6acbab26348b3d3
|
test
|
Epm.get_external_files
|
An external file manages file paths.
|
oplus/epm/epm.py
|
def get_external_files(self):
"""
An external file manages file paths.
"""
external_files = []
for table in self._tables.values():
for r in table:
external_files.extend([ef for ef in r.get_external_files()])
return external_files
|
def get_external_files(self):
"""
An external file manages file paths.
"""
external_files = []
for table in self._tables.values():
for r in table:
external_files.extend([ef for ef in r.get_external_files()])
return external_files
|
[
"An",
"external",
"file",
"manages",
"file",
"paths",
"."
] |
openergy/oplus
|
python
|
https://github.com/openergy/oplus/blob/f095868d1990c1d126e906ada6acbab26348b3d3/oplus/epm/epm.py#L187-L195
|
[
"def",
"get_external_files",
"(",
"self",
")",
":",
"external_files",
"=",
"[",
"]",
"for",
"table",
"in",
"self",
".",
"_tables",
".",
"values",
"(",
")",
":",
"for",
"r",
"in",
"table",
":",
"external_files",
".",
"extend",
"(",
"[",
"ef",
"for",
"ef",
"in",
"r",
".",
"get_external_files",
"(",
")",
"]",
")",
"return",
"external_files"
] |
f095868d1990c1d126e906ada6acbab26348b3d3
|
test
|
Epm.set_defaults
|
All fields of Epm with a default value and that are null will be set to their default value.
|
oplus/epm/epm.py
|
def set_defaults(self):
"""
All fields of Epm with a default value and that are null will be set to their default value.
"""
for table in self._tables.values():
for r in table:
r.set_defaults()
|
def set_defaults(self):
"""
All fields of Epm with a default value and that are null will be set to their default value.
"""
for table in self._tables.values():
for r in table:
r.set_defaults()
|
[
"All",
"fields",
"of",
"Epm",
"with",
"a",
"default",
"value",
"and",
"that",
"are",
"null",
"will",
"be",
"set",
"to",
"their",
"default",
"value",
"."
] |
openergy/oplus
|
python
|
https://github.com/openergy/oplus/blob/f095868d1990c1d126e906ada6acbab26348b3d3/oplus/epm/epm.py#L201-L207
|
[
"def",
"set_defaults",
"(",
"self",
")",
":",
"for",
"table",
"in",
"self",
".",
"_tables",
".",
"values",
"(",
")",
":",
"for",
"r",
"in",
"table",
":",
"r",
".",
"set_defaults",
"(",
")"
] |
f095868d1990c1d126e906ada6acbab26348b3d3
|
test
|
Epm.from_json_data
|
Parameters
----------
json_data: dict
Dictionary of serialized data (text, floats, ints, ...). For more information on data structure, create an
Epm and use to_json_data or to_json.
check_required: boolean, default True
If True, will raise an exception if a required field is missing. If False, not not perform any checks.
idd_or_buffer_or_path: (expert) to load using a custom idd
Returns
-------
An Epm instance.
|
oplus/epm/epm.py
|
def from_json_data(cls, json_data, check_required=True, idd_or_buffer_or_path=None):
"""
Parameters
----------
json_data: dict
Dictionary of serialized data (text, floats, ints, ...). For more information on data structure, create an
Epm and use to_json_data or to_json.
check_required: boolean, default True
If True, will raise an exception if a required field is missing. If False, not not perform any checks.
idd_or_buffer_or_path: (expert) to load using a custom idd
Returns
-------
An Epm instance.
"""
epm = cls(
idd_or_buffer_or_path=idd_or_buffer_or_path,
check_required=check_required
)
epm._dev_populate_from_json_data(json_data)
return epm
|
def from_json_data(cls, json_data, check_required=True, idd_or_buffer_or_path=None):
"""
Parameters
----------
json_data: dict
Dictionary of serialized data (text, floats, ints, ...). For more information on data structure, create an
Epm and use to_json_data or to_json.
check_required: boolean, default True
If True, will raise an exception if a required field is missing. If False, not not perform any checks.
idd_or_buffer_or_path: (expert) to load using a custom idd
Returns
-------
An Epm instance.
"""
epm = cls(
idd_or_buffer_or_path=idd_or_buffer_or_path,
check_required=check_required
)
epm._dev_populate_from_json_data(json_data)
return epm
|
[
"Parameters",
"----------",
"json_data",
":",
"dict",
"Dictionary",
"of",
"serialized",
"data",
"(",
"text",
"floats",
"ints",
"...",
")",
".",
"For",
"more",
"information",
"on",
"data",
"structure",
"create",
"an",
"Epm",
"and",
"use",
"to_json_data",
"or",
"to_json",
".",
"check_required",
":",
"boolean",
"default",
"True",
"If",
"True",
"will",
"raise",
"an",
"exception",
"if",
"a",
"required",
"field",
"is",
"missing",
".",
"If",
"False",
"not",
"not",
"perform",
"any",
"checks",
".",
"idd_or_buffer_or_path",
":",
"(",
"expert",
")",
"to",
"load",
"using",
"a",
"custom",
"idd"
] |
openergy/oplus
|
python
|
https://github.com/openergy/oplus/blob/f095868d1990c1d126e906ada6acbab26348b3d3/oplus/epm/epm.py#L219-L240
|
[
"def",
"from_json_data",
"(",
"cls",
",",
"json_data",
",",
"check_required",
"=",
"True",
",",
"idd_or_buffer_or_path",
"=",
"None",
")",
":",
"epm",
"=",
"cls",
"(",
"idd_or_buffer_or_path",
"=",
"idd_or_buffer_or_path",
",",
"check_required",
"=",
"check_required",
")",
"epm",
".",
"_dev_populate_from_json_data",
"(",
"json_data",
")",
"return",
"epm"
] |
f095868d1990c1d126e906ada6acbab26348b3d3
|
test
|
Epm.from_idf
|
Parameters
----------
buffer_or_path: idf buffer or path
check_required: boolean, default True
If True, will raise an exception if a required field is missing. If False, not not perform any checks.
idd_or_buffer_or_path: (expert) to load using a custom idd
Returns
-------
An Epm instance.
|
oplus/epm/epm.py
|
def from_idf(cls, buffer_or_path, check_required=True, idd_or_buffer_or_path=None):
"""
Parameters
----------
buffer_or_path: idf buffer or path
check_required: boolean, default True
If True, will raise an exception if a required field is missing. If False, not not perform any checks.
idd_or_buffer_or_path: (expert) to load using a custom idd
Returns
-------
An Epm instance.
"""
# todo: add geometry only (or equivalent)
return cls._create_from_buffer_or_path(
parse_idf,
buffer_or_path,
idd_or_buffer_or_path=idd_or_buffer_or_path,
check_required=check_required
)
|
def from_idf(cls, buffer_or_path, check_required=True, idd_or_buffer_or_path=None):
"""
Parameters
----------
buffer_or_path: idf buffer or path
check_required: boolean, default True
If True, will raise an exception if a required field is missing. If False, not not perform any checks.
idd_or_buffer_or_path: (expert) to load using a custom idd
Returns
-------
An Epm instance.
"""
# todo: add geometry only (or equivalent)
return cls._create_from_buffer_or_path(
parse_idf,
buffer_or_path,
idd_or_buffer_or_path=idd_or_buffer_or_path,
check_required=check_required
)
|
[
"Parameters",
"----------",
"buffer_or_path",
":",
"idf",
"buffer",
"or",
"path",
"check_required",
":",
"boolean",
"default",
"True",
"If",
"True",
"will",
"raise",
"an",
"exception",
"if",
"a",
"required",
"field",
"is",
"missing",
".",
"If",
"False",
"not",
"not",
"perform",
"any",
"checks",
".",
"idd_or_buffer_or_path",
":",
"(",
"expert",
")",
"to",
"load",
"using",
"a",
"custom",
"idd"
] |
openergy/oplus
|
python
|
https://github.com/openergy/oplus/blob/f095868d1990c1d126e906ada6acbab26348b3d3/oplus/epm/epm.py#L243-L262
|
[
"def",
"from_idf",
"(",
"cls",
",",
"buffer_or_path",
",",
"check_required",
"=",
"True",
",",
"idd_or_buffer_or_path",
"=",
"None",
")",
":",
"# todo: add geometry only (or equivalent)",
"return",
"cls",
".",
"_create_from_buffer_or_path",
"(",
"parse_idf",
",",
"buffer_or_path",
",",
"idd_or_buffer_or_path",
"=",
"idd_or_buffer_or_path",
",",
"check_required",
"=",
"check_required",
")"
] |
f095868d1990c1d126e906ada6acbab26348b3d3
|
test
|
Epm.from_json
|
Parameters
----------
buffer_or_path: json buffer or path
check_required: boolean, default True
If True, will raise an exception if a required field is missing. If False, not not perform any checks.
idd_or_buffer_or_path: (expert) to load using a custom idd
Returns
-------
An Epm instance.
|
oplus/epm/epm.py
|
def from_json(cls, buffer_or_path, check_required=True, idd_or_buffer_or_path=None):
"""
Parameters
----------
buffer_or_path: json buffer or path
check_required: boolean, default True
If True, will raise an exception if a required field is missing. If False, not not perform any checks.
idd_or_buffer_or_path: (expert) to load using a custom idd
Returns
-------
An Epm instance.
"""
return cls._create_from_buffer_or_path(
json.load,
buffer_or_path,
idd_or_buffer_or_path=idd_or_buffer_or_path,
check_required=check_required
)
|
def from_json(cls, buffer_or_path, check_required=True, idd_or_buffer_or_path=None):
"""
Parameters
----------
buffer_or_path: json buffer or path
check_required: boolean, default True
If True, will raise an exception if a required field is missing. If False, not not perform any checks.
idd_or_buffer_or_path: (expert) to load using a custom idd
Returns
-------
An Epm instance.
"""
return cls._create_from_buffer_or_path(
json.load,
buffer_or_path,
idd_or_buffer_or_path=idd_or_buffer_or_path,
check_required=check_required
)
|
[
"Parameters",
"----------",
"buffer_or_path",
":",
"json",
"buffer",
"or",
"path",
"check_required",
":",
"boolean",
"default",
"True",
"If",
"True",
"will",
"raise",
"an",
"exception",
"if",
"a",
"required",
"field",
"is",
"missing",
".",
"If",
"False",
"not",
"not",
"perform",
"any",
"checks",
".",
"idd_or_buffer_or_path",
":",
"(",
"expert",
")",
"to",
"load",
"using",
"a",
"custom",
"idd"
] |
openergy/oplus
|
python
|
https://github.com/openergy/oplus/blob/f095868d1990c1d126e906ada6acbab26348b3d3/oplus/epm/epm.py#L265-L283
|
[
"def",
"from_json",
"(",
"cls",
",",
"buffer_or_path",
",",
"check_required",
"=",
"True",
",",
"idd_or_buffer_or_path",
"=",
"None",
")",
":",
"return",
"cls",
".",
"_create_from_buffer_or_path",
"(",
"json",
".",
"load",
",",
"buffer_or_path",
",",
"idd_or_buffer_or_path",
"=",
"idd_or_buffer_or_path",
",",
"check_required",
"=",
"check_required",
")"
] |
f095868d1990c1d126e906ada6acbab26348b3d3
|
test
|
Epm.to_json_data
|
Returns
-------
A dictionary of serialized data.
|
oplus/epm/epm.py
|
def to_json_data(self):
"""
Returns
-------
A dictionary of serialized data.
"""
# create data
d = collections.OrderedDict((t.get_ref(), t.to_json_data()) for t in self._tables.values())
d["_comment"] = self._comment
d.move_to_end("_comment", last=False)
d["_external_files"] = self._dev_external_files_manager
return d
|
def to_json_data(self):
"""
Returns
-------
A dictionary of serialized data.
"""
# create data
d = collections.OrderedDict((t.get_ref(), t.to_json_data()) for t in self._tables.values())
d["_comment"] = self._comment
d.move_to_end("_comment", last=False)
d["_external_files"] = self._dev_external_files_manager
return d
|
[
"Returns",
"-------",
"A",
"dictionary",
"of",
"serialized",
"data",
"."
] |
openergy/oplus
|
python
|
https://github.com/openergy/oplus/blob/f095868d1990c1d126e906ada6acbab26348b3d3/oplus/epm/epm.py#L286-L297
|
[
"def",
"to_json_data",
"(",
"self",
")",
":",
"# create data",
"d",
"=",
"collections",
".",
"OrderedDict",
"(",
"(",
"t",
".",
"get_ref",
"(",
")",
",",
"t",
".",
"to_json_data",
"(",
")",
")",
"for",
"t",
"in",
"self",
".",
"_tables",
".",
"values",
"(",
")",
")",
"d",
"[",
"\"_comment\"",
"]",
"=",
"self",
".",
"_comment",
"d",
".",
"move_to_end",
"(",
"\"_comment\"",
",",
"last",
"=",
"False",
")",
"d",
"[",
"\"_external_files\"",
"]",
"=",
"self",
".",
"_dev_external_files_manager",
"return",
"d"
] |
f095868d1990c1d126e906ada6acbab26348b3d3
|
test
|
Epm.to_json
|
Parameters
----------
buffer_or_path: buffer or path, default None
output to write into. If None, will return a json string.
indent: int, default 2
Defines the indentation of the json
Returns
-------
None, or a json string (if buffer_or_path is None).
|
oplus/epm/epm.py
|
def to_json(self, buffer_or_path=None, indent=2):
"""
Parameters
----------
buffer_or_path: buffer or path, default None
output to write into. If None, will return a json string.
indent: int, default 2
Defines the indentation of the json
Returns
-------
None, or a json string (if buffer_or_path is None).
"""
# return json
return json_data_to_json(
self.to_json_data(),
buffer_or_path=buffer_or_path,
indent=indent
)
|
def to_json(self, buffer_or_path=None, indent=2):
"""
Parameters
----------
buffer_or_path: buffer or path, default None
output to write into. If None, will return a json string.
indent: int, default 2
Defines the indentation of the json
Returns
-------
None, or a json string (if buffer_or_path is None).
"""
# return json
return json_data_to_json(
self.to_json_data(),
buffer_or_path=buffer_or_path,
indent=indent
)
|
[
"Parameters",
"----------",
"buffer_or_path",
":",
"buffer",
"or",
"path",
"default",
"None",
"output",
"to",
"write",
"into",
".",
"If",
"None",
"will",
"return",
"a",
"json",
"string",
".",
"indent",
":",
"int",
"default",
"2",
"Defines",
"the",
"indentation",
"of",
"the",
"json"
] |
openergy/oplus
|
python
|
https://github.com/openergy/oplus/blob/f095868d1990c1d126e906ada6acbab26348b3d3/oplus/epm/epm.py#L299-L317
|
[
"def",
"to_json",
"(",
"self",
",",
"buffer_or_path",
"=",
"None",
",",
"indent",
"=",
"2",
")",
":",
"# return json",
"return",
"json_data_to_json",
"(",
"self",
".",
"to_json_data",
"(",
")",
",",
"buffer_or_path",
"=",
"buffer_or_path",
",",
"indent",
"=",
"indent",
")"
] |
f095868d1990c1d126e906ada6acbab26348b3d3
|
test
|
Epm.to_idf
|
Parameters
----------
buffer_or_path: buffer or path, default None
output to write into. If None, will return a json string.
dump_external_files: boolean, default True
if True, external files will be dumped in external files directory
Returns
-------
None, or an idf string (if buffer_or_path is None).
|
oplus/epm/epm.py
|
def to_idf(self, buffer_or_path=None, dump_external_files=True):
"""
Parameters
----------
buffer_or_path: buffer or path, default None
output to write into. If None, will return a json string.
dump_external_files: boolean, default True
if True, external files will be dumped in external files directory
Returns
-------
None, or an idf string (if buffer_or_path is None).
"""
# prepare comment
comment = get_multi_line_copyright_message()
if self._comment != "":
comment += textwrap.indent(self._comment, "! ", lambda line: True)
comment += "\n\n"
# prepare external files dir path if file path
if isinstance(buffer_or_path, str):
dir_path, file_name = os.path.split(buffer_or_path)
model_name, _ = os.path.splitext(file_name)
else:
model_name, dir_path = None, os.path.curdir
# dump files if asked
if dump_external_files:
self.dump_external_files(
target_dir_path=os.path.join(dir_path, get_external_files_dir_name(model_name=model_name))
)
# prepare body
formatted_records = []
for table_ref, table in self._tables.items(): # self._tables is already sorted
formatted_records.extend([r.to_idf(model_name=model_name) for r in sorted(table)])
body = "\n\n".join(formatted_records)
# return
content = comment + body
return multi_mode_write(
lambda f: f.write(content),
lambda: content,
buffer_or_path
)
|
def to_idf(self, buffer_or_path=None, dump_external_files=True):
"""
Parameters
----------
buffer_or_path: buffer or path, default None
output to write into. If None, will return a json string.
dump_external_files: boolean, default True
if True, external files will be dumped in external files directory
Returns
-------
None, or an idf string (if buffer_or_path is None).
"""
# prepare comment
comment = get_multi_line_copyright_message()
if self._comment != "":
comment += textwrap.indent(self._comment, "! ", lambda line: True)
comment += "\n\n"
# prepare external files dir path if file path
if isinstance(buffer_or_path, str):
dir_path, file_name = os.path.split(buffer_or_path)
model_name, _ = os.path.splitext(file_name)
else:
model_name, dir_path = None, os.path.curdir
# dump files if asked
if dump_external_files:
self.dump_external_files(
target_dir_path=os.path.join(dir_path, get_external_files_dir_name(model_name=model_name))
)
# prepare body
formatted_records = []
for table_ref, table in self._tables.items(): # self._tables is already sorted
formatted_records.extend([r.to_idf(model_name=model_name) for r in sorted(table)])
body = "\n\n".join(formatted_records)
# return
content = comment + body
return multi_mode_write(
lambda f: f.write(content),
lambda: content,
buffer_or_path
)
|
[
"Parameters",
"----------",
"buffer_or_path",
":",
"buffer",
"or",
"path",
"default",
"None",
"output",
"to",
"write",
"into",
".",
"If",
"None",
"will",
"return",
"a",
"json",
"string",
".",
"dump_external_files",
":",
"boolean",
"default",
"True",
"if",
"True",
"external",
"files",
"will",
"be",
"dumped",
"in",
"external",
"files",
"directory"
] |
openergy/oplus
|
python
|
https://github.com/openergy/oplus/blob/f095868d1990c1d126e906ada6acbab26348b3d3/oplus/epm/epm.py#L319-L363
|
[
"def",
"to_idf",
"(",
"self",
",",
"buffer_or_path",
"=",
"None",
",",
"dump_external_files",
"=",
"True",
")",
":",
"# prepare comment",
"comment",
"=",
"get_multi_line_copyright_message",
"(",
")",
"if",
"self",
".",
"_comment",
"!=",
"\"\"",
":",
"comment",
"+=",
"textwrap",
".",
"indent",
"(",
"self",
".",
"_comment",
",",
"\"! \"",
",",
"lambda",
"line",
":",
"True",
")",
"comment",
"+=",
"\"\\n\\n\"",
"# prepare external files dir path if file path",
"if",
"isinstance",
"(",
"buffer_or_path",
",",
"str",
")",
":",
"dir_path",
",",
"file_name",
"=",
"os",
".",
"path",
".",
"split",
"(",
"buffer_or_path",
")",
"model_name",
",",
"_",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"file_name",
")",
"else",
":",
"model_name",
",",
"dir_path",
"=",
"None",
",",
"os",
".",
"path",
".",
"curdir",
"# dump files if asked",
"if",
"dump_external_files",
":",
"self",
".",
"dump_external_files",
"(",
"target_dir_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dir_path",
",",
"get_external_files_dir_name",
"(",
"model_name",
"=",
"model_name",
")",
")",
")",
"# prepare body",
"formatted_records",
"=",
"[",
"]",
"for",
"table_ref",
",",
"table",
"in",
"self",
".",
"_tables",
".",
"items",
"(",
")",
":",
"# self._tables is already sorted",
"formatted_records",
".",
"extend",
"(",
"[",
"r",
".",
"to_idf",
"(",
"model_name",
"=",
"model_name",
")",
"for",
"r",
"in",
"sorted",
"(",
"table",
")",
"]",
")",
"body",
"=",
"\"\\n\\n\"",
".",
"join",
"(",
"formatted_records",
")",
"# return",
"content",
"=",
"comment",
"+",
"body",
"return",
"multi_mode_write",
"(",
"lambda",
"f",
":",
"f",
".",
"write",
"(",
"content",
")",
",",
"lambda",
":",
"content",
",",
"buffer_or_path",
")"
] |
f095868d1990c1d126e906ada6acbab26348b3d3
|
test
|
Queryset.select
|
Parameters
----------
filter_by: callable, default None
Callable must take one argument (a record of queryset), and return True to keep record, or False to skip it.
Example : .select(lambda x: x.name == "my_name").
If None, records are not filtered.
Returns
-------
Queryset instance, containing all selected records.
|
oplus/epm/queryset.py
|
def select(self, filter_by=None):
"""
Parameters
----------
filter_by: callable, default None
Callable must take one argument (a record of queryset), and return True to keep record, or False to skip it.
Example : .select(lambda x: x.name == "my_name").
If None, records are not filtered.
Returns
-------
Queryset instance, containing all selected records.
"""
iterator = self._records if filter_by is None else filter(filter_by, self._records)
return Queryset(self._table, iterator)
|
def select(self, filter_by=None):
"""
Parameters
----------
filter_by: callable, default None
Callable must take one argument (a record of queryset), and return True to keep record, or False to skip it.
Example : .select(lambda x: x.name == "my_name").
If None, records are not filtered.
Returns
-------
Queryset instance, containing all selected records.
"""
iterator = self._records if filter_by is None else filter(filter_by, self._records)
return Queryset(self._table, iterator)
|
[
"Parameters",
"----------",
"filter_by",
":",
"callable",
"default",
"None",
"Callable",
"must",
"take",
"one",
"argument",
"(",
"a",
"record",
"of",
"queryset",
")",
"and",
"return",
"True",
"to",
"keep",
"record",
"or",
"False",
"to",
"skip",
"it",
".",
"Example",
":",
".",
"select",
"(",
"lambda",
"x",
":",
"x",
".",
"name",
"==",
"my_name",
")",
".",
"If",
"None",
"records",
"are",
"not",
"filtered",
"."
] |
openergy/oplus
|
python
|
https://github.com/openergy/oplus/blob/f095868d1990c1d126e906ada6acbab26348b3d3/oplus/epm/queryset.py#L94-L108
|
[
"def",
"select",
"(",
"self",
",",
"filter_by",
"=",
"None",
")",
":",
"iterator",
"=",
"self",
".",
"_records",
"if",
"filter_by",
"is",
"None",
"else",
"filter",
"(",
"filter_by",
",",
"self",
".",
"_records",
")",
"return",
"Queryset",
"(",
"self",
".",
"_table",
",",
"iterator",
")"
] |
f095868d1990c1d126e906ada6acbab26348b3d3
|
test
|
Queryset.one
|
Parameters
----------
filter_by: callable, default None
Callable must take one argument (a record of table), and return True to keep record, or False to skip it.
Example : .one(lambda x: x.name == "my_name").
If None, records are not filtered.
Returns
-------
Record instance if one and only one record is found. Else raises.
Raises
------
RecordDoesNotExistError if no record is found
MultipleRecordsReturnedError if multiple records are found
|
oplus/epm/queryset.py
|
def one(self, filter_by=None):
"""
Parameters
----------
filter_by: callable, default None
Callable must take one argument (a record of table), and return True to keep record, or False to skip it.
Example : .one(lambda x: x.name == "my_name").
If None, records are not filtered.
Returns
-------
Record instance if one and only one record is found. Else raises.
Raises
------
RecordDoesNotExistError if no record is found
MultipleRecordsReturnedError if multiple records are found
"""
# filter if needed
qs = self if filter_by is None else self.select(filter_by=filter_by)
# check one and only one
if len(qs) == 0:
raise RecordDoesNotExistError("Queryset set contains no value.")
if len(qs) > 1:
raise MultipleRecordsReturnedError("Queryset contains more than one value.")
# return record
return qs[0]
|
def one(self, filter_by=None):
"""
Parameters
----------
filter_by: callable, default None
Callable must take one argument (a record of table), and return True to keep record, or False to skip it.
Example : .one(lambda x: x.name == "my_name").
If None, records are not filtered.
Returns
-------
Record instance if one and only one record is found. Else raises.
Raises
------
RecordDoesNotExistError if no record is found
MultipleRecordsReturnedError if multiple records are found
"""
# filter if needed
qs = self if filter_by is None else self.select(filter_by=filter_by)
# check one and only one
if len(qs) == 0:
raise RecordDoesNotExistError("Queryset set contains no value.")
if len(qs) > 1:
raise MultipleRecordsReturnedError("Queryset contains more than one value.")
# return record
return qs[0]
|
[
"Parameters",
"----------",
"filter_by",
":",
"callable",
"default",
"None",
"Callable",
"must",
"take",
"one",
"argument",
"(",
"a",
"record",
"of",
"table",
")",
"and",
"return",
"True",
"to",
"keep",
"record",
"or",
"False",
"to",
"skip",
"it",
".",
"Example",
":",
".",
"one",
"(",
"lambda",
"x",
":",
"x",
".",
"name",
"==",
"my_name",
")",
".",
"If",
"None",
"records",
"are",
"not",
"filtered",
"."
] |
openergy/oplus
|
python
|
https://github.com/openergy/oplus/blob/f095868d1990c1d126e906ada6acbab26348b3d3/oplus/epm/queryset.py#L110-L138
|
[
"def",
"one",
"(",
"self",
",",
"filter_by",
"=",
"None",
")",
":",
"# filter if needed",
"qs",
"=",
"self",
"if",
"filter_by",
"is",
"None",
"else",
"self",
".",
"select",
"(",
"filter_by",
"=",
"filter_by",
")",
"# check one and only one",
"if",
"len",
"(",
"qs",
")",
"==",
"0",
":",
"raise",
"RecordDoesNotExistError",
"(",
"\"Queryset set contains no value.\"",
")",
"if",
"len",
"(",
"qs",
")",
">",
"1",
":",
"raise",
"MultipleRecordsReturnedError",
"(",
"\"Queryset contains more than one value.\"",
")",
"# return record",
"return",
"qs",
"[",
"0",
"]"
] |
f095868d1990c1d126e906ada6acbab26348b3d3
|
test
|
get_simulated_epw_path
|
Returns
-------
None if epw can be anywhere
|
oplus/compatibility/epw.py
|
def get_simulated_epw_path():
"""
Returns
-------
None if epw can be anywhere
"""
from oplus import CONF # touchy imports
if OS_NAME == "windows":
return os.path.join(CONF.eplus_base_dir_path, "WeatherData", "%s.epw" % CONF.default_model_name)
|
def get_simulated_epw_path():
"""
Returns
-------
None if epw can be anywhere
"""
from oplus import CONF # touchy imports
if OS_NAME == "windows":
return os.path.join(CONF.eplus_base_dir_path, "WeatherData", "%s.epw" % CONF.default_model_name)
|
[
"Returns",
"-------",
"None",
"if",
"epw",
"can",
"be",
"anywhere"
] |
openergy/oplus
|
python
|
https://github.com/openergy/oplus/blob/f095868d1990c1d126e906ada6acbab26348b3d3/oplus/compatibility/epw.py#L6-L15
|
[
"def",
"get_simulated_epw_path",
"(",
")",
":",
"from",
"oplus",
"import",
"CONF",
"# touchy imports",
"if",
"OS_NAME",
"==",
"\"windows\"",
":",
"return",
"os",
".",
"path",
".",
"join",
"(",
"CONF",
".",
"eplus_base_dir_path",
",",
"\"WeatherData\"",
",",
"\"%s.epw\"",
"%",
"CONF",
".",
"default_model_name",
")"
] |
f095868d1990c1d126e906ada6acbab26348b3d3
|
test
|
TableDescriptor.prepare_extensible
|
This function finishes initialization, must be called once all field descriptors and tag have been filled.
|
oplus/epm/table_descriptor.py
|
def prepare_extensible(self):
"""
This function finishes initialization, must be called once all field descriptors and tag have been filled.
"""
# see if extensible and store cycle len
for k in self._tags:
if "extensible" in k:
cycle_len = int(k.split(":")[1])
break
else:
# not extensible
return
# find cycle start and prepare patterns
cycle_start = None
cycle_patterns = []
for i, field_descriptor in enumerate(self._field_descriptors):
# quit if finished
if (cycle_start is not None) and (i >= (cycle_start + cycle_len)):
break
# set cycle start if not set yet
if (cycle_start is None) and ("begin-extensible" in field_descriptor.tags):
cycle_start = i
# leave if cycle start not reached yet
if cycle_start is None:
continue
# store pattern
cycle_patterns.append(field_descriptor.ref.replace("1", r"(\d+)"))
else:
raise RuntimeError("cycle start not found")
# detach unnecessary field descriptors
self._field_descriptors = self._field_descriptors[:cycle_start + cycle_len]
# store cycle info
self.extensible_info = (cycle_start, cycle_len, tuple(cycle_patterns))
# set field descriptor cycle_start index (for error messages while serialization)
for i, fd in enumerate(self._field_descriptors[cycle_start:]):
fd.set_extensible_info(cycle_start, cycle_len, cycle_patterns[i])
|
def prepare_extensible(self):
"""
This function finishes initialization, must be called once all field descriptors and tag have been filled.
"""
# see if extensible and store cycle len
for k in self._tags:
if "extensible" in k:
cycle_len = int(k.split(":")[1])
break
else:
# not extensible
return
# find cycle start and prepare patterns
cycle_start = None
cycle_patterns = []
for i, field_descriptor in enumerate(self._field_descriptors):
# quit if finished
if (cycle_start is not None) and (i >= (cycle_start + cycle_len)):
break
# set cycle start if not set yet
if (cycle_start is None) and ("begin-extensible" in field_descriptor.tags):
cycle_start = i
# leave if cycle start not reached yet
if cycle_start is None:
continue
# store pattern
cycle_patterns.append(field_descriptor.ref.replace("1", r"(\d+)"))
else:
raise RuntimeError("cycle start not found")
# detach unnecessary field descriptors
self._field_descriptors = self._field_descriptors[:cycle_start + cycle_len]
# store cycle info
self.extensible_info = (cycle_start, cycle_len, tuple(cycle_patterns))
# set field descriptor cycle_start index (for error messages while serialization)
for i, fd in enumerate(self._field_descriptors[cycle_start:]):
fd.set_extensible_info(cycle_start, cycle_len, cycle_patterns[i])
|
[
"This",
"function",
"finishes",
"initialization",
"must",
"be",
"called",
"once",
"all",
"field",
"descriptors",
"and",
"tag",
"have",
"been",
"filled",
"."
] |
openergy/oplus
|
python
|
https://github.com/openergy/oplus/blob/f095868d1990c1d126e906ada6acbab26348b3d3/oplus/epm/table_descriptor.py#L49-L91
|
[
"def",
"prepare_extensible",
"(",
"self",
")",
":",
"# see if extensible and store cycle len",
"for",
"k",
"in",
"self",
".",
"_tags",
":",
"if",
"\"extensible\"",
"in",
"k",
":",
"cycle_len",
"=",
"int",
"(",
"k",
".",
"split",
"(",
"\":\"",
")",
"[",
"1",
"]",
")",
"break",
"else",
":",
"# not extensible",
"return",
"# find cycle start and prepare patterns",
"cycle_start",
"=",
"None",
"cycle_patterns",
"=",
"[",
"]",
"for",
"i",
",",
"field_descriptor",
"in",
"enumerate",
"(",
"self",
".",
"_field_descriptors",
")",
":",
"# quit if finished",
"if",
"(",
"cycle_start",
"is",
"not",
"None",
")",
"and",
"(",
"i",
">=",
"(",
"cycle_start",
"+",
"cycle_len",
")",
")",
":",
"break",
"# set cycle start if not set yet",
"if",
"(",
"cycle_start",
"is",
"None",
")",
"and",
"(",
"\"begin-extensible\"",
"in",
"field_descriptor",
".",
"tags",
")",
":",
"cycle_start",
"=",
"i",
"# leave if cycle start not reached yet",
"if",
"cycle_start",
"is",
"None",
":",
"continue",
"# store pattern",
"cycle_patterns",
".",
"append",
"(",
"field_descriptor",
".",
"ref",
".",
"replace",
"(",
"\"1\"",
",",
"r\"(\\d+)\"",
")",
")",
"else",
":",
"raise",
"RuntimeError",
"(",
"\"cycle start not found\"",
")",
"# detach unnecessary field descriptors",
"self",
".",
"_field_descriptors",
"=",
"self",
".",
"_field_descriptors",
"[",
":",
"cycle_start",
"+",
"cycle_len",
"]",
"# store cycle info",
"self",
".",
"extensible_info",
"=",
"(",
"cycle_start",
",",
"cycle_len",
",",
"tuple",
"(",
"cycle_patterns",
")",
")",
"# set field descriptor cycle_start index (for error messages while serialization)",
"for",
"i",
",",
"fd",
"in",
"enumerate",
"(",
"self",
".",
"_field_descriptors",
"[",
"cycle_start",
":",
"]",
")",
":",
"fd",
".",
"set_extensible_info",
"(",
"cycle_start",
",",
"cycle_len",
",",
"cycle_patterns",
"[",
"i",
"]",
")"
] |
f095868d1990c1d126e906ada6acbab26348b3d3
|
test
|
TableDescriptor.get_field_reduced_index
|
reduced index: modulo of extensible has been applied
|
oplus/epm/table_descriptor.py
|
def get_field_reduced_index(self, index):
"""
reduced index: modulo of extensible has been applied
"""
# return index if not extensible
if self.extensible_info is None:
return index
# manage extensible
cycle_start, cycle_len, _ = self.extensible_info
# base field
if index < cycle_start:
return index
# extensible field
return cycle_start + ((index - cycle_start) % cycle_len)
|
def get_field_reduced_index(self, index):
"""
reduced index: modulo of extensible has been applied
"""
# return index if not extensible
if self.extensible_info is None:
return index
# manage extensible
cycle_start, cycle_len, _ = self.extensible_info
# base field
if index < cycle_start:
return index
# extensible field
return cycle_start + ((index - cycle_start) % cycle_len)
|
[
"reduced",
"index",
":",
"modulo",
"of",
"extensible",
"has",
"been",
"applied"
] |
openergy/oplus
|
python
|
https://github.com/openergy/oplus/blob/f095868d1990c1d126e906ada6acbab26348b3d3/oplus/epm/table_descriptor.py#L125-L141
|
[
"def",
"get_field_reduced_index",
"(",
"self",
",",
"index",
")",
":",
"# return index if not extensible",
"if",
"self",
".",
"extensible_info",
"is",
"None",
":",
"return",
"index",
"# manage extensible",
"cycle_start",
",",
"cycle_len",
",",
"_",
"=",
"self",
".",
"extensible_info",
"# base field",
"if",
"index",
"<",
"cycle_start",
":",
"return",
"index",
"# extensible field",
"return",
"cycle_start",
"+",
"(",
"(",
"index",
"-",
"cycle_start",
")",
"%",
"cycle_len",
")"
] |
f095868d1990c1d126e906ada6acbab26348b3d3
|
test
|
TableDescriptor.get_extended_name
|
manages extensible names
|
oplus/epm/table_descriptor.py
|
def get_extended_name(self, index):
"""
manages extensible names
"""
field_descriptor = self.get_field_descriptor(index)
if self.extensible_info is None:
return field_descriptor.name
cycle_start, cycle_len, _ = self.extensible_info
cycle_num = (index - cycle_start) // cycle_len
return None if field_descriptor.name is None else field_descriptor.name.replace("1", str(cycle_num))
|
def get_extended_name(self, index):
    """
    Return the field name at `index`, managing extensible names.

    For extensible fields, the cycle number replaces the "1" of the base
    field name. Returns None if the field descriptor has no name.
    """
    field_descriptor = self.get_field_descriptor(index)
    # non-extensible: descriptor name is used as-is
    if self.extensible_info is None:
        return field_descriptor.name
    cycle_start, cycle_len, _ = self.extensible_info
    # 0-based number of the cycle containing `index`
    cycle_num = (index - cycle_start) // cycle_len
    # NOTE(review): str.replace swaps every "1" in the name (so also the
    # "1" in "10"), and the first cycle yields "0" — confirm intended
    return None if field_descriptor.name is None else field_descriptor.name.replace("1", str(cycle_num))
|
[
"manages",
"extensible",
"names"
] |
openergy/oplus
|
python
|
https://github.com/openergy/oplus/blob/f095868d1990c1d126e906ada6acbab26348b3d3/oplus/epm/table_descriptor.py#L146-L155
|
[
"def",
"get_extended_name",
"(",
"self",
",",
"index",
")",
":",
"field_descriptor",
"=",
"self",
".",
"get_field_descriptor",
"(",
"index",
")",
"if",
"self",
".",
"extensible_info",
"is",
"None",
":",
"return",
"field_descriptor",
".",
"name",
"cycle_start",
",",
"cycle_len",
",",
"_",
"=",
"self",
".",
"extensible_info",
"cycle_num",
"=",
"(",
"index",
"-",
"cycle_start",
")",
"//",
"cycle_len",
"return",
"None",
"if",
"field_descriptor",
".",
"name",
"is",
"None",
"else",
"field_descriptor",
".",
"name",
".",
"replace",
"(",
"\"1\"",
",",
"str",
"(",
"cycle_num",
")",
")"
] |
f095868d1990c1d126e906ada6acbab26348b3d3
|
test
|
FieldDescriptor.deserialize
|
index is used for extensible fields error messages (if given)
|
oplus/epm/field_descriptor.py
|
def deserialize(self, value, index):
    """
    Deserialize a raw field value into its typed internal representation.

    Parameters
    ----------
    value: raw value (str, number, Record, ExternalFile, None, ...)
    index: int
        field index, used for extensible fields error messages (if given)

    Returns
    -------
    Typed value depending on the field's detailed type: int/float/str,
    RecordHook, Link, ExternalFile, or None (including the NONE_* sentinels
    for reference/object-list fields).

    Raises
    ------
    FieldValidationError
        if the value cannot be parsed or validated for this field.
    ValueError
        if a record without a name field is given.
    """
    # -- serialize if not raw type
    # transform to string if external file
    if isinstance(value, ExternalFile):
        value = value.pointer
    # transform to string if record: use its name (field 0)
    if isinstance(value, Record):
        try:
            value = value[0]
        except IndexError:
            raise ValueError("can't set given record because it does not have a name field")
    # -- prepare if string
    if isinstance(value, str):
        # change multiple spaces to mono spaces
        value = re.sub(spaces_and_newlines_pattern, lambda x: " ", value.strip())
        # see if still not empty
        if value == "":
            return None
        # make ASCII compatible
        value = unidecode.unidecode(value)
        # make lower case if not retaincase
        if "retaincase" not in self.tags:
            value = value.lower()
        # check not too big (idf field character limit)
        if len(value) >= 100:
            raise FieldValidationError(
                f"Field has more than 100 characters which is the limit. "
                f"{self.get_error_location_message(value, index=index)}"
            )
        # transform to external file if relevant
        if self.is_file_name:
            value = ExternalFile.deserialize(value)
    # -- deserialize
    # numeric types
    if self.detailed_type in ("integer", "real"):
        # manage none
        if value is None:
            return None
        # special values: auto-calculate, auto-size, use-weather-file
        if value in ("autocalculate", "autosize", "useweatherfile"):
            return value
        if self.detailed_type == "integer":
            try:
                return int(value)
            # narrowed from a bare except: a bare except would also
            # swallow KeyboardInterrupt/SystemExit
            except (TypeError, ValueError):
                raise FieldValidationError(
                    f"Couldn't parse to integer. {self.get_error_location_message(value, index=index)}"
                )
        try:
            return float(value)
        except (TypeError, ValueError):
            raise FieldValidationError(
                f"Couldn't parse to float. {self.get_error_location_message(value, index=index)}"
            )
    # simple string types
    if self.detailed_type in ("alpha", "choice", "node", "external-list"):
        # manage none
        if value is None:
            return None
        # ensure it was str
        # NOTE(review): isinstance_str looks like a project helper —
        # confirm it exists (possible typo for isinstance(value, str))
        if not isinstance_str(value):
            raise FieldValidationError(
                f"Value must be a string. {self.get_error_location_message(value, index=index)}"
            )
        return value
    # manage hooks (eplus reference)
    if self.detailed_type == "reference":
        # manage None
        if value is None:
            return NONE_RECORD_HOOK
        # reference class name appears in v9.0.1
        references = self.tags.get("reference", [])
        # table_name, index, value, references, class_references
        return RecordHook(references, index, value)
    # manage links (eplus object-list)
    if self.detailed_type == "object-list":
        # manage None
        if value is None:
            return NONE_LINK
        return Link(self.tags["object-list"], value, index)
    raise RuntimeError("should not be here")
|
def deserialize(self, value, index):
    """
    Deserialize a raw field value into its typed internal representation.

    Parameters
    ----------
    value: raw value (str, number, Record, ExternalFile, None, ...)
    index: int
        field index, used for extensible fields error messages (if given)

    Returns
    -------
    Typed value depending on the field's detailed type: int/float/str,
    RecordHook, Link, ExternalFile, or None (including the NONE_* sentinels
    for reference/object-list fields).

    Raises
    ------
    FieldValidationError
        if the value cannot be parsed or validated for this field.
    ValueError
        if a record without a name field is given.
    """
    # -- serialize if not raw type
    # transform to string if external file
    if isinstance(value, ExternalFile):
        value = value.pointer
    # transform to string if record: use its name (field 0)
    if isinstance(value, Record):
        try:
            value = value[0]
        except IndexError:
            raise ValueError("can't set given record because it does not have a name field")
    # -- prepare if string
    if isinstance(value, str):
        # change multiple spaces to mono spaces
        value = re.sub(spaces_and_newlines_pattern, lambda x: " ", value.strip())
        # see if still not empty
        if value == "":
            return None
        # make ASCII compatible
        value = unidecode.unidecode(value)
        # make lower case if not retaincase
        if "retaincase" not in self.tags:
            value = value.lower()
        # check not too big (idf field character limit)
        if len(value) >= 100:
            raise FieldValidationError(
                f"Field has more than 100 characters which is the limit. "
                f"{self.get_error_location_message(value, index=index)}"
            )
        # transform to external file if relevant
        if self.is_file_name:
            value = ExternalFile.deserialize(value)
    # -- deserialize
    # numeric types
    if self.detailed_type in ("integer", "real"):
        # manage none
        if value is None:
            return None
        # special values: auto-calculate, auto-size, use-weather-file
        if value in ("autocalculate", "autosize", "useweatherfile"):
            return value
        if self.detailed_type == "integer":
            try:
                return int(value)
            # NOTE(review): bare except also swallows
            # KeyboardInterrupt/SystemExit — consider (TypeError, ValueError)
            except:
                raise FieldValidationError(
                    f"Couldn't parse to integer. {self.get_error_location_message(value, index=index)}"
                )
        try:
            return float(value)
        # NOTE(review): same bare-except remark as above
        except:
            raise FieldValidationError(
                f"Couldn't parse to float. {self.get_error_location_message(value, index=index)}"
            )
    # simple string types
    if self.detailed_type in ("alpha", "choice", "node", "external-list"):
        # manage none
        if value is None:
            return None
        # ensure it was str
        # NOTE(review): isinstance_str looks like a project helper —
        # confirm it exists (possible typo for isinstance(value, str))
        if not isinstance_str(value):
            raise FieldValidationError(
                f"Value must be a string. {self.get_error_location_message(value, index=index)}"
            )
        return value
    # manage hooks (eplus reference)
    if self.detailed_type == "reference":
        # manage None
        if value is None:
            return NONE_RECORD_HOOK
        # reference class name appears in v9.0.1
        references = self.tags.get("reference", [])
        # table_name, index, value, references, class_references
        return RecordHook(references, index, value)
    # manage links (eplus object-list)
    if self.detailed_type == "object-list":
        # manage None
        if value is None:
            return NONE_LINK
        return Link(self.tags["object-list"], value, index)
    raise RuntimeError("should not be here")
|
[
"index",
"is",
"used",
"for",
"extensible",
"fields",
"error",
"messages",
"(",
"if",
"given",
")"
] |
openergy/oplus
|
python
|
https://github.com/openergy/oplus/blob/f095868d1990c1d126e906ada6acbab26348b3d3/oplus/epm/field_descriptor.py#L57-L159
|
[
"def",
"deserialize",
"(",
"self",
",",
"value",
",",
"index",
")",
":",
"# -- serialize if not raw type",
"# transform to string if external file",
"if",
"isinstance",
"(",
"value",
",",
"ExternalFile",
")",
":",
"value",
"=",
"value",
".",
"pointer",
"# transform to string if record",
"if",
"isinstance",
"(",
"value",
",",
"Record",
")",
":",
"try",
":",
"value",
"=",
"value",
"[",
"0",
"]",
"except",
"IndexError",
":",
"raise",
"ValueError",
"(",
"\"can't set given record because it does not have a name field\"",
")",
"# -- prepare if string",
"if",
"isinstance",
"(",
"value",
",",
"str",
")",
":",
"# change multiple spaces to mono spaces",
"value",
"=",
"re",
".",
"sub",
"(",
"spaces_and_newlines_pattern",
",",
"lambda",
"x",
":",
"\" \"",
",",
"value",
".",
"strip",
"(",
")",
")",
"# see if still not empty",
"if",
"value",
"==",
"\"\"",
":",
"return",
"None",
"# make ASCII compatible",
"value",
"=",
"unidecode",
".",
"unidecode",
"(",
"value",
")",
"# make lower case if not retaincase",
"if",
"\"retaincase\"",
"not",
"in",
"self",
".",
"tags",
":",
"value",
"=",
"value",
".",
"lower",
"(",
")",
"# check not too big",
"if",
"len",
"(",
"value",
")",
">=",
"100",
":",
"raise",
"FieldValidationError",
"(",
"f\"Field has more than 100 characters which is the limit. \"",
"f\"{self.get_error_location_message(value, index=index)}\"",
")",
"# transform to external file if relevant",
"if",
"self",
".",
"is_file_name",
":",
"value",
"=",
"ExternalFile",
".",
"deserialize",
"(",
"value",
")",
"# -- deserialize",
"# numeric types",
"if",
"self",
".",
"detailed_type",
"in",
"(",
"\"integer\"",
",",
"\"real\"",
")",
":",
"# manage none",
"if",
"value",
"is",
"None",
":",
"return",
"None",
"# special values: auto-calculate, auto-size, use-weather-file",
"if",
"value",
"in",
"(",
"\"autocalculate\"",
",",
"\"autosize\"",
",",
"\"useweatherfile\"",
")",
":",
"return",
"value",
"if",
"self",
".",
"detailed_type",
"==",
"\"integer\"",
":",
"try",
":",
"return",
"int",
"(",
"value",
")",
"except",
":",
"raise",
"FieldValidationError",
"(",
"f\"Couldn't parse to integer. {self.get_error_location_message(value, index=index)}\"",
")",
"try",
":",
"return",
"float",
"(",
"value",
")",
"except",
":",
"raise",
"FieldValidationError",
"(",
"f\"Couldn't parse to float. {self.get_error_location_message(value, index=index)}\"",
")",
"# simple string types",
"if",
"self",
".",
"detailed_type",
"in",
"(",
"\"alpha\"",
",",
"\"choice\"",
",",
"\"node\"",
",",
"\"external-list\"",
")",
":",
"# manage none",
"if",
"value",
"is",
"None",
":",
"return",
"None",
"# ensure it was str",
"if",
"not",
"isinstance_str",
"(",
"value",
")",
":",
"raise",
"FieldValidationError",
"(",
"f\"Value must be a string. {self.get_error_location_message(value, index=index)}\"",
")",
"return",
"value",
"# manage hooks (eplus reference)",
"if",
"self",
".",
"detailed_type",
"==",
"\"reference\"",
":",
"# manage None",
"if",
"value",
"is",
"None",
":",
"return",
"NONE_RECORD_HOOK",
"# reference class name appears in v9.0.1",
"references",
"=",
"self",
".",
"tags",
".",
"get",
"(",
"\"reference\"",
",",
"[",
"]",
")",
"# table_name, index, value, references, class_references",
"return",
"RecordHook",
"(",
"references",
",",
"index",
",",
"value",
")",
"# manage links (eplus object-list)",
"if",
"self",
".",
"detailed_type",
"==",
"\"object-list\"",
":",
"# manage None",
"if",
"value",
"is",
"None",
":",
"return",
"NONE_LINK",
"return",
"Link",
"(",
"self",
".",
"tags",
"[",
"\"object-list\"",
"]",
",",
"value",
",",
"index",
")",
"raise",
"RuntimeError",
"(",
"\"should not be here\"",
")"
] |
f095868d1990c1d126e906ada6acbab26348b3d3
|
test
|
FieldDescriptor.detailed_type
|
Uses EPlus double approach of type ('type' tag, and/or 'key', 'object-list', 'external-list', 'reference' tags)
to determine detailed type.
Returns
-------
"integer", "real", "alpha", "choice", "reference", "object-list", "external-list", "node"
|
oplus/epm/field_descriptor.py
|
def detailed_type(self):
    """
    Compute (and cache in self._detailed_type) the field's detailed type.

    Uses EPlus double approach of type ('type' tag, and/or 'key',
    'object-list', 'external-list', 'reference' tags).

    Returns
    -------
    one of: "integer", "real", "alpha", "choice", "reference",
    "object-list", "external-list", "node"

    Raises
    ------
    ValueError
        if no rule allows determining the detailed type.
    """
    # cached: computed once, on first access
    if self._detailed_type is not None:
        return self._detailed_type
    tags = self.tags
    if "reference" in tags or "reference-class-name" in tags:
        computed = "reference"
    elif "type" in tags:
        # idd is not very rigorous on case
        computed = tags["type"][0].lower()
    elif "key" in tags:
        computed = "choice"
    elif "object-list" in tags:
        computed = "object-list"
    elif "external-list" in tags:
        computed = "external-list"
    elif self.basic_type == "A":
        computed = "alpha"
    elif self.basic_type == "N":
        computed = "real"
    else:
        raise ValueError("Can't find detailed type.")
    self._detailed_type = computed
    return self._detailed_type
|
def detailed_type(self):
    """
    Uses EPlus double approach of type ('type' tag, and/or 'key', 'object-list', 'external-list', 'reference' tags)
    to determine detailed type. The result is computed once and cached
    in self._detailed_type.

    Returns
    -------
    "integer", "real", "alpha", "choice", "reference", "object-list", "external-list", "node"

    Raises
    ------
    ValueError
        if no rule allows determining the detailed type.
    """
    # lazy computation: only performed on first access
    if self._detailed_type is None:
        if ("reference" in self.tags) or ("reference-class-name" in self.tags):
            self._detailed_type = "reference"
        elif "type" in self.tags:
            self._detailed_type = self.tags["type"][0].lower()  # idd is not very rigorous on case
        elif "key" in self.tags:
            self._detailed_type = "choice"
        elif "object-list" in self.tags:
            self._detailed_type = "object-list"
        elif "external-list" in self.tags:
            self._detailed_type = "external-list"
        elif self.basic_type == "A":
            self._detailed_type = "alpha"
        elif self.basic_type == "N":
            self._detailed_type = "real"
        else:
            raise ValueError("Can't find detailed type.")
    return self._detailed_type
|
[
"Uses",
"EPlus",
"double",
"approach",
"of",
"type",
"(",
"type",
"tag",
"and",
"/",
"or",
"key",
"object",
"-",
"list",
"external",
"-",
"list",
"reference",
"tags",
")",
"to",
"determine",
"detailed",
"type",
".",
"Returns",
"-------",
"integer",
"real",
"alpha",
"choice",
"reference",
"object",
"-",
"list",
"external",
"-",
"list",
"node"
] |
openergy/oplus
|
python
|
https://github.com/openergy/oplus/blob/f095868d1990c1d126e906ada6acbab26348b3d3/oplus/epm/field_descriptor.py#L176-L202
|
[
"def",
"detailed_type",
"(",
"self",
")",
":",
"if",
"self",
".",
"_detailed_type",
"is",
"None",
":",
"if",
"(",
"\"reference\"",
"in",
"self",
".",
"tags",
")",
"or",
"(",
"\"reference-class-name\"",
"in",
"self",
".",
"tags",
")",
":",
"self",
".",
"_detailed_type",
"=",
"\"reference\"",
"elif",
"\"type\"",
"in",
"self",
".",
"tags",
":",
"self",
".",
"_detailed_type",
"=",
"self",
".",
"tags",
"[",
"\"type\"",
"]",
"[",
"0",
"]",
".",
"lower",
"(",
")",
"# idd is not very rigorous on case",
"elif",
"\"key\"",
"in",
"self",
".",
"tags",
":",
"self",
".",
"_detailed_type",
"=",
"\"choice\"",
"elif",
"\"object-list\"",
"in",
"self",
".",
"tags",
":",
"self",
".",
"_detailed_type",
"=",
"\"object-list\"",
"elif",
"\"external-list\"",
"in",
"self",
".",
"tags",
":",
"self",
".",
"_detailed_type",
"=",
"\"external-list\"",
"elif",
"self",
".",
"basic_type",
"==",
"\"A\"",
":",
"self",
".",
"_detailed_type",
"=",
"\"alpha\"",
"elif",
"self",
".",
"basic_type",
"==",
"\"N\"",
":",
"self",
".",
"_detailed_type",
"=",
"\"real\"",
"else",
":",
"raise",
"ValueError",
"(",
"\"Can't find detailed type.\"",
")",
"return",
"self",
".",
"_detailed_type"
] |
f095868d1990c1d126e906ada6acbab26348b3d3
|
test
|
ExternalFilesManager.short_refs
|
we calculate on the fly to avoid managing registrations and un-registrations
Returns
-------
{ref: short_ref, ...
|
oplus/epm/external_files_manager.py
|
def short_refs(self):
    """
    Compute the mapping of external file refs to unique short refs.

    We calculate on the fly to avoid managing registrations and
    un-registrations.

    Returns
    -------
    dict
        {ref: short_ref, ...} — when several refs share the same naive
        short ref, a numeric suffix is inserted before the extension to
        disambiguate them.
    """
    naive_short_refs_d = dict()  # naive_short_ref: {refs, ...}
    for ef in self._external_files:
        if ef.naive_short_ref not in naive_short_refs_d:
            naive_short_refs_d[ef.naive_short_ref] = set()
        naive_short_refs_d[ef.naive_short_ref].add(ef.ref)
    short_refs = dict()
    for naive_short_ref, refs in naive_short_refs_d.items():
        # unique naive short ref: keep as-is
        if len(refs) == 1:
            short_refs[refs.pop()] = naive_short_ref
            continue
        # collision: insert an index before the extension
        base, ext = os.path.splitext(naive_short_ref)
        for i, ref in enumerate(sorted(refs)):
            # bug fix: splitext's ext already contains the leading dot, so
            # the previous f"{base}-{i}.{ext}" produced a double dot
            short_refs[ref] = f"{base}-{i}{ext}"
    return short_refs
|
def short_refs(self):
    """
    Compute the {ref: short_ref, ...} mapping for all external files.

    we calculate on the fly to avoid managing registrations and un-registrations

    Returns
    -------
    dict
        {ref: short_ref, ...} — when several refs share the same naive
        short ref, a numeric suffix is added to disambiguate them.
    """
    naive_short_refs_d = dict()  # naive_short_ref: {refs, ...}
    for ef in self._external_files:
        if ef.naive_short_ref not in naive_short_refs_d:
            naive_short_refs_d[ef.naive_short_ref] = set()
        naive_short_refs_d[ef.naive_short_ref].add(ef.ref)
    short_refs = dict()
    for naive_short_ref, refs in naive_short_refs_d.items():
        # unique naive short ref: keep unchanged
        if len(refs) == 1:
            short_refs[refs.pop()] = naive_short_ref
            continue
        # collision: suffix an index
        base, ext = os.path.splitext(naive_short_ref)
        for i, ref in enumerate(sorted(refs)):
            # NOTE(review): splitext's ext already contains the leading dot,
            # so this yields a double dot (e.g. "a-0..csv") — likely a bug
            short_refs[ref] = f"{base}-{i}.{ext}"
    return short_refs
|
[
"we",
"calculate",
"on",
"the",
"fly",
"to",
"avoid",
"managing",
"registrations",
"and",
"un",
"-",
"registrations"
] |
openergy/oplus
|
python
|
https://github.com/openergy/oplus/blob/f095868d1990c1d126e906ada6acbab26348b3d3/oplus/epm/external_files_manager.py#L19-L42
|
[
"def",
"short_refs",
"(",
"self",
")",
":",
"naive_short_refs_d",
"=",
"dict",
"(",
")",
"# naive_short_ref: {refs, ...}",
"for",
"ef",
"in",
"self",
".",
"_external_files",
":",
"if",
"ef",
".",
"naive_short_ref",
"not",
"in",
"naive_short_refs_d",
":",
"naive_short_refs_d",
"[",
"ef",
".",
"naive_short_ref",
"]",
"=",
"set",
"(",
")",
"naive_short_refs_d",
"[",
"ef",
".",
"naive_short_ref",
"]",
".",
"add",
"(",
"ef",
".",
"ref",
")",
"short_refs",
"=",
"dict",
"(",
")",
"for",
"naive_short_ref",
",",
"refs",
"in",
"naive_short_refs_d",
".",
"items",
"(",
")",
":",
"if",
"len",
"(",
"refs",
")",
"==",
"1",
":",
"short_refs",
"[",
"refs",
".",
"pop",
"(",
")",
"]",
"=",
"naive_short_ref",
"continue",
"base",
",",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"naive_short_ref",
")",
"for",
"i",
",",
"ref",
"in",
"enumerate",
"(",
"sorted",
"(",
"refs",
")",
")",
":",
"short_refs",
"[",
"ref",
"]",
"=",
"f\"{base}-{i}.{ext}\"",
"return",
"short_refs"
] |
f095868d1990c1d126e906ada6acbab26348b3d3
|
test
|
EioTable.get_value
|
Returns first occurrence of value of filter column matching filter criterion.
|
oplus/eio.py
|
def get_value(self, column_name_or_i, filter_column_name_or_i, filter_criterion):
    """
    Return the first value of the target column whose row matches the
    filter criterion on the filter column (str comparison is
    case-insensitive).

    Raises
    ------
    ValueError
        if no row matches the filter criterion.
    """
    # resolve both column indexes
    target_i = self._get_column_index(column_name_or_i)
    filter_i = self._get_column_index(filter_column_name_or_i)
    # pick the comparison depending on the criterion type
    comparators = {
        float: lambda x: float(x) == filter_criterion,
        int: lambda x: int(x) == filter_criterion,
        str: lambda x: x.lower() == filter_criterion.lower(),
    }
    matches = comparators[type(filter_criterion)]
    for row in self._data:
        if matches(row[filter_i]):
            return row[target_i]
    raise ValueError("Filter did not return any values.")
|
def get_value(self, column_name_or_i, filter_column_name_or_i, filter_criterion):
    """
    Returns first occurrence of value of filter column matching filter criterion.

    Parameters
    ----------
    column_name_or_i: str or int
        column to read the returned value from
    filter_column_name_or_i: str or int
        column on which the filter criterion is applied
    filter_criterion: float, int or str
        str comparison is case-insensitive; other types raise KeyError

    Raises
    ------
    ValueError
        if no row matches the filter criterion.
    """
    # find column indexes
    column_i = self._get_column_index(column_name_or_i)
    filter_column_i = self._get_column_index(filter_column_name_or_i)
    # pick comparison depending on criterion type
    filter_fct = {
        float: lambda x: float(x) == filter_criterion,
        int: lambda x: int(x) == filter_criterion,
        str: lambda x: x.lower() == filter_criterion.lower()
    }[type(filter_criterion)]
    for row_i, row in enumerate(self._data):
        if filter_fct(row[filter_column_i]):
            break
    else:
        # for/else: loop finished without break -> no match found
        raise ValueError("Filter did not return any values.")
    return self._data[row_i][column_i]
|
[
"Returns",
"first",
"occurrence",
"of",
"value",
"of",
"filter",
"column",
"matching",
"filter",
"criterion",
"."
] |
openergy/oplus
|
python
|
https://github.com/openergy/oplus/blob/f095868d1990c1d126e906ada6acbab26348b3d3/oplus/eio.py#L109-L129
|
[
"def",
"get_value",
"(",
"self",
",",
"column_name_or_i",
",",
"filter_column_name_or_i",
",",
"filter_criterion",
")",
":",
"# find column indexes",
"column_i",
"=",
"self",
".",
"_get_column_index",
"(",
"column_name_or_i",
")",
"filter_column_i",
"=",
"self",
".",
"_get_column_index",
"(",
"filter_column_name_or_i",
")",
"filter_fct",
"=",
"{",
"float",
":",
"lambda",
"x",
":",
"float",
"(",
"x",
")",
"==",
"filter_criterion",
",",
"int",
":",
"lambda",
"x",
":",
"int",
"(",
"x",
")",
"==",
"filter_criterion",
",",
"str",
":",
"lambda",
"x",
":",
"x",
".",
"lower",
"(",
")",
"==",
"filter_criterion",
".",
"lower",
"(",
")",
"}",
"[",
"type",
"(",
"filter_criterion",
")",
"]",
"for",
"row_i",
",",
"row",
"in",
"enumerate",
"(",
"self",
".",
"_data",
")",
":",
"if",
"filter_fct",
"(",
"row",
"[",
"filter_column_i",
"]",
")",
":",
"break",
"else",
":",
"raise",
"ValueError",
"(",
"\"Filter did not return any values.\"",
")",
"return",
"self",
".",
"_data",
"[",
"row_i",
"]",
"[",
"column_i",
"]"
] |
f095868d1990c1d126e906ada6acbab26348b3d3
|
test
|
Record._update_value_inert
|
is only called by _update_inert
|
oplus/epm/record.py
|
def _update_value_inert(self, index, value):
    """
    Set the value of field `index` without activating hooks/links/files.

    is only called by _update_inert (which performs the required-fields
    check and triggers activation afterwards)

    Parameters
    ----------
    index: int
        field index
    value: raw field value (deserialized by the field descriptor)
    """
    # get field descriptor
    field_descriptor = self._table._dev_descriptor.get_field_descriptor(index)
    # prepare value
    value = field_descriptor.deserialize(value, index)
    # unregister previous link if relevant (new value is a link, so the
    # currently stored value at this index is expected to be one too)
    if isinstance(value, Link):
        # de-activate current link if any
        current_link = self._data.get(index)
        if current_link is not None:
            current_link.unregister()
    # unregister previous hook if relevant
    if isinstance(value, RecordHook):
        current_record_hook = self._data.get(index)
        if current_record_hook is not None:
            current_record_hook.unregister()
    # unregister previous external file if relevant
    if isinstance(value, ExternalFile):
        current_external_file = self._data.get(index)
        if current_external_file is not None:
            current_external_file._dev_unregister()
    # if None remove and leave
    if value in (None, NONE_RECORD_HOOK, NONE_LINK, NONE_EXTERNAL_FILE):
        # we don't check required, because this method is called by _update_inert which does the job
        self._dev_set_none_without_unregistering(index, check_not_required=False)
        return
    # if relevant, store current pk to signal table
    old_hook = None
    if index == 0 and not self._table._dev_auto_pk:
        old_hook = self._data.get(0)  # we use get, because record may not have a pk yet if it is being created
    # set value
    self._data[index] = value
    # signal pk update if relevant, so the table can re-index the record
    if old_hook is not None:
        self._table._dev_record_pk_was_updated(old_hook.target_value)
|
def _update_value_inert(self, index, value):
    """
    Inertly assign a deserialized value to field `index`.

    is only called by _update_inert; activation of hooks, links and
    external files is done separately by the caller.
    """
    # get field descriptor
    field_descriptor = self._table._dev_descriptor.get_field_descriptor(index)
    # prepare value
    value = field_descriptor.deserialize(value, index)
    # unregister previous link if relevant
    if isinstance(value, Link):
        # de-activate current link if any
        current_link = self._data.get(index)
        if current_link is not None:
            current_link.unregister()
    # unregister previous hook if relevant
    if isinstance(value, RecordHook):
        current_record_hook = self._data.get(index)
        if current_record_hook is not None:
            current_record_hook.unregister()
    # unregister previous external file if relevant
    if isinstance(value, ExternalFile):
        current_external_file = self._data.get(index)
        if current_external_file is not None:
            current_external_file._dev_unregister()
    # if None remove and leave
    if value in (None, NONE_RECORD_HOOK, NONE_LINK, NONE_EXTERNAL_FILE):
        # we don't check required, because this method is called by _update_inert which does the job
        self._dev_set_none_without_unregistering(index, check_not_required=False)
        return
    # if relevant, store current pk to signal table (field 0 is the pk
    # for non auto-pk tables)
    old_hook = None
    if index == 0 and not self._table._dev_auto_pk:
        old_hook = self._data.get(0)  # we use get, because record may not have a pk yet if it is being created
    # set value
    self._data[index] = value
    # signal pk update if relevant
    if old_hook is not None:
        self._table._dev_record_pk_was_updated(old_hook.target_value)
|
[
"is",
"only",
"called",
"by",
"_update_inert"
] |
openergy/oplus
|
python
|
https://github.com/openergy/oplus/blob/f095868d1990c1d126e906ada6acbab26348b3d3/oplus/epm/record.py#L85-L130
|
[
"def",
"_update_value_inert",
"(",
"self",
",",
"index",
",",
"value",
")",
":",
"# get field descriptor",
"field_descriptor",
"=",
"self",
".",
"_table",
".",
"_dev_descriptor",
".",
"get_field_descriptor",
"(",
"index",
")",
"# prepare value",
"value",
"=",
"field_descriptor",
".",
"deserialize",
"(",
"value",
",",
"index",
")",
"# unregister previous link if relevant",
"if",
"isinstance",
"(",
"value",
",",
"Link",
")",
":",
"# de-activate current link if any",
"current_link",
"=",
"self",
".",
"_data",
".",
"get",
"(",
"index",
")",
"if",
"current_link",
"is",
"not",
"None",
":",
"current_link",
".",
"unregister",
"(",
")",
"# unregister previous hook if relevant",
"if",
"isinstance",
"(",
"value",
",",
"RecordHook",
")",
":",
"current_record_hook",
"=",
"self",
".",
"_data",
".",
"get",
"(",
"index",
")",
"if",
"current_record_hook",
"is",
"not",
"None",
":",
"current_record_hook",
".",
"unregister",
"(",
")",
"# unregister previous external file if relevant",
"if",
"isinstance",
"(",
"value",
",",
"ExternalFile",
")",
":",
"current_external_file",
"=",
"self",
".",
"_data",
".",
"get",
"(",
"index",
")",
"if",
"current_external_file",
"is",
"not",
"None",
":",
"current_external_file",
".",
"_dev_unregister",
"(",
")",
"# if None remove and leave",
"if",
"value",
"in",
"(",
"None",
",",
"NONE_RECORD_HOOK",
",",
"NONE_LINK",
",",
"NONE_EXTERNAL_FILE",
")",
":",
"# we don't check required, because this method is called by _update_inert which does the job",
"self",
".",
"_dev_set_none_without_unregistering",
"(",
"index",
",",
"check_not_required",
"=",
"False",
")",
"return",
"# if relevant, store current pk to signal table",
"old_hook",
"=",
"None",
"if",
"index",
"==",
"0",
"and",
"not",
"self",
".",
"_table",
".",
"_dev_auto_pk",
":",
"old_hook",
"=",
"self",
".",
"_data",
".",
"get",
"(",
"0",
")",
"# we use get, because record may not have a pk yet if it is being created",
"# set value",
"self",
".",
"_data",
"[",
"index",
"]",
"=",
"value",
"# signal pk update if relevant",
"if",
"old_hook",
"is",
"not",
"None",
":",
"self",
".",
"_table",
".",
"_dev_record_pk_was_updated",
"(",
"old_hook",
".",
"target_value",
")"
] |
f095868d1990c1d126e906ada6acbab26348b3d3
|
test
|
Record.get_serialized_value
|
Parameters
----------
ref_or_index
external_files_mode: str, default 'path'
'path', 'pointer'
model_file_path: str, default None
if external files are asked in a relative fashion, relative path will be calculated relatively to
model_file_path if given, else current directory
Returns
-------
serialized value (only basic types: string, int, float, None, ...)
|
oplus/epm/record.py
|
def get_serialized_value(self, ref_or_index, model_name=None):
    """
    Return the serialized value of a field.

    Parameters
    ----------
    ref_or_index: str or int
        field lowercase name, or field index
    model_name: str, default None
        used to build the external files directory name when the field
        value is an external file

    Returns
    -------
    serialized value (only basic types: string, int, float, None, ...)
    """
    index = (
        self._table._dev_descriptor.get_field_index(ref_or_index) if isinstance(ref_or_index, str)
        else ref_or_index
    )
    # get value
    value = self._data.get(index)
    # serialize links and record hooks to their raw value
    value = value.serialize() if isinstance(value, (Link, RecordHook)) else value
    # manage file names: external files are serialized as a path inside
    # the model's external files directory
    if isinstance(value, ExternalFile):
        value = os.path.join(get_external_files_dir_name(model_name=model_name), value.naive_short_ref)
    return value
|
def get_serialized_value(self, ref_or_index, model_name=None):
    """
    Return the serialized value of a field (basic types only).

    Parameters
    ----------
    ref_or_index: str or int
        field lowercase name, or field index
    model_name: str, default None
        used to build the external files directory name when the field
        value is an external file

    Returns
    -------
    serialized value (only basic types: string, int, float, None, ...)
    """
    # resolve field index from its name if needed
    index = (
        self._table._dev_descriptor.get_field_index(ref_or_index) if isinstance(ref_or_index, str)
        else ref_or_index
    )
    # get value
    value = self._data.get(index)
    # serialize links and record hooks to their raw value
    value = value.serialize() if isinstance(value, (Link, RecordHook)) else value
    # manage file names: external files become a path inside the model's
    # external files directory
    if isinstance(value, ExternalFile):
        value = os.path.join(get_external_files_dir_name(model_name=model_name), value.naive_short_ref)
    return value
|
[
"Parameters",
"----------",
"ref_or_index",
"external_files_mode",
":",
"str",
"default",
"path",
"path",
"pointer",
"model_file_path",
":",
"str",
"default",
"None",
"if",
"external",
"files",
"are",
"asked",
"in",
"a",
"relative",
"fashion",
"relative",
"path",
"will",
"be",
"calculated",
"relatively",
"to",
"model_file_path",
"if",
"given",
"else",
"current",
"directory"
] |
openergy/oplus
|
python
|
https://github.com/openergy/oplus/blob/f095868d1990c1d126e906ada6acbab26348b3d3/oplus/epm/record.py#L355-L385
|
[
"def",
"get_serialized_value",
"(",
"self",
",",
"ref_or_index",
",",
"model_name",
"=",
"None",
")",
":",
"index",
"=",
"(",
"self",
".",
"_table",
".",
"_dev_descriptor",
".",
"get_field_index",
"(",
"ref_or_index",
")",
"if",
"isinstance",
"(",
"ref_or_index",
",",
"str",
")",
"else",
"ref_or_index",
")",
"# get value",
"value",
"=",
"self",
".",
"_data",
".",
"get",
"(",
"index",
")",
"# serialize",
"value",
"=",
"value",
".",
"serialize",
"(",
")",
"if",
"isinstance",
"(",
"value",
",",
"(",
"Link",
",",
"RecordHook",
")",
")",
"else",
"value",
"# manage file names",
"if",
"isinstance",
"(",
"value",
",",
"ExternalFile",
")",
":",
"value",
"=",
"os",
".",
"path",
".",
"join",
"(",
"get_external_files_dir_name",
"(",
"model_name",
"=",
"model_name",
")",
",",
"value",
".",
"naive_short_ref",
")",
"return",
"value"
] |
f095868d1990c1d126e906ada6acbab26348b3d3
|
test
|
Record.get_external_files
|
Returns
-------
List of ExternalFiles instances contained by record.
|
oplus/epm/record.py
|
def get_external_files(self):
    """
    List the ExternalFile instances contained by this record.

    Returns
    -------
    list of ExternalFile
    """
    external_files = []
    for field_value in self._data.values():
        if isinstance(field_value, ExternalFile):
            external_files.append(field_value)
    return external_files
|
def get_external_files(self):
    """
    Returns
    -------
    list of ExternalFile
        ExternalFile instances contained by record (one per field whose
        value is an external file).
    """
    return [v for v in self._data.values() if isinstance(v, ExternalFile)]
|
[
"Returns",
"-------",
"List",
"of",
"ExternalFiles",
"instances",
"contained",
"by",
"record",
"."
] |
openergy/oplus
|
python
|
https://github.com/openergy/oplus/blob/f095868d1990c1d126e906ada6acbab26348b3d3/oplus/epm/record.py#L403-L409
|
[
"def",
"get_external_files",
"(",
"self",
")",
":",
"return",
"[",
"v",
"for",
"v",
"in",
"self",
".",
"_data",
".",
"values",
"(",
")",
"if",
"isinstance",
"(",
"v",
",",
"ExternalFile",
")",
"]"
] |
f095868d1990c1d126e906ada6acbab26348b3d3
|
test
|
Record.update
|
Updates simultaneously all given fields.
Parameters
----------
data: dictionary containing field lowercase names or index as keys, and field values as values (dict syntax)
or_data: keyword arguments containing field names as keys (kwargs syntax)
|
oplus/epm/record.py
|
def update(self, data=None, **or_data):
    """
    Updates simultaneously all given fields.

    Parameters
    ----------
    data: dictionary containing field lowercase names or index as keys,
        and field values as values (dict syntax)
    or_data: keyword arguments containing field names as keys (kwargs
        syntax); ignored when `data` is given
    """
    # create/update/delete framework:
    # (epm._dev_populate_from_json_data, table.batch_add, record.update,
    #  queryset.delete, record.delete)
    # step 1 - add inert: data is checked, old links are unregistered,
    #   record is stored in table (=> pk uniqueness is checked)
    # step 2 - activate: hooks, links, external files
    if data is None:
        data = or_data
    self._update_inert(data)
    self._dev_activate_hooks()
    self._dev_activate_links()
    self._dev_activate_external_files()
|
def update(self, data=None, **or_data):
    """
    Updates simultaneously all given fields.

    Parameters
    ----------
    data: dictionary containing field lowercase names or index as keys, and field values as values (dict syntax)
    or_data: keyword arguments containing field names as keys (kwargs syntax);
        ignored when `data` is given
    """
    # workflow
    # --------
    # (methods belonging to create/update/delete framework:
    # epm._dev_populate_from_json_data, table.batch_add, record.update, queryset.delete, record.delete)
    # 1. add inert
    # * data is checked
    # * old links are unregistered
    # * record is stored in table (=> pk uniqueness is checked)
    # 2. activate: hooks, links, external files
    data = or_data if data is None else data
    self._update_inert(data)
    self._dev_activate_hooks()
    self._dev_activate_links()
    self._dev_activate_external_files()
|
[
"Updates",
"simultaneously",
"all",
"given",
"fields",
"."
] |
openergy/oplus
|
python
|
https://github.com/openergy/oplus/blob/f095868d1990c1d126e906ada6acbab26348b3d3/oplus/epm/record.py#L412-L438
|
[
"def",
"update",
"(",
"self",
",",
"data",
"=",
"None",
",",
"*",
"*",
"or_data",
")",
":",
"# workflow",
"# --------",
"# (methods belonging to create/update/delete framework:",
"# epm._dev_populate_from_json_data, table.batch_add, record.update, queryset.delete, record.delete)",
"# 1. add inert",
"# * data is checked",
"# * old links are unregistered",
"# * record is stored in table (=> pk uniqueness is checked)",
"# 2. activate: hooks, links, external files",
"data",
"=",
"or_data",
"if",
"data",
"is",
"None",
"else",
"data",
"self",
".",
"_update_inert",
"(",
"data",
")",
"self",
".",
"_dev_activate_hooks",
"(",
")",
"self",
".",
"_dev_activate_links",
"(",
")",
"self",
".",
"_dev_activate_external_files",
"(",
")"
] |
f095868d1990c1d126e906ada6acbab26348b3d3
|
test
|
Record.copy
|
Parameters
----------
new_name: str, default None
record's new name (if table has a name). If None although record has a name, a random uuid will be given.
Returns
-------
Copied record.
|
oplus/epm/record.py
|
def copy(self, new_name=None):
"""
Parameters
----------
new_name: str, default None
record's new name (if table has a name). If None although record has a name, a random uuid will be given.
Returns
-------
Copied record.
"""
# todo: check this really works, !! must not use same link, hook, external_file, ... for different records !!
# auto pk tables can just be copied
if self._table._dev_auto_pk:
return self._table.add(self._data)
# for ref pk tables, must manage name
name = str(uuid.uuid4()) if new_name is None else new_name
new_data = dict((k, name if k == 0 else v) for (k, v) in self._data.items())
return self._table.add(new_data)
|
def copy(self, new_name=None):
"""
Parameters
----------
new_name: str, default None
record's new name (if table has a name). If None although record has a name, a random uuid will be given.
Returns
-------
Copied record.
"""
# todo: check this really works, !! must not use same link, hook, external_file, ... for different records !!
# auto pk tables can just be copied
if self._table._dev_auto_pk:
return self._table.add(self._data)
# for ref pk tables, must manage name
name = str(uuid.uuid4()) if new_name is None else new_name
new_data = dict((k, name if k == 0 else v) for (k, v) in self._data.items())
return self._table.add(new_data)
|
[
"Parameters",
"----------",
"new_name",
":",
"str",
"default",
"None",
"record",
"s",
"new",
"name",
"(",
"if",
"table",
"has",
"a",
"name",
")",
".",
"If",
"None",
"although",
"record",
"has",
"a",
"name",
"a",
"random",
"uuid",
"will",
"be",
"given",
"."
] |
openergy/oplus
|
python
|
https://github.com/openergy/oplus/blob/f095868d1990c1d126e906ada6acbab26348b3d3/oplus/epm/record.py#L440-L459
|
[
"def",
"copy",
"(",
"self",
",",
"new_name",
"=",
"None",
")",
":",
"# todo: check this really works, !! must not use same link, hook, external_file, ... for different records !!",
"# auto pk tables can just be copied",
"if",
"self",
".",
"_table",
".",
"_dev_auto_pk",
":",
"return",
"self",
".",
"_table",
".",
"add",
"(",
"self",
".",
"_data",
")",
"# for ref pk tables, must manage name",
"name",
"=",
"str",
"(",
"uuid",
".",
"uuid4",
"(",
")",
")",
"if",
"new_name",
"is",
"None",
"else",
"new_name",
"new_data",
"=",
"dict",
"(",
"(",
"k",
",",
"name",
"if",
"k",
"==",
"0",
"else",
"v",
")",
"for",
"(",
"k",
",",
"v",
")",
"in",
"self",
".",
"_data",
".",
"items",
"(",
")",
")",
"return",
"self",
".",
"_table",
".",
"add",
"(",
"new_data",
")"
] |
f095868d1990c1d126e906ada6acbab26348b3d3
|
test
|
Record.set_defaults
|
sets all empty fields for which a default value is defined to default value
|
oplus/epm/record.py
|
def set_defaults(self):
"""
sets all empty fields for which a default value is defined to default value
"""
defaults = {}
for i in range(len(self)):
if i in self._data:
continue
default = self.get_field_descriptor(i).tags.get("default", [None])[0]
if default is not None:
defaults[i] = default
self.update(defaults)
|
def set_defaults(self):
"""
sets all empty fields for which a default value is defined to default value
"""
defaults = {}
for i in range(len(self)):
if i in self._data:
continue
default = self.get_field_descriptor(i).tags.get("default", [None])[0]
if default is not None:
defaults[i] = default
self.update(defaults)
|
[
"sets",
"all",
"empty",
"fields",
"for",
"which",
"a",
"default",
"value",
"is",
"defined",
"to",
"default",
"value"
] |
openergy/oplus
|
python
|
https://github.com/openergy/oplus/blob/f095868d1990c1d126e906ada6acbab26348b3d3/oplus/epm/record.py#L461-L473
|
[
"def",
"set_defaults",
"(",
"self",
")",
":",
"defaults",
"=",
"{",
"}",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"self",
")",
")",
":",
"if",
"i",
"in",
"self",
".",
"_data",
":",
"continue",
"default",
"=",
"self",
".",
"get_field_descriptor",
"(",
"i",
")",
".",
"tags",
".",
"get",
"(",
"\"default\"",
",",
"[",
"None",
"]",
")",
"[",
"0",
"]",
"if",
"default",
"is",
"not",
"None",
":",
"defaults",
"[",
"i",
"]",
"=",
"default",
"self",
".",
"update",
"(",
"defaults",
")"
] |
f095868d1990c1d126e906ada6acbab26348b3d3
|
test
|
Record.add_fields
|
This method only works for extensible fields. It allows to add values without precising their fields' names
or indexes.
Parameters
----------
args: field values
|
oplus/epm/record.py
|
def add_fields(self, *args):
"""
This method only works for extensible fields. It allows to add values without precising their fields' names
or indexes.
Parameters
----------
args: field values
"""
if not self.is_extensible():
raise TypeError("Can't use add_fields on a non extensible record.")
# prepare update data
self_len = len(self)
data = dict([(self_len + i, args[i]) for i in range(len(args))])
# update
self.update(data)
|
def add_fields(self, *args):
"""
This method only works for extensible fields. It allows to add values without precising their fields' names
or indexes.
Parameters
----------
args: field values
"""
if not self.is_extensible():
raise TypeError("Can't use add_fields on a non extensible record.")
# prepare update data
self_len = len(self)
data = dict([(self_len + i, args[i]) for i in range(len(args))])
# update
self.update(data)
|
[
"This",
"method",
"only",
"works",
"for",
"extensible",
"fields",
".",
"It",
"allows",
"to",
"add",
"values",
"without",
"precising",
"their",
"fields",
"names",
"or",
"indexes",
"."
] |
openergy/oplus
|
python
|
https://github.com/openergy/oplus/blob/f095868d1990c1d126e906ada6acbab26348b3d3/oplus/epm/record.py#L476-L493
|
[
"def",
"add_fields",
"(",
"self",
",",
"*",
"args",
")",
":",
"if",
"not",
"self",
".",
"is_extensible",
"(",
")",
":",
"raise",
"TypeError",
"(",
"\"Can't use add_fields on a non extensible record.\"",
")",
"# prepare update data",
"self_len",
"=",
"len",
"(",
"self",
")",
"data",
"=",
"dict",
"(",
"[",
"(",
"self_len",
"+",
"i",
",",
"args",
"[",
"i",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"args",
")",
")",
"]",
")",
"# update",
"self",
".",
"update",
"(",
"data",
")"
] |
f095868d1990c1d126e906ada6acbab26348b3d3
|
test
|
Record.pop
|
This method only works for extensible fields. It allows to remove a value and shift all other values to fill
the gap.
Parameters
----------
index: int, default None
index of field to remove.
Returns
-------
serialize value of popped field
|
oplus/epm/record.py
|
def pop(self, index=None):
"""
This method only works for extensible fields. It allows to remove a value and shift all other values to fill
the gap.
Parameters
----------
index: int, default None
index of field to remove.
Returns
-------
serialize value of popped field
"""
# prepare index (will check for extensible)
index = self._prepare_pop_insert_index(index=index)
# get extensible info
cycle_start, cycle_len, patterns = self.get_extensible_info()
# remove extensible fields
fields = self.clear_extensible_fields()
# pop
serialized_value = fields.pop(index-cycle_start)
# add remaining
self.add_fields(*fields)
return serialized_value
|
def pop(self, index=None):
"""
This method only works for extensible fields. It allows to remove a value and shift all other values to fill
the gap.
Parameters
----------
index: int, default None
index of field to remove.
Returns
-------
serialize value of popped field
"""
# prepare index (will check for extensible)
index = self._prepare_pop_insert_index(index=index)
# get extensible info
cycle_start, cycle_len, patterns = self.get_extensible_info()
# remove extensible fields
fields = self.clear_extensible_fields()
# pop
serialized_value = fields.pop(index-cycle_start)
# add remaining
self.add_fields(*fields)
return serialized_value
|
[
"This",
"method",
"only",
"works",
"for",
"extensible",
"fields",
".",
"It",
"allows",
"to",
"remove",
"a",
"value",
"and",
"shift",
"all",
"other",
"values",
"to",
"fill",
"the",
"gap",
"."
] |
openergy/oplus
|
python
|
https://github.com/openergy/oplus/blob/f095868d1990c1d126e906ada6acbab26348b3d3/oplus/epm/record.py#L495-L524
|
[
"def",
"pop",
"(",
"self",
",",
"index",
"=",
"None",
")",
":",
"# prepare index (will check for extensible)",
"index",
"=",
"self",
".",
"_prepare_pop_insert_index",
"(",
"index",
"=",
"index",
")",
"# get extensible info",
"cycle_start",
",",
"cycle_len",
",",
"patterns",
"=",
"self",
".",
"get_extensible_info",
"(",
")",
"# remove extensible fields",
"fields",
"=",
"self",
".",
"clear_extensible_fields",
"(",
")",
"# pop",
"serialized_value",
"=",
"fields",
".",
"pop",
"(",
"index",
"-",
"cycle_start",
")",
"# add remaining",
"self",
".",
"add_fields",
"(",
"*",
"fields",
")",
"return",
"serialized_value"
] |
f095868d1990c1d126e906ada6acbab26348b3d3
|
test
|
Record.insert
|
This method only works for extensible fields. It allows to insert a value, and shifts all other following
values.
Parameters
----------
index: position of insertion
value: value to insert
|
oplus/epm/record.py
|
def insert(self, index, value):
"""
This method only works for extensible fields. It allows to insert a value, and shifts all other following
values.
Parameters
----------
index: position of insertion
value: value to insert
"""
# prepare index (will check for extensible)
index = self._prepare_pop_insert_index(index=index)
# remove extensible fields
fields = self.clear_extensible_fields()
# insert
fields.insert(index, value)
# add new list
self.add_fields(*fields)
|
def insert(self, index, value):
"""
This method only works for extensible fields. It allows to insert a value, and shifts all other following
values.
Parameters
----------
index: position of insertion
value: value to insert
"""
# prepare index (will check for extensible)
index = self._prepare_pop_insert_index(index=index)
# remove extensible fields
fields = self.clear_extensible_fields()
# insert
fields.insert(index, value)
# add new list
self.add_fields(*fields)
|
[
"This",
"method",
"only",
"works",
"for",
"extensible",
"fields",
".",
"It",
"allows",
"to",
"insert",
"a",
"value",
"and",
"shifts",
"all",
"other",
"following",
"values",
"."
] |
openergy/oplus
|
python
|
https://github.com/openergy/oplus/blob/f095868d1990c1d126e906ada6acbab26348b3d3/oplus/epm/record.py#L526-L546
|
[
"def",
"insert",
"(",
"self",
",",
"index",
",",
"value",
")",
":",
"# prepare index (will check for extensible)",
"index",
"=",
"self",
".",
"_prepare_pop_insert_index",
"(",
"index",
"=",
"index",
")",
"# remove extensible fields",
"fields",
"=",
"self",
".",
"clear_extensible_fields",
"(",
")",
"# insert",
"fields",
".",
"insert",
"(",
"index",
",",
"value",
")",
"# add new list",
"self",
".",
"add_fields",
"(",
"*",
"fields",
")"
] |
f095868d1990c1d126e906ada6acbab26348b3d3
|
test
|
Record.clear_extensible_fields
|
Returns
-------
list of cleared fields (serialized)
|
oplus/epm/record.py
|
def clear_extensible_fields(self):
"""
Returns
-------
list of cleared fields (serialized)
"""
if not self.is_extensible():
raise TypeError("Can't use add_fields on a non extensible record.")
cycle_start, cycle_len, patterns = self.get_extensible_info()
return [self.get_serialized_value(i) for i in range(cycle_start, len(self))]
|
def clear_extensible_fields(self):
"""
Returns
-------
list of cleared fields (serialized)
"""
if not self.is_extensible():
raise TypeError("Can't use add_fields on a non extensible record.")
cycle_start, cycle_len, patterns = self.get_extensible_info()
return [self.get_serialized_value(i) for i in range(cycle_start, len(self))]
|
[
"Returns",
"-------",
"list",
"of",
"cleared",
"fields",
"(",
"serialized",
")"
] |
openergy/oplus
|
python
|
https://github.com/openergy/oplus/blob/f095868d1990c1d126e906ada6acbab26348b3d3/oplus/epm/record.py#L548-L557
|
[
"def",
"clear_extensible_fields",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"is_extensible",
"(",
")",
":",
"raise",
"TypeError",
"(",
"\"Can't use add_fields on a non extensible record.\"",
")",
"cycle_start",
",",
"cycle_len",
",",
"patterns",
"=",
"self",
".",
"get_extensible_info",
"(",
")",
"return",
"[",
"self",
".",
"get_serialized_value",
"(",
"i",
")",
"for",
"i",
"in",
"range",
"(",
"cycle_start",
",",
"len",
"(",
"self",
")",
")",
"]"
] |
f095868d1990c1d126e906ada6acbab26348b3d3
|
test
|
Record.delete
|
Deletes record, and removes it from database.
|
oplus/epm/record.py
|
def delete(self):
"""
Deletes record, and removes it from database.
"""
# workflow
# --------
# (methods belonging to create/update/delete framework:
# epm._dev_populate_from_json_data, table.batch_add, record.update, queryset.delete, record.delete)
# 1. unregister: links, hooks and external files
# 3. remove from table without unregistering
# unregister links
self._unregister_links()
# unregister hooks
self._unregister_hooks()
# unregister external files
self._unregister_external_files()
# tell table to remove without unregistering
self.get_table()._dev_remove_record_without_unregistering(self)
# make stale
self._table = None
self._data = None
|
def delete(self):
"""
Deletes record, and removes it from database.
"""
# workflow
# --------
# (methods belonging to create/update/delete framework:
# epm._dev_populate_from_json_data, table.batch_add, record.update, queryset.delete, record.delete)
# 1. unregister: links, hooks and external files
# 3. remove from table without unregistering
# unregister links
self._unregister_links()
# unregister hooks
self._unregister_hooks()
# unregister external files
self._unregister_external_files()
# tell table to remove without unregistering
self.get_table()._dev_remove_record_without_unregistering(self)
# make stale
self._table = None
self._data = None
|
[
"Deletes",
"record",
"and",
"removes",
"it",
"from",
"database",
"."
] |
openergy/oplus
|
python
|
https://github.com/openergy/oplus/blob/f095868d1990c1d126e906ada6acbab26348b3d3/oplus/epm/record.py#L560-L585
|
[
"def",
"delete",
"(",
"self",
")",
":",
"# workflow",
"# --------",
"# (methods belonging to create/update/delete framework:",
"# epm._dev_populate_from_json_data, table.batch_add, record.update, queryset.delete, record.delete)",
"# 1. unregister: links, hooks and external files",
"# 3. remove from table without unregistering",
"# unregister links",
"self",
".",
"_unregister_links",
"(",
")",
"# unregister hooks",
"self",
".",
"_unregister_hooks",
"(",
")",
"# unregister external files",
"self",
".",
"_unregister_external_files",
"(",
")",
"# tell table to remove without unregistering",
"self",
".",
"get_table",
"(",
")",
".",
"_dev_remove_record_without_unregistering",
"(",
"self",
")",
"# make stale",
"self",
".",
"_table",
"=",
"None",
"self",
".",
"_data",
"=",
"None"
] |
f095868d1990c1d126e906ada6acbab26348b3d3
|
test
|
Record.get_field_descriptor
|
Parameters
----------
ref_or_index: str or int
field lowercase name, or field position
Returns
-------
Field descriptor (info contained in Idd)
|
oplus/epm/record.py
|
def get_field_descriptor(self, ref_or_index):
"""
Parameters
----------
ref_or_index: str or int
field lowercase name, or field position
Returns
-------
Field descriptor (info contained in Idd)
"""
if isinstance(ref_or_index, int):
index = ref_or_index
else:
index = self._table._dev_descriptor.get_field_index(ref_or_index)
return self._table._dev_descriptor.get_field_descriptor(index)
|
def get_field_descriptor(self, ref_or_index):
"""
Parameters
----------
ref_or_index: str or int
field lowercase name, or field position
Returns
-------
Field descriptor (info contained in Idd)
"""
if isinstance(ref_or_index, int):
index = ref_or_index
else:
index = self._table._dev_descriptor.get_field_index(ref_or_index)
return self._table._dev_descriptor.get_field_descriptor(index)
|
[
"Parameters",
"----------",
"ref_or_index",
":",
"str",
"or",
"int",
"field",
"lowercase",
"name",
"or",
"field",
"position"
] |
openergy/oplus
|
python
|
https://github.com/openergy/oplus/blob/f095868d1990c1d126e906ada6acbab26348b3d3/oplus/epm/record.py#L588-L603
|
[
"def",
"get_field_descriptor",
"(",
"self",
",",
"ref_or_index",
")",
":",
"if",
"isinstance",
"(",
"ref_or_index",
",",
"int",
")",
":",
"index",
"=",
"ref_or_index",
"else",
":",
"index",
"=",
"self",
".",
"_table",
".",
"_dev_descriptor",
".",
"get_field_index",
"(",
"ref_or_index",
")",
"return",
"self",
".",
"_table",
".",
"_dev_descriptor",
".",
"get_field_descriptor",
"(",
"index",
")"
] |
f095868d1990c1d126e906ada6acbab26348b3d3
|
test
|
Record.to_json_data
|
Parameters
----------
model_name: str, default None
if given, will be used as external file directory base name
Returns
-------
A dictionary of serialized data.
|
oplus/epm/record.py
|
def to_json_data(self, model_name=None):
"""
Parameters
----------
model_name: str, default None
if given, will be used as external file directory base name
Returns
-------
A dictionary of serialized data.
"""
return collections.OrderedDict([(k, self.get_serialized_value(k, model_name=model_name )) for k in self._data])
|
def to_json_data(self, model_name=None):
"""
Parameters
----------
model_name: str, default None
if given, will be used as external file directory base name
Returns
-------
A dictionary of serialized data.
"""
return collections.OrderedDict([(k, self.get_serialized_value(k, model_name=model_name )) for k in self._data])
|
[
"Parameters",
"----------",
"model_name",
":",
"str",
"default",
"None",
"if",
"given",
"will",
"be",
"used",
"as",
"external",
"file",
"directory",
"base",
"name"
] |
openergy/oplus
|
python
|
https://github.com/openergy/oplus/blob/f095868d1990c1d126e906ada6acbab26348b3d3/oplus/epm/record.py#L623-L634
|
[
"def",
"to_json_data",
"(",
"self",
",",
"model_name",
"=",
"None",
")",
":",
"return",
"collections",
".",
"OrderedDict",
"(",
"[",
"(",
"k",
",",
"self",
".",
"get_serialized_value",
"(",
"k",
",",
"model_name",
"=",
"model_name",
")",
")",
"for",
"k",
"in",
"self",
".",
"_data",
"]",
")"
] |
f095868d1990c1d126e906ada6acbab26348b3d3
|
test
|
Record.to_idf
|
Parameters
----------
model_name: str, default None
if given, will be used as external file directory base name
Returns
-------
idf string
|
oplus/epm/record.py
|
def to_idf(self, model_name=None):
"""
Parameters
----------
model_name: str, default None
if given, will be used as external file directory base name
Returns
-------
idf string
"""
json_data = self.to_json_data(model_name=model_name)
# record descriptor ref
s = f"{self._table._dev_descriptor.table_name},\n"
# fields
# fields_nb: we don't use len(self) but max(self). We wan't to stop if no more values (even base fields)
# because some idd records are defined without extensibles (although they should used them), for example
# construction, and eplus does not know what to do...
fields_nb = max(self._data)+1
for i in range(fields_nb):
# value
tab = " " * TAB_LEN
raw_value = json_data.get(i, "")
content = f"{tab}{raw_value}{';' if i == fields_nb-1 else ','}"
# comment
spaces_nb = COMMENT_COLUMN_START - len(content)
if spaces_nb < 0:
spaces_nb = TAB_LEN
# comment
name = self._table._dev_descriptor.get_extended_name(i)
comment = "" if name is None else " " * spaces_nb + f"! {name}"
# store
s += f"{content}{comment}\n"
return s
|
def to_idf(self, model_name=None):
"""
Parameters
----------
model_name: str, default None
if given, will be used as external file directory base name
Returns
-------
idf string
"""
json_data = self.to_json_data(model_name=model_name)
# record descriptor ref
s = f"{self._table._dev_descriptor.table_name},\n"
# fields
# fields_nb: we don't use len(self) but max(self). We wan't to stop if no more values (even base fields)
# because some idd records are defined without extensibles (although they should used them), for example
# construction, and eplus does not know what to do...
fields_nb = max(self._data)+1
for i in range(fields_nb):
# value
tab = " " * TAB_LEN
raw_value = json_data.get(i, "")
content = f"{tab}{raw_value}{';' if i == fields_nb-1 else ','}"
# comment
spaces_nb = COMMENT_COLUMN_START - len(content)
if spaces_nb < 0:
spaces_nb = TAB_LEN
# comment
name = self._table._dev_descriptor.get_extended_name(i)
comment = "" if name is None else " " * spaces_nb + f"! {name}"
# store
s += f"{content}{comment}\n"
return s
|
[
"Parameters",
"----------",
"model_name",
":",
"str",
"default",
"None",
"if",
"given",
"will",
"be",
"used",
"as",
"external",
"file",
"directory",
"base",
"name"
] |
openergy/oplus
|
python
|
https://github.com/openergy/oplus/blob/f095868d1990c1d126e906ada6acbab26348b3d3/oplus/epm/record.py#L636-L676
|
[
"def",
"to_idf",
"(",
"self",
",",
"model_name",
"=",
"None",
")",
":",
"json_data",
"=",
"self",
".",
"to_json_data",
"(",
"model_name",
"=",
"model_name",
")",
"# record descriptor ref",
"s",
"=",
"f\"{self._table._dev_descriptor.table_name},\\n\"",
"# fields",
"# fields_nb: we don't use len(self) but max(self). We wan't to stop if no more values (even base fields)",
"# because some idd records are defined without extensibles (although they should used them), for example",
"# construction, and eplus does not know what to do...",
"fields_nb",
"=",
"max",
"(",
"self",
".",
"_data",
")",
"+",
"1",
"for",
"i",
"in",
"range",
"(",
"fields_nb",
")",
":",
"# value",
"tab",
"=",
"\" \"",
"*",
"TAB_LEN",
"raw_value",
"=",
"json_data",
".",
"get",
"(",
"i",
",",
"\"\"",
")",
"content",
"=",
"f\"{tab}{raw_value}{';' if i == fields_nb-1 else ','}\"",
"# comment",
"spaces_nb",
"=",
"COMMENT_COLUMN_START",
"-",
"len",
"(",
"content",
")",
"if",
"spaces_nb",
"<",
"0",
":",
"spaces_nb",
"=",
"TAB_LEN",
"# comment",
"name",
"=",
"self",
".",
"_table",
".",
"_dev_descriptor",
".",
"get_extended_name",
"(",
"i",
")",
"comment",
"=",
"\"\"",
"if",
"name",
"is",
"None",
"else",
"\" \"",
"*",
"spaces_nb",
"+",
"f\"! {name}\"",
"# store",
"s",
"+=",
"f\"{content}{comment}\\n\"",
"return",
"s"
] |
f095868d1990c1d126e906ada6acbab26348b3d3
|
test
|
check
|
Tested under EPlus 8.1.0 on Windows (Geoffroy).
|
check/check_many_simulations.py
|
def check():
"""
Tested under EPlus 8.1.0 on Windows (Geoffroy).
"""
# !! CAN BE VERY LONG
epw_path = os.path.join(CONF.eplus_base_dir_path, "WeatherData",
"USA_VA_Sterling-Washington.Dulles.Intl.AP.724030_TMY3.epw")
idf_dir_path = os.path.join(CONF.eplus_base_dir_path, "ExampleFiles")
test_num = 0
for file_num, file_name in enumerate(os.listdir(idf_dir_path)):
if file_num < START_FILE_NUM:
continue
base, ext = os.path.splitext(file_name)
if ext == ".idf":
with tempfile.TemporaryDirectory() as simulation_dir_path:
s = simulate(os.path.join(idf_dir_path, file_name), epw_path,
simulation_dir_path if DEBUG_SIMUL_DIR_PATH is None else
DEBUG_SIMUL_DIR_PATH)
if s.exists("eio"):
eio = Eio(s.get_file_path("eio")) # raise error if problem
test_num += 1
if test_num == MAX_TESTS_NB:
break
|
def check():
"""
Tested under EPlus 8.1.0 on Windows (Geoffroy).
"""
# !! CAN BE VERY LONG
epw_path = os.path.join(CONF.eplus_base_dir_path, "WeatherData",
"USA_VA_Sterling-Washington.Dulles.Intl.AP.724030_TMY3.epw")
idf_dir_path = os.path.join(CONF.eplus_base_dir_path, "ExampleFiles")
test_num = 0
for file_num, file_name in enumerate(os.listdir(idf_dir_path)):
if file_num < START_FILE_NUM:
continue
base, ext = os.path.splitext(file_name)
if ext == ".idf":
with tempfile.TemporaryDirectory() as simulation_dir_path:
s = simulate(os.path.join(idf_dir_path, file_name), epw_path,
simulation_dir_path if DEBUG_SIMUL_DIR_PATH is None else
DEBUG_SIMUL_DIR_PATH)
if s.exists("eio"):
eio = Eio(s.get_file_path("eio")) # raise error if problem
test_num += 1
if test_num == MAX_TESTS_NB:
break
|
[
"Tested",
"under",
"EPlus",
"8",
".",
"1",
".",
"0",
"on",
"Windows",
"(",
"Geoffroy",
")",
"."
] |
openergy/oplus
|
python
|
https://github.com/openergy/oplus/blob/f095868d1990c1d126e906ada6acbab26348b3d3/check/check_many_simulations.py#L12-L34
|
[
"def",
"check",
"(",
")",
":",
"# !! CAN BE VERY LONG",
"epw_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"CONF",
".",
"eplus_base_dir_path",
",",
"\"WeatherData\"",
",",
"\"USA_VA_Sterling-Washington.Dulles.Intl.AP.724030_TMY3.epw\"",
")",
"idf_dir_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"CONF",
".",
"eplus_base_dir_path",
",",
"\"ExampleFiles\"",
")",
"test_num",
"=",
"0",
"for",
"file_num",
",",
"file_name",
"in",
"enumerate",
"(",
"os",
".",
"listdir",
"(",
"idf_dir_path",
")",
")",
":",
"if",
"file_num",
"<",
"START_FILE_NUM",
":",
"continue",
"base",
",",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"file_name",
")",
"if",
"ext",
"==",
"\".idf\"",
":",
"with",
"tempfile",
".",
"TemporaryDirectory",
"(",
")",
"as",
"simulation_dir_path",
":",
"s",
"=",
"simulate",
"(",
"os",
".",
"path",
".",
"join",
"(",
"idf_dir_path",
",",
"file_name",
")",
",",
"epw_path",
",",
"simulation_dir_path",
"if",
"DEBUG_SIMUL_DIR_PATH",
"is",
"None",
"else",
"DEBUG_SIMUL_DIR_PATH",
")",
"if",
"s",
".",
"exists",
"(",
"\"eio\"",
")",
":",
"eio",
"=",
"Eio",
"(",
"s",
".",
"get_file_path",
"(",
"\"eio\"",
")",
")",
"# raise error if problem",
"test_num",
"+=",
"1",
"if",
"test_num",
"==",
"MAX_TESTS_NB",
":",
"break"
] |
f095868d1990c1d126e906ada6acbab26348b3d3
|
test
|
StandardOutput.get_data
|
Parameters
----------
environment_title_or_num
frequency: 'str', default None
'timestep', 'hourly', 'daily', 'monthly', 'annual', 'run_period'
If None, will look for the smallest frequency of environment.
|
oplus/standard_output/standard_output.py
|
def get_data(self, environment_title_or_num=-1, frequency=None):
"""
Parameters
----------
environment_title_or_num
frequency: 'str', default None
'timestep', 'hourly', 'daily', 'monthly', 'annual', 'run_period'
If None, will look for the smallest frequency of environment.
"""
# manage environment num
if isinstance(environment_title_or_num, int):
environment_title = tuple(self._raw_environments.keys())[environment_title_or_num]
else:
environment_title = environment_title_or_num
if environment_title not in self._dfs:
raise ValueError(f"No environment named {environment_title}. Available environments: {tuple(self._dfs)}.")
# get environment dataframes
environment_dfs = self._dfs[environment_title]
# find first non null frequency if not given
if frequency is None:
for frequency in FREQUENCIES:
if environment_dfs[frequency] is not None:
break
# check frequency
if frequency not in FREQUENCIES:
raise ValueError(f"Unknown frequency: {frequency}. Available frequencies: {FREQUENCIES}")
return self._dfs[environment_title][frequency]
|
def get_data(self, environment_title_or_num=-1, frequency=None):
"""
Parameters
----------
environment_title_or_num
frequency: 'str', default None
'timestep', 'hourly', 'daily', 'monthly', 'annual', 'run_period'
If None, will look for the smallest frequency of environment.
"""
# manage environment num
if isinstance(environment_title_or_num, int):
environment_title = tuple(self._raw_environments.keys())[environment_title_or_num]
else:
environment_title = environment_title_or_num
if environment_title not in self._dfs:
raise ValueError(f"No environment named {environment_title}. Available environments: {tuple(self._dfs)}.")
# get environment dataframes
environment_dfs = self._dfs[environment_title]
# find first non null frequency if not given
if frequency is None:
for frequency in FREQUENCIES:
if environment_dfs[frequency] is not None:
break
# check frequency
if frequency not in FREQUENCIES:
raise ValueError(f"Unknown frequency: {frequency}. Available frequencies: {FREQUENCIES}")
return self._dfs[environment_title][frequency]
|
[
"Parameters",
"----------",
"environment_title_or_num",
"frequency",
":",
"str",
"default",
"None",
"timestep",
"hourly",
"daily",
"monthly",
"annual",
"run_period",
"If",
"None",
"will",
"look",
"for",
"the",
"smallest",
"frequency",
"of",
"environment",
"."
] |
openergy/oplus
|
python
|
https://github.com/openergy/oplus/blob/f095868d1990c1d126e906ada6acbab26348b3d3/oplus/standard_output/standard_output.py#L53-L85
|
[
"def",
"get_data",
"(",
"self",
",",
"environment_title_or_num",
"=",
"-",
"1",
",",
"frequency",
"=",
"None",
")",
":",
"# manage environment num",
"if",
"isinstance",
"(",
"environment_title_or_num",
",",
"int",
")",
":",
"environment_title",
"=",
"tuple",
"(",
"self",
".",
"_raw_environments",
".",
"keys",
"(",
")",
")",
"[",
"environment_title_or_num",
"]",
"else",
":",
"environment_title",
"=",
"environment_title_or_num",
"if",
"environment_title",
"not",
"in",
"self",
".",
"_dfs",
":",
"raise",
"ValueError",
"(",
"f\"No environment named {environment_title}. Available environments: {tuple(self._dfs)}.\"",
")",
"# get environment dataframes",
"environment_dfs",
"=",
"self",
".",
"_dfs",
"[",
"environment_title",
"]",
"# find first non null frequency if not given",
"if",
"frequency",
"is",
"None",
":",
"for",
"frequency",
"in",
"FREQUENCIES",
":",
"if",
"environment_dfs",
"[",
"frequency",
"]",
"is",
"not",
"None",
":",
"break",
"# check frequency",
"if",
"frequency",
"not",
"in",
"FREQUENCIES",
":",
"raise",
"ValueError",
"(",
"f\"Unknown frequency: {frequency}. Available frequencies: {FREQUENCIES}\"",
")",
"return",
"self",
".",
"_dfs",
"[",
"environment_title",
"]",
"[",
"frequency",
"]"
] |
f095868d1990c1d126e906ada6acbab26348b3d3
|
test
|
get_documented_add
|
this hack is used to document add function
a methods __doc__ attribute is read-only (or must use metaclasses, what I certainly don't want to do...)
we therefore create a function (who's __doc__ attribute is read/write), and will bind it to Table in __init__
|
oplus/epm/table.py
|
def get_documented_add(self, record_descriptors):
"""
this hack is used to document add function
a methods __doc__ attribute is read-only (or must use metaclasses, what I certainly don't want to do...)
we therefore create a function (who's __doc__ attribute is read/write), and will bind it to Table in __init__
"""
def add(data=None, **or_data):
"""
Parameters
----------
data: dictionary containing field lowercase names or index as keys, and field values as values (dict syntax)
or_data: keyword arguments containing field names as keys (kwargs syntax)
A lowercase name is the lowercase EnergyPlus name, for which all non alpha-numeric characters have been replaced
by underscores. All multiple consecutive underscores are then replaced by one unique underscore.
The two syntaxes are not meant to cohabit. The kwargs syntax is nicer, but does not enable to use indexes
instead of names.
Examples
--------
for Schedule:Compact table:
schedule = table.add( # kwarg syntax
name="Heating Setpoint Schedule - new[1]",
schedule_type_limits_name="Any Number",
field_1="Through: 12/31",
field_2="For: AllDays",
field_3="Until: 24:00,20.0"
)
schedule = table.add({ # dict syntax, mixing names and index keys
name="Heating Setpoint Schedule - new[1]",
schedule_type_limits_name="Any Number",
2="Through: 12/31",
3="For: AllDays",
4="Until: 24:00,20.0"
})
Returns
-------
Created Record instance
"""
return self.batch_add([or_data if data is None else data])[0]
add.__doc__ = "\n".join([fd.ref.lower() for fd in record_descriptors if fd.ref is not None])
return add
|
def get_documented_add(self, record_descriptors):
"""
this hack is used to document add function
a methods __doc__ attribute is read-only (or must use metaclasses, what I certainly don't want to do...)
we therefore create a function (who's __doc__ attribute is read/write), and will bind it to Table in __init__
"""
def add(data=None, **or_data):
"""
Parameters
----------
data: dictionary containing field lowercase names or index as keys, and field values as values (dict syntax)
or_data: keyword arguments containing field names as keys (kwargs syntax)
A lowercase name is the lowercase EnergyPlus name, for which all non alpha-numeric characters have been replaced
by underscores. All multiple consecutive underscores are then replaced by one unique underscore.
The two syntaxes are not meant to cohabit. The kwargs syntax is nicer, but does not enable to use indexes
instead of names.
Examples
--------
for Schedule:Compact table:
schedule = table.add( # kwarg syntax
name="Heating Setpoint Schedule - new[1]",
schedule_type_limits_name="Any Number",
field_1="Through: 12/31",
field_2="For: AllDays",
field_3="Until: 24:00,20.0"
)
schedule = table.add({ # dict syntax, mixing names and index keys
name="Heating Setpoint Schedule - new[1]",
schedule_type_limits_name="Any Number",
2="Through: 12/31",
3="For: AllDays",
4="Until: 24:00,20.0"
})
Returns
-------
Created Record instance
"""
return self.batch_add([or_data if data is None else data])[0]
add.__doc__ = "\n".join([fd.ref.lower() for fd in record_descriptors if fd.ref is not None])
return add
|
[
"this",
"hack",
"is",
"used",
"to",
"document",
"add",
"function",
"a",
"methods",
"__doc__",
"attribute",
"is",
"read",
"-",
"only",
"(",
"or",
"must",
"use",
"metaclasses",
"what",
"I",
"certainly",
"don",
"t",
"want",
"to",
"do",
"...",
")",
"we",
"therefore",
"create",
"a",
"function",
"(",
"who",
"s",
"__doc__",
"attribute",
"is",
"read",
"/",
"write",
")",
"and",
"will",
"bind",
"it",
"to",
"Table",
"in",
"__init__"
] |
openergy/oplus
|
python
|
https://github.com/openergy/oplus/blob/f095868d1990c1d126e906ada6acbab26348b3d3/oplus/epm/table.py#L6-L53
|
[
"def",
"get_documented_add",
"(",
"self",
",",
"record_descriptors",
")",
":",
"def",
"add",
"(",
"data",
"=",
"None",
",",
"*",
"*",
"or_data",
")",
":",
"\"\"\"\n Parameters\n ----------\n data: dictionary containing field lowercase names or index as keys, and field values as values (dict syntax)\n or_data: keyword arguments containing field names as keys (kwargs syntax)\n\n A lowercase name is the lowercase EnergyPlus name, for which all non alpha-numeric characters have been replaced\n by underscores. All multiple consecutive underscores are then replaced by one unique underscore.\n\n The two syntaxes are not meant to cohabit. The kwargs syntax is nicer, but does not enable to use indexes\n instead of names.\n\n Examples\n --------\n for Schedule:Compact table:\n\n schedule = table.add( # kwarg syntax\n name=\"Heating Setpoint Schedule - new[1]\",\n schedule_type_limits_name=\"Any Number\",\n field_1=\"Through: 12/31\",\n field_2=\"For: AllDays\",\n field_3=\"Until: 24:00,20.0\"\n )\n\n schedule = table.add({ # dict syntax, mixing names and index keys\n name=\"Heating Setpoint Schedule - new[1]\",\n schedule_type_limits_name=\"Any Number\",\n 2=\"Through: 12/31\",\n 3=\"For: AllDays\",\n 4=\"Until: 24:00,20.0\"\n })\n\n Returns\n -------\n Created Record instance\n \"\"\"",
"return",
"self",
".",
"batch_add",
"(",
"[",
"or_data",
"if",
"data",
"is",
"None",
"else",
"data",
"]",
")",
"[",
"0",
"]",
"add",
".",
"__doc__",
"=",
"\"\\n\"",
".",
"join",
"(",
"[",
"fd",
".",
"ref",
".",
"lower",
"(",
")",
"for",
"fd",
"in",
"record_descriptors",
"if",
"fd",
".",
"ref",
"is",
"not",
"None",
"]",
")",
"return",
"add"
] |
f095868d1990c1d126e906ada6acbab26348b3d3
|
test
|
Table._dev_add_inert
|
inert: hooks and links are not activated
|
oplus/epm/table.py
|
def _dev_add_inert(self, records_data):
"""
inert: hooks and links are not activated
"""
added_records = []
for r_data in records_data:
# create record
record = Record(
self,
data=r_data
)
# store
# we don't check uniqueness here => will be done while checking hooks
self._records[record.get_pk()] = record
# remember record
added_records.append(record)
return added_records
|
def _dev_add_inert(self, records_data):
"""
inert: hooks and links are not activated
"""
added_records = []
for r_data in records_data:
# create record
record = Record(
self,
data=r_data
)
# store
# we don't check uniqueness here => will be done while checking hooks
self._records[record.get_pk()] = record
# remember record
added_records.append(record)
return added_records
|
[
"inert",
":",
"hooks",
"and",
"links",
"are",
"not",
"activated"
] |
openergy/oplus
|
python
|
https://github.com/openergy/oplus/blob/f095868d1990c1d126e906ada6acbab26348b3d3/oplus/epm/table.py#L90-L109
|
[
"def",
"_dev_add_inert",
"(",
"self",
",",
"records_data",
")",
":",
"added_records",
"=",
"[",
"]",
"for",
"r_data",
"in",
"records_data",
":",
"# create record",
"record",
"=",
"Record",
"(",
"self",
",",
"data",
"=",
"r_data",
")",
"# store",
"# we don't check uniqueness here => will be done while checking hooks",
"self",
".",
"_records",
"[",
"record",
".",
"get_pk",
"(",
")",
"]",
"=",
"record",
"# remember record",
"added_records",
".",
"append",
"(",
"record",
")",
"return",
"added_records"
] |
f095868d1990c1d126e906ada6acbab26348b3d3
|
test
|
Table.select
|
Parameters
----------
filter_by: callable, default None
Callable must take one argument (a record of table), and return True to keep record, or False to skip it.
Example : .select(lambda x: x.name == "my_name").
If None, records are not filtered.
Returns
-------
Queryset instance, containing all selected records.
|
oplus/epm/table.py
|
def select(self, filter_by=None):
"""
Parameters
----------
filter_by: callable, default None
Callable must take one argument (a record of table), and return True to keep record, or False to skip it.
Example : .select(lambda x: x.name == "my_name").
If None, records are not filtered.
Returns
-------
Queryset instance, containing all selected records.
"""
records = self._records.values() if filter_by is None else filter(filter_by, self._records.values())
return Queryset(self, records=records)
|
def select(self, filter_by=None):
"""
Parameters
----------
filter_by: callable, default None
Callable must take one argument (a record of table), and return True to keep record, or False to skip it.
Example : .select(lambda x: x.name == "my_name").
If None, records are not filtered.
Returns
-------
Queryset instance, containing all selected records.
"""
records = self._records.values() if filter_by is None else filter(filter_by, self._records.values())
return Queryset(self, records=records)
|
[
"Parameters",
"----------",
"filter_by",
":",
"callable",
"default",
"None",
"Callable",
"must",
"take",
"one",
"argument",
"(",
"a",
"record",
"of",
"table",
")",
"and",
"return",
"True",
"to",
"keep",
"record",
"or",
"False",
"to",
"skip",
"it",
".",
"Example",
":",
".",
"select",
"(",
"lambda",
"x",
":",
"x",
".",
"name",
"==",
"my_name",
")",
".",
"If",
"None",
"records",
"are",
"not",
"filtered",
"."
] |
openergy/oplus
|
python
|
https://github.com/openergy/oplus/blob/f095868d1990c1d126e906ada6acbab26348b3d3/oplus/epm/table.py#L175-L189
|
[
"def",
"select",
"(",
"self",
",",
"filter_by",
"=",
"None",
")",
":",
"records",
"=",
"self",
".",
"_records",
".",
"values",
"(",
")",
"if",
"filter_by",
"is",
"None",
"else",
"filter",
"(",
"filter_by",
",",
"self",
".",
"_records",
".",
"values",
"(",
")",
")",
"return",
"Queryset",
"(",
"self",
",",
"records",
"=",
"records",
")"
] |
f095868d1990c1d126e906ada6acbab26348b3d3
|
test
|
Table.one
|
Parameters
----------
filter_by: callable, default None
Callable must take one argument (a record of table), and return True to keep record, or False to skip it.
Example : .one(lambda x: x.name == "my_name").
If None, records are not filtered.
Returns
-------
Record instance if one and only one record is found. Else raises.
Raises
------
RecordDoesNotExistError if no record is found
MultipleRecordsReturnedError if multiple records are found
|
oplus/epm/table.py
|
def one(self, filter_by=None):
"""
Parameters
----------
filter_by: callable, default None
Callable must take one argument (a record of table), and return True to keep record, or False to skip it.
Example : .one(lambda x: x.name == "my_name").
If None, records are not filtered.
Returns
-------
Record instance if one and only one record is found. Else raises.
Raises
------
RecordDoesNotExistError if no record is found
MultipleRecordsReturnedError if multiple records are found
"""
return Queryset(self, records=self._records.values()).one(filter_by=filter_by)
|
def one(self, filter_by=None):
"""
Parameters
----------
filter_by: callable, default None
Callable must take one argument (a record of table), and return True to keep record, or False to skip it.
Example : .one(lambda x: x.name == "my_name").
If None, records are not filtered.
Returns
-------
Record instance if one and only one record is found. Else raises.
Raises
------
RecordDoesNotExistError if no record is found
MultipleRecordsReturnedError if multiple records are found
"""
return Queryset(self, records=self._records.values()).one(filter_by=filter_by)
|
[
"Parameters",
"----------",
"filter_by",
":",
"callable",
"default",
"None",
"Callable",
"must",
"take",
"one",
"argument",
"(",
"a",
"record",
"of",
"table",
")",
"and",
"return",
"True",
"to",
"keep",
"record",
"or",
"False",
"to",
"skip",
"it",
".",
"Example",
":",
".",
"one",
"(",
"lambda",
"x",
":",
"x",
".",
"name",
"==",
"my_name",
")",
".",
"If",
"None",
"records",
"are",
"not",
"filtered",
"."
] |
openergy/oplus
|
python
|
https://github.com/openergy/oplus/blob/f095868d1990c1d126e906ada6acbab26348b3d3/oplus/epm/table.py#L191-L209
|
[
"def",
"one",
"(",
"self",
",",
"filter_by",
"=",
"None",
")",
":",
"return",
"Queryset",
"(",
"self",
",",
"records",
"=",
"self",
".",
"_records",
".",
"values",
"(",
")",
")",
".",
"one",
"(",
"filter_by",
"=",
"filter_by",
")"
] |
f095868d1990c1d126e906ada6acbab26348b3d3
|
test
|
Table.batch_add
|
Parameters
----------
records_data: list of dictionaries containing records data. Keys of dictionary may be field names and/or field
indexes
Returns
-------
Queryset instance of added records
|
oplus/epm/table.py
|
def batch_add(self, records_data):
"""
Parameters
----------
records_data: list of dictionaries containing records data. Keys of dictionary may be field names and/or field
indexes
Returns
-------
Queryset instance of added records
"""
# workflow
# --------
# (methods belonging to create/update/delete framework:
# epm._dev_populate_from_json_data, table.batch_add, record.update, queryset.delete, record.delete)
# 1. add inert
# * data is checked
# * old links are unregistered
# * record is stored in table (=> pk uniqueness is checked)
# 2. activate: hooks, links, external files
# add inert
added_records = self._dev_add_inert(records_data)
# activate hooks
for r in added_records:
r._dev_activate_hooks()
# activate links and external files
for r in added_records:
r._dev_activate_links()
r._dev_activate_external_files()
return Queryset(self, records=added_records)
|
def batch_add(self, records_data):
"""
Parameters
----------
records_data: list of dictionaries containing records data. Keys of dictionary may be field names and/or field
indexes
Returns
-------
Queryset instance of added records
"""
# workflow
# --------
# (methods belonging to create/update/delete framework:
# epm._dev_populate_from_json_data, table.batch_add, record.update, queryset.delete, record.delete)
# 1. add inert
# * data is checked
# * old links are unregistered
# * record is stored in table (=> pk uniqueness is checked)
# 2. activate: hooks, links, external files
# add inert
added_records = self._dev_add_inert(records_data)
# activate hooks
for r in added_records:
r._dev_activate_hooks()
# activate links and external files
for r in added_records:
r._dev_activate_links()
r._dev_activate_external_files()
return Queryset(self, records=added_records)
|
[
"Parameters",
"----------",
"records_data",
":",
"list",
"of",
"dictionaries",
"containing",
"records",
"data",
".",
"Keys",
"of",
"dictionary",
"may",
"be",
"field",
"names",
"and",
"/",
"or",
"field",
"indexes"
] |
openergy/oplus
|
python
|
https://github.com/openergy/oplus/blob/f095868d1990c1d126e906ada6acbab26348b3d3/oplus/epm/table.py#L216-L250
|
[
"def",
"batch_add",
"(",
"self",
",",
"records_data",
")",
":",
"# workflow",
"# --------",
"# (methods belonging to create/update/delete framework:",
"# epm._dev_populate_from_json_data, table.batch_add, record.update, queryset.delete, record.delete)",
"# 1. add inert",
"# * data is checked",
"# * old links are unregistered",
"# * record is stored in table (=> pk uniqueness is checked)",
"# 2. activate: hooks, links, external files",
"# add inert",
"added_records",
"=",
"self",
".",
"_dev_add_inert",
"(",
"records_data",
")",
"# activate hooks",
"for",
"r",
"in",
"added_records",
":",
"r",
".",
"_dev_activate_hooks",
"(",
")",
"# activate links and external files",
"for",
"r",
"in",
"added_records",
":",
"r",
".",
"_dev_activate_links",
"(",
")",
"r",
".",
"_dev_activate_external_files",
"(",
")",
"return",
"Queryset",
"(",
"self",
",",
"records",
"=",
"added_records",
")"
] |
f095868d1990c1d126e906ada6acbab26348b3d3
|
test
|
RelationsManager.register_record_hook
|
target record must have been set
|
oplus/epm/relations_manager.py
|
def register_record_hook(self, hook):
"""
target record must have been set
"""
for key in hook.keys:
if key in self._record_hooks:
field_descriptor = hook.target_record.get_field_descriptor(hook.target_index)
raise FieldValidationError(
f"Reference key already exists, can't create: {key}. "
f"{field_descriptor.get_error_location_message(hook.target_value, hook.target_index)}"
)
self._record_hooks[key] = hook
|
def register_record_hook(self, hook):
"""
target record must have been set
"""
for key in hook.keys:
if key in self._record_hooks:
field_descriptor = hook.target_record.get_field_descriptor(hook.target_index)
raise FieldValidationError(
f"Reference key already exists, can't create: {key}. "
f"{field_descriptor.get_error_location_message(hook.target_value, hook.target_index)}"
)
self._record_hooks[key] = hook
|
[
"target",
"record",
"must",
"have",
"been",
"set"
] |
openergy/oplus
|
python
|
https://github.com/openergy/oplus/blob/f095868d1990c1d126e906ada6acbab26348b3d3/oplus/epm/relations_manager.py#L47-L58
|
[
"def",
"register_record_hook",
"(",
"self",
",",
"hook",
")",
":",
"for",
"key",
"in",
"hook",
".",
"keys",
":",
"if",
"key",
"in",
"self",
".",
"_record_hooks",
":",
"field_descriptor",
"=",
"hook",
".",
"target_record",
".",
"get_field_descriptor",
"(",
"hook",
".",
"target_index",
")",
"raise",
"FieldValidationError",
"(",
"f\"Reference key already exists, can't create: {key}. \"",
"f\"{field_descriptor.get_error_location_message(hook.target_value, hook.target_index)}\"",
")",
"self",
".",
"_record_hooks",
"[",
"key",
"]",
"=",
"hook"
] |
f095868d1990c1d126e906ada6acbab26348b3d3
|
test
|
RelationsManager.register_link
|
source record and index must have been set
|
oplus/epm/relations_manager.py
|
def register_link(self, link):
"""
source record and index must have been set
"""
keys = tuple((ref, link.initial_hook_value) for ref in link.hook_references)
# look for a record hook
for k in keys:
if k in self._record_hooks:
# set link target
link.set_target(target_record=self._record_hooks[k].target_record)
break
else:
# look for a table hook
for k in keys:
if k in self._table_hooks:
# set link target
link.set_target(target_table=self._table_hooks[k])
break
else:
field_descriptor = link.source_record.get_field_descriptor(link.source_index)
raise FieldValidationError(
f"No object found with any of given references : {keys}. "
f"{field_descriptor.get_error_location_message(link.initial_hook_value)}"
)
# store by source
if link.source_record not in self._links_by_source:
self._links_by_source[link.source_record] = set()
self._links_by_source[link.source_record].add(link)
# store by target
if link.target not in self._links_by_target:
self._links_by_target[link.target] = set()
self._links_by_target[link.target].add(link)
|
def register_link(self, link):
"""
source record and index must have been set
"""
keys = tuple((ref, link.initial_hook_value) for ref in link.hook_references)
# look for a record hook
for k in keys:
if k in self._record_hooks:
# set link target
link.set_target(target_record=self._record_hooks[k].target_record)
break
else:
# look for a table hook
for k in keys:
if k in self._table_hooks:
# set link target
link.set_target(target_table=self._table_hooks[k])
break
else:
field_descriptor = link.source_record.get_field_descriptor(link.source_index)
raise FieldValidationError(
f"No object found with any of given references : {keys}. "
f"{field_descriptor.get_error_location_message(link.initial_hook_value)}"
)
# store by source
if link.source_record not in self._links_by_source:
self._links_by_source[link.source_record] = set()
self._links_by_source[link.source_record].add(link)
# store by target
if link.target not in self._links_by_target:
self._links_by_target[link.target] = set()
self._links_by_target[link.target].add(link)
|
[
"source",
"record",
"and",
"index",
"must",
"have",
"been",
"set"
] |
openergy/oplus
|
python
|
https://github.com/openergy/oplus/blob/f095868d1990c1d126e906ada6acbab26348b3d3/oplus/epm/relations_manager.py#L65-L99
|
[
"def",
"register_link",
"(",
"self",
",",
"link",
")",
":",
"keys",
"=",
"tuple",
"(",
"(",
"ref",
",",
"link",
".",
"initial_hook_value",
")",
"for",
"ref",
"in",
"link",
".",
"hook_references",
")",
"# look for a record hook",
"for",
"k",
"in",
"keys",
":",
"if",
"k",
"in",
"self",
".",
"_record_hooks",
":",
"# set link target",
"link",
".",
"set_target",
"(",
"target_record",
"=",
"self",
".",
"_record_hooks",
"[",
"k",
"]",
".",
"target_record",
")",
"break",
"else",
":",
"# look for a table hook",
"for",
"k",
"in",
"keys",
":",
"if",
"k",
"in",
"self",
".",
"_table_hooks",
":",
"# set link target",
"link",
".",
"set_target",
"(",
"target_table",
"=",
"self",
".",
"_table_hooks",
"[",
"k",
"]",
")",
"break",
"else",
":",
"field_descriptor",
"=",
"link",
".",
"source_record",
".",
"get_field_descriptor",
"(",
"link",
".",
"source_index",
")",
"raise",
"FieldValidationError",
"(",
"f\"No object found with any of given references : {keys}. \"",
"f\"{field_descriptor.get_error_location_message(link.initial_hook_value)}\"",
")",
"# store by source",
"if",
"link",
".",
"source_record",
"not",
"in",
"self",
".",
"_links_by_source",
":",
"self",
".",
"_links_by_source",
"[",
"link",
".",
"source_record",
"]",
"=",
"set",
"(",
")",
"self",
".",
"_links_by_source",
"[",
"link",
".",
"source_record",
"]",
".",
"add",
"(",
"link",
")",
"# store by target",
"if",
"link",
".",
"target",
"not",
"in",
"self",
".",
"_links_by_target",
":",
"self",
".",
"_links_by_target",
"[",
"link",
".",
"target",
"]",
"=",
"set",
"(",
")",
"self",
".",
"_links_by_target",
"[",
"link",
".",
"target",
"]",
".",
"add",
"(",
"link",
")"
] |
f095868d1990c1d126e906ada6acbab26348b3d3
|
test
|
run_subprocess
|
Parameters
----------
command: command
cwd: current working directory
stdout: output info stream (must have 'write' method)
stderr: output error stream (must have 'write' method)
shell: see subprocess.Popen
beat_freq: if not none, stdout will be used at least every beat_freq (in seconds)
|
oplus/util.py
|
def run_subprocess(command, cwd=None, stdout=None, stderr=None, shell=False, beat_freq=None):
"""
Parameters
----------
command: command
cwd: current working directory
stdout: output info stream (must have 'write' method)
stderr: output error stream (must have 'write' method)
shell: see subprocess.Popen
beat_freq: if not none, stdout will be used at least every beat_freq (in seconds)
"""
sys.encoding = CONF.encoding
# prepare variables
stdout = sys.stdout if stdout is None else stdout
stderr = sys.stderr if stderr is None else stderr
# run subprocess
with subprocess.Popen(
command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=cwd,
shell=shell,
universal_newlines=True
) as sub_p:
# link output streams
with redirect_stream(sub_p.stdout, stdout), redirect_stream(sub_p.stderr, stderr):
while True:
try:
sub_p.wait(timeout=beat_freq)
break
except subprocess.TimeoutExpired:
stdout.write("subprocess is still running\n")
if hasattr(sys.stdout, "flush"):
sys.stdout.flush()
return sub_p.returncode
|
def run_subprocess(command, cwd=None, stdout=None, stderr=None, shell=False, beat_freq=None):
"""
Parameters
----------
command: command
cwd: current working directory
stdout: output info stream (must have 'write' method)
stderr: output error stream (must have 'write' method)
shell: see subprocess.Popen
beat_freq: if not none, stdout will be used at least every beat_freq (in seconds)
"""
sys.encoding = CONF.encoding
# prepare variables
stdout = sys.stdout if stdout is None else stdout
stderr = sys.stderr if stderr is None else stderr
# run subprocess
with subprocess.Popen(
command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=cwd,
shell=shell,
universal_newlines=True
) as sub_p:
# link output streams
with redirect_stream(sub_p.stdout, stdout), redirect_stream(sub_p.stderr, stderr):
while True:
try:
sub_p.wait(timeout=beat_freq)
break
except subprocess.TimeoutExpired:
stdout.write("subprocess is still running\n")
if hasattr(sys.stdout, "flush"):
sys.stdout.flush()
return sub_p.returncode
|
[
"Parameters",
"----------",
"command",
":",
"command",
"cwd",
":",
"current",
"working",
"directory",
"stdout",
":",
"output",
"info",
"stream",
"(",
"must",
"have",
"write",
"method",
")",
"stderr",
":",
"output",
"error",
"stream",
"(",
"must",
"have",
"write",
"method",
")",
"shell",
":",
"see",
"subprocess",
".",
"Popen",
"beat_freq",
":",
"if",
"not",
"none",
"stdout",
"will",
"be",
"used",
"at",
"least",
"every",
"beat_freq",
"(",
"in",
"seconds",
")"
] |
openergy/oplus
|
python
|
https://github.com/openergy/oplus/blob/f095868d1990c1d126e906ada6acbab26348b3d3/oplus/util.py#L81-L116
|
[
"def",
"run_subprocess",
"(",
"command",
",",
"cwd",
"=",
"None",
",",
"stdout",
"=",
"None",
",",
"stderr",
"=",
"None",
",",
"shell",
"=",
"False",
",",
"beat_freq",
"=",
"None",
")",
":",
"sys",
".",
"encoding",
"=",
"CONF",
".",
"encoding",
"# prepare variables",
"stdout",
"=",
"sys",
".",
"stdout",
"if",
"stdout",
"is",
"None",
"else",
"stdout",
"stderr",
"=",
"sys",
".",
"stderr",
"if",
"stderr",
"is",
"None",
"else",
"stderr",
"# run subprocess",
"with",
"subprocess",
".",
"Popen",
"(",
"command",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"stderr",
"=",
"subprocess",
".",
"PIPE",
",",
"cwd",
"=",
"cwd",
",",
"shell",
"=",
"shell",
",",
"universal_newlines",
"=",
"True",
")",
"as",
"sub_p",
":",
"# link output streams",
"with",
"redirect_stream",
"(",
"sub_p",
".",
"stdout",
",",
"stdout",
")",
",",
"redirect_stream",
"(",
"sub_p",
".",
"stderr",
",",
"stderr",
")",
":",
"while",
"True",
":",
"try",
":",
"sub_p",
".",
"wait",
"(",
"timeout",
"=",
"beat_freq",
")",
"break",
"except",
"subprocess",
".",
"TimeoutExpired",
":",
"stdout",
".",
"write",
"(",
"\"subprocess is still running\\n\"",
")",
"if",
"hasattr",
"(",
"sys",
".",
"stdout",
",",
"\"flush\"",
")",
":",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"return",
"sub_p",
".",
"returncode"
] |
f095868d1990c1d126e906ada6acbab26348b3d3
|
test
|
get_string_buffer
|
path_or_content: path or content_str or content_bts or string_io or bytes_io
Returns
-------
string_buffer, path
path will be None if input was not a path
|
oplus/util.py
|
def get_string_buffer(path_or_content, expected_extension):
"""
path_or_content: path or content_str or content_bts or string_io or bytes_io
Returns
-------
string_buffer, path
path will be None if input was not a path
"""
buffer, path = None, None
# path or content string
if isinstance(path_or_content, str):
if path_or_content[-len(expected_extension)-1:] == ".%s" % expected_extension:
if not os.path.isfile(path_or_content):
raise FileNotFoundError("No file at given path: '%s'." % path_or_content)
buffer, path = open(path_or_content, encoding=CONF.encoding), path_or_content
else:
buffer = io.StringIO(path_or_content, )
# text io
elif isinstance(path_or_content, io.TextIOBase):
buffer = path_or_content
# bytes
elif isinstance(path_or_content, bytes):
buffer = io.StringIO(path_or_content.decode(encoding=CONF.encoding))
elif isinstance(path_or_content, io.BufferedIOBase):
buffer = io.StringIO(path_or_content.read().decode(encoding=CONF.encoding))
else:
raise ValueError("path_or_content type could not be identified")
return buffer, path
|
def get_string_buffer(path_or_content, expected_extension):
"""
path_or_content: path or content_str or content_bts or string_io or bytes_io
Returns
-------
string_buffer, path
path will be None if input was not a path
"""
buffer, path = None, None
# path or content string
if isinstance(path_or_content, str):
if path_or_content[-len(expected_extension)-1:] == ".%s" % expected_extension:
if not os.path.isfile(path_or_content):
raise FileNotFoundError("No file at given path: '%s'." % path_or_content)
buffer, path = open(path_or_content, encoding=CONF.encoding), path_or_content
else:
buffer = io.StringIO(path_or_content, )
# text io
elif isinstance(path_or_content, io.TextIOBase):
buffer = path_or_content
# bytes
elif isinstance(path_or_content, bytes):
buffer = io.StringIO(path_or_content.decode(encoding=CONF.encoding))
elif isinstance(path_or_content, io.BufferedIOBase):
buffer = io.StringIO(path_or_content.read().decode(encoding=CONF.encoding))
else:
raise ValueError("path_or_content type could not be identified")
return buffer, path
|
[
"path_or_content",
":",
"path",
"or",
"content_str",
"or",
"content_bts",
"or",
"string_io",
"or",
"bytes_io"
] |
openergy/oplus
|
python
|
https://github.com/openergy/oplus/blob/f095868d1990c1d126e906ada6acbab26348b3d3/oplus/util.py#L119-L153
|
[
"def",
"get_string_buffer",
"(",
"path_or_content",
",",
"expected_extension",
")",
":",
"buffer",
",",
"path",
"=",
"None",
",",
"None",
"# path or content string",
"if",
"isinstance",
"(",
"path_or_content",
",",
"str",
")",
":",
"if",
"path_or_content",
"[",
"-",
"len",
"(",
"expected_extension",
")",
"-",
"1",
":",
"]",
"==",
"\".%s\"",
"%",
"expected_extension",
":",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"path_or_content",
")",
":",
"raise",
"FileNotFoundError",
"(",
"\"No file at given path: '%s'.\"",
"%",
"path_or_content",
")",
"buffer",
",",
"path",
"=",
"open",
"(",
"path_or_content",
",",
"encoding",
"=",
"CONF",
".",
"encoding",
")",
",",
"path_or_content",
"else",
":",
"buffer",
"=",
"io",
".",
"StringIO",
"(",
"path_or_content",
",",
")",
"# text io",
"elif",
"isinstance",
"(",
"path_or_content",
",",
"io",
".",
"TextIOBase",
")",
":",
"buffer",
"=",
"path_or_content",
"# bytes",
"elif",
"isinstance",
"(",
"path_or_content",
",",
"bytes",
")",
":",
"buffer",
"=",
"io",
".",
"StringIO",
"(",
"path_or_content",
".",
"decode",
"(",
"encoding",
"=",
"CONF",
".",
"encoding",
")",
")",
"elif",
"isinstance",
"(",
"path_or_content",
",",
"io",
".",
"BufferedIOBase",
")",
":",
"buffer",
"=",
"io",
".",
"StringIO",
"(",
"path_or_content",
".",
"read",
"(",
")",
".",
"decode",
"(",
"encoding",
"=",
"CONF",
".",
"encoding",
")",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"path_or_content type could not be identified\"",
")",
"return",
"buffer",
",",
"path"
] |
f095868d1990c1d126e906ada6acbab26348b3d3
|
test
|
Err.get_data
|
Parameters
----------
simulation_step: if not given, returns a raw report
error_category: if only one argument is specified, swaps dataframe report
|
oplus/err.py
|
def get_data(self, simulation_step=None, error_category=None):
"""
Parameters
----------
simulation_step: if not given, returns a raw report
error_category: if only one argument is specified, swaps dataframe report
"""
if simulation_step is None and error_category is None:
return self._df.dropna(axis="rows", how="all")
if simulation_step is not None:
if simulation_step not in self._simulation_step_list:
raise RuntimeError("The simulation_step '%s' is not referred in the error file." % simulation_step)
if error_category is not None:
if error_category not in self.CATEGORIES:
raise RuntimeError("The error_cat '%s' is wrong." % error_category)
iterables = [simulation_step, error_category]
columns = pd.MultiIndex.from_product(iterables)
series = self._df[simulation_step][error_category].dropna(axis="rows", how="all")
df = pd.DataFrame(index=series.index, columns=columns)
df[simulation_step] = series
return df
return self._df[simulation_step].dropna(axis="rows", how="all")
if error_category is not None:
if error_category not in self.CATEGORIES:
raise RuntimeError("The error_category '%s' is wrong." % error_category)
df = self._df.copy()
df.columns = df.columns.swaplevel(0, 1)
return df[error_category].dropna(axis="rows", how="all")
|
def get_data(self, simulation_step=None, error_category=None):
"""
Parameters
----------
simulation_step: if not given, returns a raw report
error_category: if only one argument is specified, swaps dataframe report
"""
if simulation_step is None and error_category is None:
return self._df.dropna(axis="rows", how="all")
if simulation_step is not None:
if simulation_step not in self._simulation_step_list:
raise RuntimeError("The simulation_step '%s' is not referred in the error file." % simulation_step)
if error_category is not None:
if error_category not in self.CATEGORIES:
raise RuntimeError("The error_cat '%s' is wrong." % error_category)
iterables = [simulation_step, error_category]
columns = pd.MultiIndex.from_product(iterables)
series = self._df[simulation_step][error_category].dropna(axis="rows", how="all")
df = pd.DataFrame(index=series.index, columns=columns)
df[simulation_step] = series
return df
return self._df[simulation_step].dropna(axis="rows", how="all")
if error_category is not None:
if error_category not in self.CATEGORIES:
raise RuntimeError("The error_category '%s' is wrong." % error_category)
df = self._df.copy()
df.columns = df.columns.swaplevel(0, 1)
return df[error_category].dropna(axis="rows", how="all")
|
[
"Parameters",
"----------",
"simulation_step",
":",
"if",
"not",
"given",
"returns",
"a",
"raw",
"report",
"error_category",
":",
"if",
"only",
"one",
"argument",
"is",
"specified",
"swaps",
"dataframe",
"report"
] |
openergy/oplus
|
python
|
https://github.com/openergy/oplus/blob/f095868d1990c1d126e906ada6acbab26348b3d3/oplus/err.py#L121-L153
|
[
"def",
"get_data",
"(",
"self",
",",
"simulation_step",
"=",
"None",
",",
"error_category",
"=",
"None",
")",
":",
"if",
"simulation_step",
"is",
"None",
"and",
"error_category",
"is",
"None",
":",
"return",
"self",
".",
"_df",
".",
"dropna",
"(",
"axis",
"=",
"\"rows\"",
",",
"how",
"=",
"\"all\"",
")",
"if",
"simulation_step",
"is",
"not",
"None",
":",
"if",
"simulation_step",
"not",
"in",
"self",
".",
"_simulation_step_list",
":",
"raise",
"RuntimeError",
"(",
"\"The simulation_step '%s' is not referred in the error file.\"",
"%",
"simulation_step",
")",
"if",
"error_category",
"is",
"not",
"None",
":",
"if",
"error_category",
"not",
"in",
"self",
".",
"CATEGORIES",
":",
"raise",
"RuntimeError",
"(",
"\"The error_cat '%s' is wrong.\"",
"%",
"error_category",
")",
"iterables",
"=",
"[",
"simulation_step",
",",
"error_category",
"]",
"columns",
"=",
"pd",
".",
"MultiIndex",
".",
"from_product",
"(",
"iterables",
")",
"series",
"=",
"self",
".",
"_df",
"[",
"simulation_step",
"]",
"[",
"error_category",
"]",
".",
"dropna",
"(",
"axis",
"=",
"\"rows\"",
",",
"how",
"=",
"\"all\"",
")",
"df",
"=",
"pd",
".",
"DataFrame",
"(",
"index",
"=",
"series",
".",
"index",
",",
"columns",
"=",
"columns",
")",
"df",
"[",
"simulation_step",
"]",
"=",
"series",
"return",
"df",
"return",
"self",
".",
"_df",
"[",
"simulation_step",
"]",
".",
"dropna",
"(",
"axis",
"=",
"\"rows\"",
",",
"how",
"=",
"\"all\"",
")",
"if",
"error_category",
"is",
"not",
"None",
":",
"if",
"error_category",
"not",
"in",
"self",
".",
"CATEGORIES",
":",
"raise",
"RuntimeError",
"(",
"\"The error_category '%s' is wrong.\"",
"%",
"error_category",
")",
"df",
"=",
"self",
".",
"_df",
".",
"copy",
"(",
")",
"df",
".",
"columns",
"=",
"df",
".",
"columns",
".",
"swaplevel",
"(",
"0",
",",
"1",
")",
"return",
"df",
"[",
"error_category",
"]",
".",
"dropna",
"(",
"axis",
"=",
"\"rows\"",
",",
"how",
"=",
"\"all\"",
")"
] |
f095868d1990c1d126e906ada6acbab26348b3d3
|
test
|
IntentContainer._create_regex
|
Create regex and return. If error occurs returns None.
|
padaos.py
|
def _create_regex(self, line, intent_name):
""" Create regex and return. If error occurs returns None. """
try:
return re.compile(self._create_intent_pattern(line, intent_name),
re.IGNORECASE)
except sre_constants.error as e:
LOG.warning('Failed to parse the line "{}" '
'for {}'.format(line, intent_name))
return None
|
def _create_regex(self, line, intent_name):
""" Create regex and return. If error occurs returns None. """
try:
return re.compile(self._create_intent_pattern(line, intent_name),
re.IGNORECASE)
except sre_constants.error as e:
LOG.warning('Failed to parse the line "{}" '
'for {}'.format(line, intent_name))
return None
|
[
"Create",
"regex",
"and",
"return",
".",
"If",
"error",
"occurs",
"returns",
"None",
"."
] |
MycroftAI/padaos
|
python
|
https://github.com/MycroftAI/padaos/blob/c7fb3d72fefbe552963c5a5a0f606e1f2fde2968/padaos.py#L104-L112
|
[
"def",
"_create_regex",
"(",
"self",
",",
"line",
",",
"intent_name",
")",
":",
"try",
":",
"return",
"re",
".",
"compile",
"(",
"self",
".",
"_create_intent_pattern",
"(",
"line",
",",
"intent_name",
")",
",",
"re",
".",
"IGNORECASE",
")",
"except",
"sre_constants",
".",
"error",
"as",
"e",
":",
"LOG",
".",
"warning",
"(",
"'Failed to parse the line \"{}\" '",
"'for {}'",
".",
"format",
"(",
"line",
",",
"intent_name",
")",
")",
"return",
"None"
] |
c7fb3d72fefbe552963c5a5a0f606e1f2fde2968
|
test
|
Constants.str
|
Convert status (id) to its string name.
|
pyca/db.py
|
def str(cls, value):
'''Convert status (id) to its string name.'''
for k, v in cls.__dict__.items():
if k[0] in string.ascii_uppercase and v == value:
return k.lower().replace('_', ' ')
|
def str(cls, value):
'''Convert status (id) to its string name.'''
for k, v in cls.__dict__.items():
if k[0] in string.ascii_uppercase and v == value:
return k.lower().replace('_', ' ')
|
[
"Convert",
"status",
"(",
"id",
")",
"to",
"its",
"string",
"name",
"."
] |
opencast/pyCA
|
python
|
https://github.com/opencast/pyCA/blob/c89b168d4780d157e1b3f7676628c1b131956a88/pyca/db.py#L43-L47
|
[
"def",
"str",
"(",
"cls",
",",
"value",
")",
":",
"for",
"k",
",",
"v",
"in",
"cls",
".",
"__dict__",
".",
"items",
"(",
")",
":",
"if",
"k",
"[",
"0",
"]",
"in",
"string",
".",
"ascii_uppercase",
"and",
"v",
"==",
"value",
":",
"return",
"k",
".",
"lower",
"(",
")",
".",
"replace",
"(",
"'_'",
",",
"' '",
")"
] |
c89b168d4780d157e1b3f7676628c1b131956a88
|
test
|
BaseEvent.remaining_duration
|
Returns the remaining duration for a recording.
|
pyca/db.py
|
def remaining_duration(self, time):
'''Returns the remaining duration for a recording.
'''
return max(0, self.end - max(self.start, time))
|
def remaining_duration(self, time):
'''Returns the remaining duration for a recording.
'''
return max(0, self.end - max(self.start, time))
|
[
"Returns",
"the",
"remaining",
"duration",
"for",
"a",
"recording",
"."
] |
opencast/pyCA
|
python
|
https://github.com/opencast/pyCA/blob/c89b168d4780d157e1b3f7676628c1b131956a88/pyca/db.py#L115-L118
|
[
"def",
"remaining_duration",
"(",
"self",
",",
"time",
")",
":",
"return",
"max",
"(",
"0",
",",
"self",
".",
"end",
"-",
"max",
"(",
"self",
".",
"start",
",",
"time",
")",
")"
] |
c89b168d4780d157e1b3f7676628c1b131956a88
|
test
|
BaseEvent.serialize
|
Serialize this object as dictionary usable for conversion to JSON.
:return: Dictionary representing this object.
|
pyca/db.py
|
def serialize(self):
'''Serialize this object as dictionary usable for conversion to JSON.
:return: Dictionary representing this object.
'''
return {
'type': 'event',
'id': self.uid,
'attributes': {
'start': self.start,
'end': self.end,
'uid': self.uid,
'title': self.title,
'data': self.get_data(),
'status': Status.str(self.status)
}
}
|
def serialize(self):
'''Serialize this object as dictionary usable for conversion to JSON.
:return: Dictionary representing this object.
'''
return {
'type': 'event',
'id': self.uid,
'attributes': {
'start': self.start,
'end': self.end,
'uid': self.uid,
'title': self.title,
'data': self.get_data(),
'status': Status.str(self.status)
}
}
|
[
"Serialize",
"this",
"object",
"as",
"dictionary",
"usable",
"for",
"conversion",
"to",
"JSON",
"."
] |
opencast/pyCA
|
python
|
https://github.com/opencast/pyCA/blob/c89b168d4780d157e1b3f7676628c1b131956a88/pyca/db.py#L144-L160
|
[
"def",
"serialize",
"(",
"self",
")",
":",
"return",
"{",
"'type'",
":",
"'event'",
",",
"'id'",
":",
"self",
".",
"uid",
",",
"'attributes'",
":",
"{",
"'start'",
":",
"self",
".",
"start",
",",
"'end'",
":",
"self",
".",
"end",
",",
"'uid'",
":",
"self",
".",
"uid",
",",
"'title'",
":",
"self",
".",
"title",
",",
"'data'",
":",
"self",
".",
"get_data",
"(",
")",
",",
"'status'",
":",
"Status",
".",
"str",
"(",
"self",
".",
"status",
")",
"}",
"}"
] |
c89b168d4780d157e1b3f7676628c1b131956a88
|
test
|
http_request
|
Make an HTTP request to a given URL with optional parameters.
|
pyca/utils.py
|
def http_request(url, post_data=None):
'''Make an HTTP request to a given URL with optional parameters.
'''
logger.debug('Requesting URL: %s' % url)
buf = bio()
curl = pycurl.Curl()
curl.setopt(curl.URL, url.encode('ascii', 'ignore'))
# Disable HTTPS verification methods if insecure is set
if config()['server']['insecure']:
curl.setopt(curl.SSL_VERIFYPEER, 0)
curl.setopt(curl.SSL_VERIFYHOST, 0)
if config()['server']['certificate']:
# Make sure verification methods are turned on
curl.setopt(curl.SSL_VERIFYPEER, 1)
curl.setopt(curl.SSL_VERIFYHOST, 2)
# Import your certificates
curl.setopt(pycurl.CAINFO, config()['server']['certificate'])
if post_data:
curl.setopt(curl.HTTPPOST, post_data)
curl.setopt(curl.WRITEFUNCTION, buf.write)
curl.setopt(pycurl.HTTPAUTH, pycurl.HTTPAUTH_DIGEST)
curl.setopt(pycurl.USERPWD, "%s:%s" % (config()['server']['username'],
config()['server']['password']))
curl.setopt(curl.HTTPHEADER, ['X-Requested-Auth: Digest'])
curl.setopt(curl.FAILONERROR, True)
curl.setopt(curl.FOLLOWLOCATION, True)
curl.perform()
curl.close()
result = buf.getvalue()
buf.close()
return result
|
def http_request(url, post_data=None):
'''Make an HTTP request to a given URL with optional parameters.
'''
logger.debug('Requesting URL: %s' % url)
buf = bio()
curl = pycurl.Curl()
curl.setopt(curl.URL, url.encode('ascii', 'ignore'))
# Disable HTTPS verification methods if insecure is set
if config()['server']['insecure']:
curl.setopt(curl.SSL_VERIFYPEER, 0)
curl.setopt(curl.SSL_VERIFYHOST, 0)
if config()['server']['certificate']:
# Make sure verification methods are turned on
curl.setopt(curl.SSL_VERIFYPEER, 1)
curl.setopt(curl.SSL_VERIFYHOST, 2)
# Import your certificates
curl.setopt(pycurl.CAINFO, config()['server']['certificate'])
if post_data:
curl.setopt(curl.HTTPPOST, post_data)
curl.setopt(curl.WRITEFUNCTION, buf.write)
curl.setopt(pycurl.HTTPAUTH, pycurl.HTTPAUTH_DIGEST)
curl.setopt(pycurl.USERPWD, "%s:%s" % (config()['server']['username'],
config()['server']['password']))
curl.setopt(curl.HTTPHEADER, ['X-Requested-Auth: Digest'])
curl.setopt(curl.FAILONERROR, True)
curl.setopt(curl.FOLLOWLOCATION, True)
curl.perform()
curl.close()
result = buf.getvalue()
buf.close()
return result
|
[
"Make",
"an",
"HTTP",
"request",
"to",
"a",
"given",
"URL",
"with",
"optional",
"parameters",
"."
] |
opencast/pyCA
|
python
|
https://github.com/opencast/pyCA/blob/c89b168d4780d157e1b3f7676628c1b131956a88/pyca/utils.py#L33-L66
|
[
"def",
"http_request",
"(",
"url",
",",
"post_data",
"=",
"None",
")",
":",
"logger",
".",
"debug",
"(",
"'Requesting URL: %s'",
"%",
"url",
")",
"buf",
"=",
"bio",
"(",
")",
"curl",
"=",
"pycurl",
".",
"Curl",
"(",
")",
"curl",
".",
"setopt",
"(",
"curl",
".",
"URL",
",",
"url",
".",
"encode",
"(",
"'ascii'",
",",
"'ignore'",
")",
")",
"# Disable HTTPS verification methods if insecure is set",
"if",
"config",
"(",
")",
"[",
"'server'",
"]",
"[",
"'insecure'",
"]",
":",
"curl",
".",
"setopt",
"(",
"curl",
".",
"SSL_VERIFYPEER",
",",
"0",
")",
"curl",
".",
"setopt",
"(",
"curl",
".",
"SSL_VERIFYHOST",
",",
"0",
")",
"if",
"config",
"(",
")",
"[",
"'server'",
"]",
"[",
"'certificate'",
"]",
":",
"# Make sure verification methods are turned on",
"curl",
".",
"setopt",
"(",
"curl",
".",
"SSL_VERIFYPEER",
",",
"1",
")",
"curl",
".",
"setopt",
"(",
"curl",
".",
"SSL_VERIFYHOST",
",",
"2",
")",
"# Import your certificates",
"curl",
".",
"setopt",
"(",
"pycurl",
".",
"CAINFO",
",",
"config",
"(",
")",
"[",
"'server'",
"]",
"[",
"'certificate'",
"]",
")",
"if",
"post_data",
":",
"curl",
".",
"setopt",
"(",
"curl",
".",
"HTTPPOST",
",",
"post_data",
")",
"curl",
".",
"setopt",
"(",
"curl",
".",
"WRITEFUNCTION",
",",
"buf",
".",
"write",
")",
"curl",
".",
"setopt",
"(",
"pycurl",
".",
"HTTPAUTH",
",",
"pycurl",
".",
"HTTPAUTH_DIGEST",
")",
"curl",
".",
"setopt",
"(",
"pycurl",
".",
"USERPWD",
",",
"\"%s:%s\"",
"%",
"(",
"config",
"(",
")",
"[",
"'server'",
"]",
"[",
"'username'",
"]",
",",
"config",
"(",
")",
"[",
"'server'",
"]",
"[",
"'password'",
"]",
")",
")",
"curl",
".",
"setopt",
"(",
"curl",
".",
"HTTPHEADER",
",",
"[",
"'X-Requested-Auth: Digest'",
"]",
")",
"curl",
".",
"setopt",
"(",
"curl",
".",
"FAILONERROR",
",",
"True",
")",
"curl",
".",
"setopt",
"(",
"curl",
".",
"FOLLOWLOCATION",
",",
"True",
")",
"curl",
".",
"perform",
"(",
")",
"curl",
".",
"close",
"(",
")",
"result",
"=",
"buf",
".",
"getvalue",
"(",
")",
"buf",
".",
"close",
"(",
")",
"return",
"result"
] |
c89b168d4780d157e1b3f7676628c1b131956a88
|
test
|
get_service
|
Get available service endpoints for a given service type from the
Opencast ServiceRegistry.
|
pyca/utils.py
|
def get_service(service_type):
'''Get available service endpoints for a given service type from the
Opencast ServiceRegistry.
'''
endpoint = '/services/available.json?serviceType=' + str(service_type)
url = '%s%s' % (config()['server']['url'], endpoint)
response = http_request(url).decode('utf-8')
services = (json.loads(response).get('services') or {}).get('service', [])
services = ensurelist(services)
endpoints = [service['host'] + service['path'] for service in services
if service['online'] and service['active']]
for endpoint in endpoints:
logger.info(u'Endpoint for %s: %s', service_type, endpoint)
return endpoints
|
def get_service(service_type):
'''Get available service endpoints for a given service type from the
Opencast ServiceRegistry.
'''
endpoint = '/services/available.json?serviceType=' + str(service_type)
url = '%s%s' % (config()['server']['url'], endpoint)
response = http_request(url).decode('utf-8')
services = (json.loads(response).get('services') or {}).get('service', [])
services = ensurelist(services)
endpoints = [service['host'] + service['path'] for service in services
if service['online'] and service['active']]
for endpoint in endpoints:
logger.info(u'Endpoint for %s: %s', service_type, endpoint)
return endpoints
|
[
"Get",
"available",
"service",
"endpoints",
"for",
"a",
"given",
"service",
"type",
"from",
"the",
"Opencast",
"ServiceRegistry",
"."
] |
opencast/pyCA
|
python
|
https://github.com/opencast/pyCA/blob/c89b168d4780d157e1b3f7676628c1b131956a88/pyca/utils.py#L69-L82
|
[
"def",
"get_service",
"(",
"service_type",
")",
":",
"endpoint",
"=",
"'/services/available.json?serviceType='",
"+",
"str",
"(",
"service_type",
")",
"url",
"=",
"'%s%s'",
"%",
"(",
"config",
"(",
")",
"[",
"'server'",
"]",
"[",
"'url'",
"]",
",",
"endpoint",
")",
"response",
"=",
"http_request",
"(",
"url",
")",
".",
"decode",
"(",
"'utf-8'",
")",
"services",
"=",
"(",
"json",
".",
"loads",
"(",
"response",
")",
".",
"get",
"(",
"'services'",
")",
"or",
"{",
"}",
")",
".",
"get",
"(",
"'service'",
",",
"[",
"]",
")",
"services",
"=",
"ensurelist",
"(",
"services",
")",
"endpoints",
"=",
"[",
"service",
"[",
"'host'",
"]",
"+",
"service",
"[",
"'path'",
"]",
"for",
"service",
"in",
"services",
"if",
"service",
"[",
"'online'",
"]",
"and",
"service",
"[",
"'active'",
"]",
"]",
"for",
"endpoint",
"in",
"endpoints",
":",
"logger",
".",
"info",
"(",
"u'Endpoint for %s: %s'",
",",
"service_type",
",",
"endpoint",
")",
"return",
"endpoints"
] |
c89b168d4780d157e1b3f7676628c1b131956a88
|
test
|
unix_ts
|
Convert datetime into a unix timestamp.
This is the equivalent to Python 3's int(datetime.timestamp()).
:param dt: datetime to convert
|
pyca/utils.py
|
def unix_ts(dtval):
'''Convert datetime into a unix timestamp.
This is the equivalent to Python 3's int(datetime.timestamp()).
:param dt: datetime to convert
'''
epoch = datetime(1970, 1, 1, 0, 0, tzinfo=tzutc())
delta = (dtval - epoch)
return delta.days * 24 * 3600 + delta.seconds
|
def unix_ts(dtval):
'''Convert datetime into a unix timestamp.
This is the equivalent to Python 3's int(datetime.timestamp()).
:param dt: datetime to convert
'''
epoch = datetime(1970, 1, 1, 0, 0, tzinfo=tzutc())
delta = (dtval - epoch)
return delta.days * 24 * 3600 + delta.seconds
|
[
"Convert",
"datetime",
"into",
"a",
"unix",
"timestamp",
".",
"This",
"is",
"the",
"equivalent",
"to",
"Python",
"3",
"s",
"int",
"(",
"datetime",
".",
"timestamp",
"()",
")",
"."
] |
opencast/pyCA
|
python
|
https://github.com/opencast/pyCA/blob/c89b168d4780d157e1b3f7676628c1b131956a88/pyca/utils.py#L85-L93
|
[
"def",
"unix_ts",
"(",
"dtval",
")",
":",
"epoch",
"=",
"datetime",
"(",
"1970",
",",
"1",
",",
"1",
",",
"0",
",",
"0",
",",
"tzinfo",
"=",
"tzutc",
"(",
")",
")",
"delta",
"=",
"(",
"dtval",
"-",
"epoch",
")",
"return",
"delta",
".",
"days",
"*",
"24",
"*",
"3600",
"+",
"delta",
".",
"seconds"
] |
c89b168d4780d157e1b3f7676628c1b131956a88
|
test
|
try_mkdir
|
Try to create a directory. Pass without error if it already exists.
|
pyca/utils.py
|
def try_mkdir(directory):
'''Try to create a directory. Pass without error if it already exists.
'''
try:
os.mkdir(directory)
except OSError as err:
if err.errno != errno.EEXIST:
raise err
|
def try_mkdir(directory):
'''Try to create a directory. Pass without error if it already exists.
'''
try:
os.mkdir(directory)
except OSError as err:
if err.errno != errno.EEXIST:
raise err
|
[
"Try",
"to",
"create",
"a",
"directory",
".",
"Pass",
"without",
"error",
"if",
"it",
"already",
"exists",
"."
] |
opencast/pyCA
|
python
|
https://github.com/opencast/pyCA/blob/c89b168d4780d157e1b3f7676628c1b131956a88/pyca/utils.py#L102-L109
|
[
"def",
"try_mkdir",
"(",
"directory",
")",
":",
"try",
":",
"os",
".",
"mkdir",
"(",
"directory",
")",
"except",
"OSError",
"as",
"err",
":",
"if",
"err",
".",
"errno",
"!=",
"errno",
".",
"EEXIST",
":",
"raise",
"err"
] |
c89b168d4780d157e1b3f7676628c1b131956a88
|
test
|
configure_service
|
Get the location of a given service from Opencast and add it to the
current configuration.
|
pyca/utils.py
|
def configure_service(service):
'''Get the location of a given service from Opencast and add it to the
current configuration.
'''
while not config().get('service-' + service) and not terminate():
try:
config()['service-' + service] = \
get_service('org.opencastproject.' + service)
except pycurl.error as e:
logger.error('Could not get %s endpoint: %s. Retrying in 5s' %
(service, e))
time.sleep(5.0)
|
def configure_service(service):
'''Get the location of a given service from Opencast and add it to the
current configuration.
'''
while not config().get('service-' + service) and not terminate():
try:
config()['service-' + service] = \
get_service('org.opencastproject.' + service)
except pycurl.error as e:
logger.error('Could not get %s endpoint: %s. Retrying in 5s' %
(service, e))
time.sleep(5.0)
|
[
"Get",
"the",
"location",
"of",
"a",
"given",
"service",
"from",
"Opencast",
"and",
"add",
"it",
"to",
"the",
"current",
"configuration",
"."
] |
opencast/pyCA
|
python
|
https://github.com/opencast/pyCA/blob/c89b168d4780d157e1b3f7676628c1b131956a88/pyca/utils.py#L112-L123
|
[
"def",
"configure_service",
"(",
"service",
")",
":",
"while",
"not",
"config",
"(",
")",
".",
"get",
"(",
"'service-'",
"+",
"service",
")",
"and",
"not",
"terminate",
"(",
")",
":",
"try",
":",
"config",
"(",
")",
"[",
"'service-'",
"+",
"service",
"]",
"=",
"get_service",
"(",
"'org.opencastproject.'",
"+",
"service",
")",
"except",
"pycurl",
".",
"error",
"as",
"e",
":",
"logger",
".",
"error",
"(",
"'Could not get %s endpoint: %s. Retrying in 5s'",
"%",
"(",
"service",
",",
"e",
")",
")",
"time",
".",
"sleep",
"(",
"5.0",
")"
] |
c89b168d4780d157e1b3f7676628c1b131956a88
|
test
|
register_ca
|
Register this capture agent at the Matterhorn admin server so that it
shows up in the admin interface.
:param address: Address of the capture agent web ui
:param status: Current status of the capture agent
|
pyca/utils.py
|
def register_ca(status='idle'):
'''Register this capture agent at the Matterhorn admin server so that it
shows up in the admin interface.
:param address: Address of the capture agent web ui
:param status: Current status of the capture agent
'''
# If this is a backup CA we don't tell the Matterhorn core that we are
# here. We will just run silently in the background:
if config()['agent']['backup_mode']:
return
params = [('address', config()['ui']['url']), ('state', status)]
name = urlquote(config()['agent']['name'].encode('utf-8'), safe='')
url = '%s/agents/%s' % (config()['service-capture.admin'][0], name)
try:
response = http_request(url, params).decode('utf-8')
if response:
logger.info(response)
except pycurl.error as e:
logger.warning('Could not set agent state to %s: %s' % (status, e))
|
def register_ca(status='idle'):
'''Register this capture agent at the Matterhorn admin server so that it
shows up in the admin interface.
:param address: Address of the capture agent web ui
:param status: Current status of the capture agent
'''
# If this is a backup CA we don't tell the Matterhorn core that we are
# here. We will just run silently in the background:
if config()['agent']['backup_mode']:
return
params = [('address', config()['ui']['url']), ('state', status)]
name = urlquote(config()['agent']['name'].encode('utf-8'), safe='')
url = '%s/agents/%s' % (config()['service-capture.admin'][0], name)
try:
response = http_request(url, params).decode('utf-8')
if response:
logger.info(response)
except pycurl.error as e:
logger.warning('Could not set agent state to %s: %s' % (status, e))
|
[
"Register",
"this",
"capture",
"agent",
"at",
"the",
"Matterhorn",
"admin",
"server",
"so",
"that",
"it",
"shows",
"up",
"in",
"the",
"admin",
"interface",
"."
] |
opencast/pyCA
|
python
|
https://github.com/opencast/pyCA/blob/c89b168d4780d157e1b3f7676628c1b131956a88/pyca/utils.py#L131-L150
|
[
"def",
"register_ca",
"(",
"status",
"=",
"'idle'",
")",
":",
"# If this is a backup CA we don't tell the Matterhorn core that we are",
"# here. We will just run silently in the background:",
"if",
"config",
"(",
")",
"[",
"'agent'",
"]",
"[",
"'backup_mode'",
"]",
":",
"return",
"params",
"=",
"[",
"(",
"'address'",
",",
"config",
"(",
")",
"[",
"'ui'",
"]",
"[",
"'url'",
"]",
")",
",",
"(",
"'state'",
",",
"status",
")",
"]",
"name",
"=",
"urlquote",
"(",
"config",
"(",
")",
"[",
"'agent'",
"]",
"[",
"'name'",
"]",
".",
"encode",
"(",
"'utf-8'",
")",
",",
"safe",
"=",
"''",
")",
"url",
"=",
"'%s/agents/%s'",
"%",
"(",
"config",
"(",
")",
"[",
"'service-capture.admin'",
"]",
"[",
"0",
"]",
",",
"name",
")",
"try",
":",
"response",
"=",
"http_request",
"(",
"url",
",",
"params",
")",
".",
"decode",
"(",
"'utf-8'",
")",
"if",
"response",
":",
"logger",
".",
"info",
"(",
"response",
")",
"except",
"pycurl",
".",
"error",
"as",
"e",
":",
"logger",
".",
"warning",
"(",
"'Could not set agent state to %s: %s'",
"%",
"(",
"status",
",",
"e",
")",
")"
] |
c89b168d4780d157e1b3f7676628c1b131956a88
|
test
|
recording_state
|
Send the state of the current recording to the Matterhorn core.
:param recording_id: ID of the current recording
:param status: Status of the recording
|
pyca/utils.py
|
def recording_state(recording_id, status):
'''Send the state of the current recording to the Matterhorn core.
:param recording_id: ID of the current recording
:param status: Status of the recording
'''
# If this is a backup CA we do not update the recording state since the
# actual CA does that and we want to interfere. We will just run silently
# in the background:
if config()['agent']['backup_mode']:
return
params = [('state', status)]
url = config()['service-capture.admin'][0]
url += '/recordings/%s' % recording_id
try:
result = http_request(url, params)
logger.info(result)
except pycurl.error as e:
logger.warning('Could not set recording state to %s: %s' % (status, e))
|
def recording_state(recording_id, status):
'''Send the state of the current recording to the Matterhorn core.
:param recording_id: ID of the current recording
:param status: Status of the recording
'''
# If this is a backup CA we do not update the recording state since the
# actual CA does that and we want to interfere. We will just run silently
# in the background:
if config()['agent']['backup_mode']:
return
params = [('state', status)]
url = config()['service-capture.admin'][0]
url += '/recordings/%s' % recording_id
try:
result = http_request(url, params)
logger.info(result)
except pycurl.error as e:
logger.warning('Could not set recording state to %s: %s' % (status, e))
|
[
"Send",
"the",
"state",
"of",
"the",
"current",
"recording",
"to",
"the",
"Matterhorn",
"core",
"."
] |
opencast/pyCA
|
python
|
https://github.com/opencast/pyCA/blob/c89b168d4780d157e1b3f7676628c1b131956a88/pyca/utils.py#L153-L171
|
[
"def",
"recording_state",
"(",
"recording_id",
",",
"status",
")",
":",
"# If this is a backup CA we do not update the recording state since the",
"# actual CA does that and we want to interfere. We will just run silently",
"# in the background:",
"if",
"config",
"(",
")",
"[",
"'agent'",
"]",
"[",
"'backup_mode'",
"]",
":",
"return",
"params",
"=",
"[",
"(",
"'state'",
",",
"status",
")",
"]",
"url",
"=",
"config",
"(",
")",
"[",
"'service-capture.admin'",
"]",
"[",
"0",
"]",
"url",
"+=",
"'/recordings/%s'",
"%",
"recording_id",
"try",
":",
"result",
"=",
"http_request",
"(",
"url",
",",
"params",
")",
"logger",
".",
"info",
"(",
"result",
")",
"except",
"pycurl",
".",
"error",
"as",
"e",
":",
"logger",
".",
"warning",
"(",
"'Could not set recording state to %s: %s'",
"%",
"(",
"status",
",",
"e",
")",
")"
] |
c89b168d4780d157e1b3f7676628c1b131956a88
|
test
|
update_event_status
|
Update the status of a particular event in the database.
|
pyca/utils.py
|
def update_event_status(event, status):
'''Update the status of a particular event in the database.
'''
dbs = db.get_session()
dbs.query(db.RecordedEvent).filter(db.RecordedEvent.start == event.start)\
.update({'status': status})
event.status = status
dbs.commit()
|
def update_event_status(event, status):
'''Update the status of a particular event in the database.
'''
dbs = db.get_session()
dbs.query(db.RecordedEvent).filter(db.RecordedEvent.start == event.start)\
.update({'status': status})
event.status = status
dbs.commit()
|
[
"Update",
"the",
"status",
"of",
"a",
"particular",
"event",
"in",
"the",
"database",
"."
] |
opencast/pyCA
|
python
|
https://github.com/opencast/pyCA/blob/c89b168d4780d157e1b3f7676628c1b131956a88/pyca/utils.py#L174-L181
|
[
"def",
"update_event_status",
"(",
"event",
",",
"status",
")",
":",
"dbs",
"=",
"db",
".",
"get_session",
"(",
")",
"dbs",
".",
"query",
"(",
"db",
".",
"RecordedEvent",
")",
".",
"filter",
"(",
"db",
".",
"RecordedEvent",
".",
"start",
"==",
"event",
".",
"start",
")",
".",
"update",
"(",
"{",
"'status'",
":",
"status",
"}",
")",
"event",
".",
"status",
"=",
"status",
"dbs",
".",
"commit",
"(",
")"
] |
c89b168d4780d157e1b3f7676628c1b131956a88
|
test
|
set_service_status
|
Update the status of a particular service in the database.
|
pyca/utils.py
|
def set_service_status(service, status):
'''Update the status of a particular service in the database.
'''
srv = db.ServiceStates()
srv.type = service
srv.status = status
dbs = db.get_session()
dbs.merge(srv)
dbs.commit()
dbs.close()
|
def set_service_status(service, status):
'''Update the status of a particular service in the database.
'''
srv = db.ServiceStates()
srv.type = service
srv.status = status
dbs = db.get_session()
dbs.merge(srv)
dbs.commit()
dbs.close()
|
[
"Update",
"the",
"status",
"of",
"a",
"particular",
"service",
"in",
"the",
"database",
"."
] |
opencast/pyCA
|
python
|
https://github.com/opencast/pyCA/blob/c89b168d4780d157e1b3f7676628c1b131956a88/pyca/utils.py#L184-L194
|
[
"def",
"set_service_status",
"(",
"service",
",",
"status",
")",
":",
"srv",
"=",
"db",
".",
"ServiceStates",
"(",
")",
"srv",
".",
"type",
"=",
"service",
"srv",
".",
"status",
"=",
"status",
"dbs",
"=",
"db",
".",
"get_session",
"(",
")",
"dbs",
".",
"merge",
"(",
"srv",
")",
"dbs",
".",
"commit",
"(",
")",
"dbs",
".",
"close",
"(",
")"
] |
c89b168d4780d157e1b3f7676628c1b131956a88
|
test
|
get_service_status
|
Update the status of a particular service in the database.
|
pyca/utils.py
|
def get_service_status(service):
'''Update the status of a particular service in the database.
'''
dbs = db.get_session()
srvs = dbs.query(db.ServiceStates).filter(db.ServiceStates.type == service)
if srvs.count():
return srvs[0].status
return db.ServiceStatus.STOPPED
|
def get_service_status(service):
'''Update the status of a particular service in the database.
'''
dbs = db.get_session()
srvs = dbs.query(db.ServiceStates).filter(db.ServiceStates.type == service)
if srvs.count():
return srvs[0].status
return db.ServiceStatus.STOPPED
|
[
"Update",
"the",
"status",
"of",
"a",
"particular",
"service",
"in",
"the",
"database",
"."
] |
opencast/pyCA
|
python
|
https://github.com/opencast/pyCA/blob/c89b168d4780d157e1b3f7676628c1b131956a88/pyca/utils.py#L205-L214
|
[
"def",
"get_service_status",
"(",
"service",
")",
":",
"dbs",
"=",
"db",
".",
"get_session",
"(",
")",
"srvs",
"=",
"dbs",
".",
"query",
"(",
"db",
".",
"ServiceStates",
")",
".",
"filter",
"(",
"db",
".",
"ServiceStates",
".",
"type",
"==",
"service",
")",
"if",
"srvs",
".",
"count",
"(",
")",
":",
"return",
"srvs",
"[",
"0",
"]",
".",
"status",
"return",
"db",
".",
"ServiceStatus",
".",
"STOPPED"
] |
c89b168d4780d157e1b3f7676628c1b131956a88
|
test
|
update_agent_state
|
Update the current agent state in opencast.
|
pyca/utils.py
|
def update_agent_state():
'''Update the current agent state in opencast.
'''
configure_service('capture.admin')
status = 'idle'
# Determine reported agent state with priority list
if get_service_status(db.Service.SCHEDULE) == db.ServiceStatus.STOPPED:
status = 'offline'
elif get_service_status(db.Service.CAPTURE) == db.ServiceStatus.BUSY:
status = 'capturing'
elif get_service_status(db.Service.INGEST) == db.ServiceStatus.BUSY:
status = 'uploading'
register_ca(status=status)
|
def update_agent_state():
'''Update the current agent state in opencast.
'''
configure_service('capture.admin')
status = 'idle'
# Determine reported agent state with priority list
if get_service_status(db.Service.SCHEDULE) == db.ServiceStatus.STOPPED:
status = 'offline'
elif get_service_status(db.Service.CAPTURE) == db.ServiceStatus.BUSY:
status = 'capturing'
elif get_service_status(db.Service.INGEST) == db.ServiceStatus.BUSY:
status = 'uploading'
register_ca(status=status)
|
[
"Update",
"the",
"current",
"agent",
"state",
"in",
"opencast",
"."
] |
opencast/pyCA
|
python
|
https://github.com/opencast/pyCA/blob/c89b168d4780d157e1b3f7676628c1b131956a88/pyca/utils.py#L217-L231
|
[
"def",
"update_agent_state",
"(",
")",
":",
"configure_service",
"(",
"'capture.admin'",
")",
"status",
"=",
"'idle'",
"# Determine reported agent state with priority list",
"if",
"get_service_status",
"(",
"db",
".",
"Service",
".",
"SCHEDULE",
")",
"==",
"db",
".",
"ServiceStatus",
".",
"STOPPED",
":",
"status",
"=",
"'offline'",
"elif",
"get_service_status",
"(",
"db",
".",
"Service",
".",
"CAPTURE",
")",
"==",
"db",
".",
"ServiceStatus",
".",
"BUSY",
":",
"status",
"=",
"'capturing'",
"elif",
"get_service_status",
"(",
"db",
".",
"Service",
".",
"INGEST",
")",
"==",
"db",
".",
"ServiceStatus",
".",
"BUSY",
":",
"status",
"=",
"'uploading'",
"register_ca",
"(",
"status",
"=",
"status",
")"
] |
c89b168d4780d157e1b3f7676628c1b131956a88
|
test
|
configuration_file
|
Find the best match for the configuration file.
|
pyca/config.py
|
def configuration_file(cfgfile):
'''Find the best match for the configuration file.
'''
if cfgfile is not None:
return cfgfile
# If no file is explicitely specified, probe for the configuration file
# location.
cfg = './etc/pyca.conf'
if not os.path.isfile(cfg):
return '/etc/pyca.conf'
return cfg
|
def configuration_file(cfgfile):
'''Find the best match for the configuration file.
'''
if cfgfile is not None:
return cfgfile
# If no file is explicitely specified, probe for the configuration file
# location.
cfg = './etc/pyca.conf'
if not os.path.isfile(cfg):
return '/etc/pyca.conf'
return cfg
|
[
"Find",
"the",
"best",
"match",
"for",
"the",
"configuration",
"file",
"."
] |
opencast/pyCA
|
python
|
https://github.com/opencast/pyCA/blob/c89b168d4780d157e1b3f7676628c1b131956a88/pyca/config.py#L62-L72
|
[
"def",
"configuration_file",
"(",
"cfgfile",
")",
":",
"if",
"cfgfile",
"is",
"not",
"None",
":",
"return",
"cfgfile",
"# If no file is explicitely specified, probe for the configuration file",
"# location.",
"cfg",
"=",
"'./etc/pyca.conf'",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"cfg",
")",
":",
"return",
"'/etc/pyca.conf'",
"return",
"cfg"
] |
c89b168d4780d157e1b3f7676628c1b131956a88
|
test
|
update_configuration
|
Update configuration from file.
:param cfgfile: Configuration file to load.
|
pyca/config.py
|
def update_configuration(cfgfile=None):
'''Update configuration from file.
:param cfgfile: Configuration file to load.
'''
configobj.DEFAULT_INTERPOLATION = 'template'
cfgfile = configuration_file(cfgfile)
cfg = configobj.ConfigObj(cfgfile, configspec=cfgspec, encoding='utf-8')
validator = Validator()
val = cfg.validate(validator)
if val is not True:
raise ValueError('Invalid configuration: %s' % val)
if len(cfg['capture']['files']) != len(cfg['capture']['flavors']):
raise ValueError('List of files and flavors do not match')
globals()['__config'] = cfg
logger_init()
if cfg['server'].get('url', '').endswith('/'):
logger.warning('Base URL ends with /. This is most likely a '
'configuration error. The URL should contain nothing '
'of the service paths.')
logger.info('Configuration loaded from %s' % cfgfile)
check()
return cfg
|
def update_configuration(cfgfile=None):
'''Update configuration from file.
:param cfgfile: Configuration file to load.
'''
configobj.DEFAULT_INTERPOLATION = 'template'
cfgfile = configuration_file(cfgfile)
cfg = configobj.ConfigObj(cfgfile, configspec=cfgspec, encoding='utf-8')
validator = Validator()
val = cfg.validate(validator)
if val is not True:
raise ValueError('Invalid configuration: %s' % val)
if len(cfg['capture']['files']) != len(cfg['capture']['flavors']):
raise ValueError('List of files and flavors do not match')
globals()['__config'] = cfg
logger_init()
if cfg['server'].get('url', '').endswith('/'):
logger.warning('Base URL ends with /. This is most likely a '
'configuration error. The URL should contain nothing '
'of the service paths.')
logger.info('Configuration loaded from %s' % cfgfile)
check()
return cfg
|
[
"Update",
"configuration",
"from",
"file",
"."
] |
opencast/pyCA
|
python
|
https://github.com/opencast/pyCA/blob/c89b168d4780d157e1b3f7676628c1b131956a88/pyca/config.py#L75-L97
|
[
"def",
"update_configuration",
"(",
"cfgfile",
"=",
"None",
")",
":",
"configobj",
".",
"DEFAULT_INTERPOLATION",
"=",
"'template'",
"cfgfile",
"=",
"configuration_file",
"(",
"cfgfile",
")",
"cfg",
"=",
"configobj",
".",
"ConfigObj",
"(",
"cfgfile",
",",
"configspec",
"=",
"cfgspec",
",",
"encoding",
"=",
"'utf-8'",
")",
"validator",
"=",
"Validator",
"(",
")",
"val",
"=",
"cfg",
".",
"validate",
"(",
"validator",
")",
"if",
"val",
"is",
"not",
"True",
":",
"raise",
"ValueError",
"(",
"'Invalid configuration: %s'",
"%",
"val",
")",
"if",
"len",
"(",
"cfg",
"[",
"'capture'",
"]",
"[",
"'files'",
"]",
")",
"!=",
"len",
"(",
"cfg",
"[",
"'capture'",
"]",
"[",
"'flavors'",
"]",
")",
":",
"raise",
"ValueError",
"(",
"'List of files and flavors do not match'",
")",
"globals",
"(",
")",
"[",
"'__config'",
"]",
"=",
"cfg",
"logger_init",
"(",
")",
"if",
"cfg",
"[",
"'server'",
"]",
".",
"get",
"(",
"'url'",
",",
"''",
")",
".",
"endswith",
"(",
"'/'",
")",
":",
"logger",
".",
"warning",
"(",
"'Base URL ends with /. This is most likely a '",
"'configuration error. The URL should contain nothing '",
"'of the service paths.'",
")",
"logger",
".",
"info",
"(",
"'Configuration loaded from %s'",
"%",
"cfgfile",
")",
"check",
"(",
")",
"return",
"cfg"
] |
c89b168d4780d157e1b3f7676628c1b131956a88
|
test
|
check
|
Check configuration for sanity.
|
pyca/config.py
|
def check():
'''Check configuration for sanity.
'''
if config('server')['insecure']:
logger.warning('HTTPS CHECKS ARE TURNED OFF. A SECURE CONNECTION IS '
'NOT GUARANTEED')
if config('server')['certificate']:
# Ensure certificate exists and is readable
open(config('server')['certificate'], 'rb').close()
if config('agent')['backup_mode']:
logger.info('Agent runs in backup mode. No data will be sent to '
'Opencast')
|
def check():
'''Check configuration for sanity.
'''
if config('server')['insecure']:
logger.warning('HTTPS CHECKS ARE TURNED OFF. A SECURE CONNECTION IS '
'NOT GUARANTEED')
if config('server')['certificate']:
# Ensure certificate exists and is readable
open(config('server')['certificate'], 'rb').close()
if config('agent')['backup_mode']:
logger.info('Agent runs in backup mode. No data will be sent to '
'Opencast')
|
[
"Check",
"configuration",
"for",
"sanity",
"."
] |
opencast/pyCA
|
python
|
https://github.com/opencast/pyCA/blob/c89b168d4780d157e1b3f7676628c1b131956a88/pyca/config.py#L100-L111
|
[
"def",
"check",
"(",
")",
":",
"if",
"config",
"(",
"'server'",
")",
"[",
"'insecure'",
"]",
":",
"logger",
".",
"warning",
"(",
"'HTTPS CHECKS ARE TURNED OFF. A SECURE CONNECTION IS '",
"'NOT GUARANTEED'",
")",
"if",
"config",
"(",
"'server'",
")",
"[",
"'certificate'",
"]",
":",
"# Ensure certificate exists and is readable",
"open",
"(",
"config",
"(",
"'server'",
")",
"[",
"'certificate'",
"]",
",",
"'rb'",
")",
".",
"close",
"(",
")",
"if",
"config",
"(",
"'agent'",
")",
"[",
"'backup_mode'",
"]",
":",
"logger",
".",
"info",
"(",
"'Agent runs in backup mode. No data will be sent to '",
"'Opencast'",
")"
] |
c89b168d4780d157e1b3f7676628c1b131956a88
|
test
|
logger_init
|
Initialize logger based on configuration
|
pyca/config.py
|
def logger_init():
'''Initialize logger based on configuration
'''
handlers = []
logconf = config('logging')
if logconf['syslog']:
handlers.append(logging.handlers.SysLogHandler(address='/dev/log'))
if logconf['stderr']:
handlers.append(logging.StreamHandler(sys.stderr))
if logconf['file']:
handlers.append(logging.handlers.WatchedFileHandler(logconf['file']))
for handler in handlers:
handler.setFormatter(logging.Formatter(logconf['format']))
logging.root.addHandler(handler)
logging.root.setLevel(logconf['level'].upper())
logger.info('Log level set to %s' % logconf['level'])
|
def logger_init():
'''Initialize logger based on configuration
'''
handlers = []
logconf = config('logging')
if logconf['syslog']:
handlers.append(logging.handlers.SysLogHandler(address='/dev/log'))
if logconf['stderr']:
handlers.append(logging.StreamHandler(sys.stderr))
if logconf['file']:
handlers.append(logging.handlers.WatchedFileHandler(logconf['file']))
for handler in handlers:
handler.setFormatter(logging.Formatter(logconf['format']))
logging.root.addHandler(handler)
logging.root.setLevel(logconf['level'].upper())
logger.info('Log level set to %s' % logconf['level'])
|
[
"Initialize",
"logger",
"based",
"on",
"configuration"
] |
opencast/pyCA
|
python
|
https://github.com/opencast/pyCA/blob/c89b168d4780d157e1b3f7676628c1b131956a88/pyca/config.py#L119-L135
|
[
"def",
"logger_init",
"(",
")",
":",
"handlers",
"=",
"[",
"]",
"logconf",
"=",
"config",
"(",
"'logging'",
")",
"if",
"logconf",
"[",
"'syslog'",
"]",
":",
"handlers",
".",
"append",
"(",
"logging",
".",
"handlers",
".",
"SysLogHandler",
"(",
"address",
"=",
"'/dev/log'",
")",
")",
"if",
"logconf",
"[",
"'stderr'",
"]",
":",
"handlers",
".",
"append",
"(",
"logging",
".",
"StreamHandler",
"(",
"sys",
".",
"stderr",
")",
")",
"if",
"logconf",
"[",
"'file'",
"]",
":",
"handlers",
".",
"append",
"(",
"logging",
".",
"handlers",
".",
"WatchedFileHandler",
"(",
"logconf",
"[",
"'file'",
"]",
")",
")",
"for",
"handler",
"in",
"handlers",
":",
"handler",
".",
"setFormatter",
"(",
"logging",
".",
"Formatter",
"(",
"logconf",
"[",
"'format'",
"]",
")",
")",
"logging",
".",
"root",
".",
"addHandler",
"(",
"handler",
")",
"logging",
".",
"root",
".",
"setLevel",
"(",
"logconf",
"[",
"'level'",
"]",
".",
"upper",
"(",
")",
")",
"logger",
".",
"info",
"(",
"'Log level set to %s'",
"%",
"logconf",
"[",
"'level'",
"]",
")"
] |
c89b168d4780d157e1b3f7676628c1b131956a88
|
test
|
home
|
Serve the status page of the capture agent.
|
pyca/ui/__init__.py
|
def home():
'''Serve the status page of the capture agent.
'''
# Get IDs of existing preview images
preview = config()['capture']['preview']
previewdir = config()['capture']['preview_dir']
preview = [p.replace('{{previewdir}}', previewdir) for p in preview]
preview = zip(preview, range(len(preview)))
preview = [p[1] for p in preview if os.path.isfile(p[0])]
# Get limits for recording table
try:
limit_upcoming = int(request.args.get('limit_upcoming', 5))
limit_processed = int(request.args.get('limit_processed', 15))
except ValueError:
limit_upcoming = 5
limit_processed = 15
db = get_session()
upcoming_events = db.query(UpcomingEvent)\
.order_by(UpcomingEvent.start)\
.limit(limit_upcoming)
recorded_events = db.query(RecordedEvent)\
.order_by(RecordedEvent.start.desc())\
.limit(limit_processed)
recording = get_service_status(Service.CAPTURE) == ServiceStatus.BUSY
uploading = get_service_status(Service.INGEST) == ServiceStatus.BUSY
processed = db.query(RecordedEvent).count()
upcoming = db.query(UpcomingEvent).count()
return render_template('home.html', preview=preview, config=config(),
recorded_events=recorded_events,
upcoming_events=upcoming_events,
recording=recording, uploading=uploading,
processed=processed, upcoming=upcoming,
limit_upcoming=limit_upcoming,
limit_processed=limit_processed,
dtfmt=dtfmt)
|
def home():
'''Serve the status page of the capture agent.
'''
# Get IDs of existing preview images
preview = config()['capture']['preview']
previewdir = config()['capture']['preview_dir']
preview = [p.replace('{{previewdir}}', previewdir) for p in preview]
preview = zip(preview, range(len(preview)))
preview = [p[1] for p in preview if os.path.isfile(p[0])]
# Get limits for recording table
try:
limit_upcoming = int(request.args.get('limit_upcoming', 5))
limit_processed = int(request.args.get('limit_processed', 15))
except ValueError:
limit_upcoming = 5
limit_processed = 15
db = get_session()
upcoming_events = db.query(UpcomingEvent)\
.order_by(UpcomingEvent.start)\
.limit(limit_upcoming)
recorded_events = db.query(RecordedEvent)\
.order_by(RecordedEvent.start.desc())\
.limit(limit_processed)
recording = get_service_status(Service.CAPTURE) == ServiceStatus.BUSY
uploading = get_service_status(Service.INGEST) == ServiceStatus.BUSY
processed = db.query(RecordedEvent).count()
upcoming = db.query(UpcomingEvent).count()
return render_template('home.html', preview=preview, config=config(),
recorded_events=recorded_events,
upcoming_events=upcoming_events,
recording=recording, uploading=uploading,
processed=processed, upcoming=upcoming,
limit_upcoming=limit_upcoming,
limit_processed=limit_processed,
dtfmt=dtfmt)
|
[
"Serve",
"the",
"status",
"page",
"of",
"the",
"capture",
"agent",
"."
] |
opencast/pyCA
|
python
|
https://github.com/opencast/pyCA/blob/c89b168d4780d157e1b3f7676628c1b131956a88/pyca/ui/__init__.py#L18-L55
|
[
"def",
"home",
"(",
")",
":",
"# Get IDs of existing preview images",
"preview",
"=",
"config",
"(",
")",
"[",
"'capture'",
"]",
"[",
"'preview'",
"]",
"previewdir",
"=",
"config",
"(",
")",
"[",
"'capture'",
"]",
"[",
"'preview_dir'",
"]",
"preview",
"=",
"[",
"p",
".",
"replace",
"(",
"'{{previewdir}}'",
",",
"previewdir",
")",
"for",
"p",
"in",
"preview",
"]",
"preview",
"=",
"zip",
"(",
"preview",
",",
"range",
"(",
"len",
"(",
"preview",
")",
")",
")",
"preview",
"=",
"[",
"p",
"[",
"1",
"]",
"for",
"p",
"in",
"preview",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"p",
"[",
"0",
"]",
")",
"]",
"# Get limits for recording table",
"try",
":",
"limit_upcoming",
"=",
"int",
"(",
"request",
".",
"args",
".",
"get",
"(",
"'limit_upcoming'",
",",
"5",
")",
")",
"limit_processed",
"=",
"int",
"(",
"request",
".",
"args",
".",
"get",
"(",
"'limit_processed'",
",",
"15",
")",
")",
"except",
"ValueError",
":",
"limit_upcoming",
"=",
"5",
"limit_processed",
"=",
"15",
"db",
"=",
"get_session",
"(",
")",
"upcoming_events",
"=",
"db",
".",
"query",
"(",
"UpcomingEvent",
")",
".",
"order_by",
"(",
"UpcomingEvent",
".",
"start",
")",
".",
"limit",
"(",
"limit_upcoming",
")",
"recorded_events",
"=",
"db",
".",
"query",
"(",
"RecordedEvent",
")",
".",
"order_by",
"(",
"RecordedEvent",
".",
"start",
".",
"desc",
"(",
")",
")",
".",
"limit",
"(",
"limit_processed",
")",
"recording",
"=",
"get_service_status",
"(",
"Service",
".",
"CAPTURE",
")",
"==",
"ServiceStatus",
".",
"BUSY",
"uploading",
"=",
"get_service_status",
"(",
"Service",
".",
"INGEST",
")",
"==",
"ServiceStatus",
".",
"BUSY",
"processed",
"=",
"db",
".",
"query",
"(",
"RecordedEvent",
")",
".",
"count",
"(",
")",
"upcoming",
"=",
"db",
".",
"query",
"(",
"UpcomingEvent",
")",
".",
"count",
"(",
")",
"return",
"render_template",
"(",
"'home.html'",
",",
"preview",
"=",
"preview",
",",
"config",
"=",
"config",
"(",
")",
",",
"recorded_events",
"=",
"recorded_events",
",",
"upcoming_events",
"=",
"upcoming_events",
",",
"recording",
"=",
"recording",
",",
"uploading",
"=",
"uploading",
",",
"processed",
"=",
"processed",
",",
"upcoming",
"=",
"upcoming",
",",
"limit_upcoming",
"=",
"limit_upcoming",
",",
"limit_processed",
"=",
"limit_processed",
",",
"dtfmt",
"=",
"dtfmt",
")"
] |
c89b168d4780d157e1b3f7676628c1b131956a88
|
test
|
serve_image
|
Serve the preview image with the given id
|
pyca/ui/__init__.py
|
def serve_image(image_id):
'''Serve the preview image with the given id
'''
try:
preview_dir = config()['capture']['preview_dir']
filepath = config()['capture']['preview'][image_id]
filepath = filepath.replace('{{previewdir}}', preview_dir)
filepath = os.path.abspath(filepath)
if os.path.isfile(filepath):
directory, filename = filepath.rsplit('/', 1)
return send_from_directory(directory, filename)
except (IndexError, KeyError):
pass
return '', 404
|
def serve_image(image_id):
'''Serve the preview image with the given id
'''
try:
preview_dir = config()['capture']['preview_dir']
filepath = config()['capture']['preview'][image_id]
filepath = filepath.replace('{{previewdir}}', preview_dir)
filepath = os.path.abspath(filepath)
if os.path.isfile(filepath):
directory, filename = filepath.rsplit('/', 1)
return send_from_directory(directory, filename)
except (IndexError, KeyError):
pass
return '', 404
|
[
"Serve",
"the",
"preview",
"image",
"with",
"the",
"given",
"id"
] |
opencast/pyCA
|
python
|
https://github.com/opencast/pyCA/blob/c89b168d4780d157e1b3f7676628c1b131956a88/pyca/ui/__init__.py#L60-L73
|
[
"def",
"serve_image",
"(",
"image_id",
")",
":",
"try",
":",
"preview_dir",
"=",
"config",
"(",
")",
"[",
"'capture'",
"]",
"[",
"'preview_dir'",
"]",
"filepath",
"=",
"config",
"(",
")",
"[",
"'capture'",
"]",
"[",
"'preview'",
"]",
"[",
"image_id",
"]",
"filepath",
"=",
"filepath",
".",
"replace",
"(",
"'{{previewdir}}'",
",",
"preview_dir",
")",
"filepath",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"filepath",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"filepath",
")",
":",
"directory",
",",
"filename",
"=",
"filepath",
".",
"rsplit",
"(",
"'/'",
",",
"1",
")",
"return",
"send_from_directory",
"(",
"directory",
",",
"filename",
")",
"except",
"(",
"IndexError",
",",
"KeyError",
")",
":",
"pass",
"return",
"''",
",",
"404"
] |
c89b168d4780d157e1b3f7676628c1b131956a88
|
test
|
sigterm_handler
|
Intercept sigterm and terminate all processes.
|
pyca/__main__.py
|
def sigterm_handler(signum, frame):
'''Intercept sigterm and terminate all processes.
'''
sigint_handler(signum, frame)
for process in multiprocessing.active_children():
process.terminate()
sys.exit(0)
|
def sigterm_handler(signum, frame):
'''Intercept sigterm and terminate all processes.
'''
sigint_handler(signum, frame)
for process in multiprocessing.active_children():
process.terminate()
sys.exit(0)
|
[
"Intercept",
"sigterm",
"and",
"terminate",
"all",
"processes",
"."
] |
opencast/pyCA
|
python
|
https://github.com/opencast/pyCA/blob/c89b168d4780d157e1b3f7676628c1b131956a88/pyca/__main__.py#L52-L58
|
[
"def",
"sigterm_handler",
"(",
"signum",
",",
"frame",
")",
":",
"sigint_handler",
"(",
"signum",
",",
"frame",
")",
"for",
"process",
"in",
"multiprocessing",
".",
"active_children",
"(",
")",
":",
"process",
".",
"terminate",
"(",
")",
"sys",
".",
"exit",
"(",
"0",
")"
] |
c89b168d4780d157e1b3f7676628c1b131956a88
|
test
|
run_all
|
Start all services.
|
pyca/__main__.py
|
def run_all(*modules):
'''Start all services.
'''
processes = [multiprocessing.Process(target=mod.run) for mod in modules]
for p in processes:
p.start()
for p in processes:
p.join()
|
def run_all(*modules):
'''Start all services.
'''
processes = [multiprocessing.Process(target=mod.run) for mod in modules]
for p in processes:
p.start()
for p in processes:
p.join()
|
[
"Start",
"all",
"services",
"."
] |
opencast/pyCA
|
python
|
https://github.com/opencast/pyCA/blob/c89b168d4780d157e1b3f7676628c1b131956a88/pyca/__main__.py#L61-L68
|
[
"def",
"run_all",
"(",
"*",
"modules",
")",
":",
"processes",
"=",
"[",
"multiprocessing",
".",
"Process",
"(",
"target",
"=",
"mod",
".",
"run",
")",
"for",
"mod",
"in",
"modules",
"]",
"for",
"p",
"in",
"processes",
":",
"p",
".",
"start",
"(",
")",
"for",
"p",
"in",
"processes",
":",
"p",
".",
"join",
"(",
")"
] |
c89b168d4780d157e1b3f7676628c1b131956a88
|
test
|
parse_ical
|
Parse Opencast schedule iCalendar file and return events as dict
|
pyca/schedule.py
|
def parse_ical(vcal):
'''Parse Opencast schedule iCalendar file and return events as dict
'''
vcal = vcal.replace('\r\n ', '').replace('\r\n\r\n', '\r\n')
vevents = vcal.split('\r\nBEGIN:VEVENT\r\n')
del(vevents[0])
events = []
for vevent in vevents:
event = {}
for line in vevent.split('\r\n'):
line = line.split(':', 1)
key = line[0].lower()
if len(line) <= 1 or key == 'end':
continue
if key.startswith('dt'):
event[key] = unix_ts(dateutil.parser.parse(line[1]))
continue
if not key.startswith('attach'):
event[key] = line[1]
continue
# finally handle attachments
event['attach'] = event.get('attach', [])
attachment = {}
for x in [x.split('=') for x in line[0].split(';')]:
if x[0].lower() in ['fmttype', 'x-apple-filename']:
attachment[x[0].lower()] = x[1]
attachment['data'] = b64decode(line[1]).decode('utf-8')
event['attach'].append(attachment)
events.append(event)
return events
|
def parse_ical(vcal):
'''Parse Opencast schedule iCalendar file and return events as dict
'''
vcal = vcal.replace('\r\n ', '').replace('\r\n\r\n', '\r\n')
vevents = vcal.split('\r\nBEGIN:VEVENT\r\n')
del(vevents[0])
events = []
for vevent in vevents:
event = {}
for line in vevent.split('\r\n'):
line = line.split(':', 1)
key = line[0].lower()
if len(line) <= 1 or key == 'end':
continue
if key.startswith('dt'):
event[key] = unix_ts(dateutil.parser.parse(line[1]))
continue
if not key.startswith('attach'):
event[key] = line[1]
continue
# finally handle attachments
event['attach'] = event.get('attach', [])
attachment = {}
for x in [x.split('=') for x in line[0].split(';')]:
if x[0].lower() in ['fmttype', 'x-apple-filename']:
attachment[x[0].lower()] = x[1]
attachment['data'] = b64decode(line[1]).decode('utf-8')
event['attach'].append(attachment)
events.append(event)
return events
|
[
"Parse",
"Opencast",
"schedule",
"iCalendar",
"file",
"and",
"return",
"events",
"as",
"dict"
] |
opencast/pyCA
|
python
|
https://github.com/opencast/pyCA/blob/c89b168d4780d157e1b3f7676628c1b131956a88/pyca/schedule.py#L32-L61
|
[
"def",
"parse_ical",
"(",
"vcal",
")",
":",
"vcal",
"=",
"vcal",
".",
"replace",
"(",
"'\\r\\n '",
",",
"''",
")",
".",
"replace",
"(",
"'\\r\\n\\r\\n'",
",",
"'\\r\\n'",
")",
"vevents",
"=",
"vcal",
".",
"split",
"(",
"'\\r\\nBEGIN:VEVENT\\r\\n'",
")",
"del",
"(",
"vevents",
"[",
"0",
"]",
")",
"events",
"=",
"[",
"]",
"for",
"vevent",
"in",
"vevents",
":",
"event",
"=",
"{",
"}",
"for",
"line",
"in",
"vevent",
".",
"split",
"(",
"'\\r\\n'",
")",
":",
"line",
"=",
"line",
".",
"split",
"(",
"':'",
",",
"1",
")",
"key",
"=",
"line",
"[",
"0",
"]",
".",
"lower",
"(",
")",
"if",
"len",
"(",
"line",
")",
"<=",
"1",
"or",
"key",
"==",
"'end'",
":",
"continue",
"if",
"key",
".",
"startswith",
"(",
"'dt'",
")",
":",
"event",
"[",
"key",
"]",
"=",
"unix_ts",
"(",
"dateutil",
".",
"parser",
".",
"parse",
"(",
"line",
"[",
"1",
"]",
")",
")",
"continue",
"if",
"not",
"key",
".",
"startswith",
"(",
"'attach'",
")",
":",
"event",
"[",
"key",
"]",
"=",
"line",
"[",
"1",
"]",
"continue",
"# finally handle attachments",
"event",
"[",
"'attach'",
"]",
"=",
"event",
".",
"get",
"(",
"'attach'",
",",
"[",
"]",
")",
"attachment",
"=",
"{",
"}",
"for",
"x",
"in",
"[",
"x",
".",
"split",
"(",
"'='",
")",
"for",
"x",
"in",
"line",
"[",
"0",
"]",
".",
"split",
"(",
"';'",
")",
"]",
":",
"if",
"x",
"[",
"0",
"]",
".",
"lower",
"(",
")",
"in",
"[",
"'fmttype'",
",",
"'x-apple-filename'",
"]",
":",
"attachment",
"[",
"x",
"[",
"0",
"]",
".",
"lower",
"(",
")",
"]",
"=",
"x",
"[",
"1",
"]",
"attachment",
"[",
"'data'",
"]",
"=",
"b64decode",
"(",
"line",
"[",
"1",
"]",
")",
".",
"decode",
"(",
"'utf-8'",
")",
"event",
"[",
"'attach'",
"]",
".",
"append",
"(",
"attachment",
")",
"events",
".",
"append",
"(",
"event",
")",
"return",
"events"
] |
c89b168d4780d157e1b3f7676628c1b131956a88
|
test
|
get_schedule
|
Try to load schedule from the Matterhorn core. Returns a valid schedule
or None on failure.
|
pyca/schedule.py
|
def get_schedule():
'''Try to load schedule from the Matterhorn core. Returns a valid schedule
or None on failure.
'''
params = {'agentid': config()['agent']['name'].encode('utf8')}
lookahead = config()['agent']['cal_lookahead'] * 24 * 60 * 60
if lookahead:
params['cutoff'] = str((timestamp() + lookahead) * 1000)
uri = '%s/calendars?%s' % (config()['service-scheduler'][0],
urlencode(params))
try:
vcal = http_request(uri)
except pycurl.error as e:
logger.error('Could not get schedule: %s' % e)
return
try:
cal = parse_ical(vcal.decode('utf-8'))
except Exception:
logger.error('Could not parse ical')
logger.error(traceback.format_exc())
return
db = get_session()
db.query(UpcomingEvent).delete()
for event in cal:
# Ignore events that have already ended
if event['dtend'] <= timestamp():
continue
e = UpcomingEvent()
e.start = event['dtstart']
e.end = event['dtend']
e.uid = event.get('uid')
e.title = event.get('summary')
e.set_data(event)
db.add(e)
db.commit()
|
def get_schedule():
'''Try to load schedule from the Matterhorn core. Returns a valid schedule
or None on failure.
'''
params = {'agentid': config()['agent']['name'].encode('utf8')}
lookahead = config()['agent']['cal_lookahead'] * 24 * 60 * 60
if lookahead:
params['cutoff'] = str((timestamp() + lookahead) * 1000)
uri = '%s/calendars?%s' % (config()['service-scheduler'][0],
urlencode(params))
try:
vcal = http_request(uri)
except pycurl.error as e:
logger.error('Could not get schedule: %s' % e)
return
try:
cal = parse_ical(vcal.decode('utf-8'))
except Exception:
logger.error('Could not parse ical')
logger.error(traceback.format_exc())
return
db = get_session()
db.query(UpcomingEvent).delete()
for event in cal:
# Ignore events that have already ended
if event['dtend'] <= timestamp():
continue
e = UpcomingEvent()
e.start = event['dtstart']
e.end = event['dtend']
e.uid = event.get('uid')
e.title = event.get('summary')
e.set_data(event)
db.add(e)
db.commit()
|
[
"Try",
"to",
"load",
"schedule",
"from",
"the",
"Matterhorn",
"core",
".",
"Returns",
"a",
"valid",
"schedule",
"or",
"None",
"on",
"failure",
"."
] |
opencast/pyCA
|
python
|
https://github.com/opencast/pyCA/blob/c89b168d4780d157e1b3f7676628c1b131956a88/pyca/schedule.py#L64-L99
|
[
"def",
"get_schedule",
"(",
")",
":",
"params",
"=",
"{",
"'agentid'",
":",
"config",
"(",
")",
"[",
"'agent'",
"]",
"[",
"'name'",
"]",
".",
"encode",
"(",
"'utf8'",
")",
"}",
"lookahead",
"=",
"config",
"(",
")",
"[",
"'agent'",
"]",
"[",
"'cal_lookahead'",
"]",
"*",
"24",
"*",
"60",
"*",
"60",
"if",
"lookahead",
":",
"params",
"[",
"'cutoff'",
"]",
"=",
"str",
"(",
"(",
"timestamp",
"(",
")",
"+",
"lookahead",
")",
"*",
"1000",
")",
"uri",
"=",
"'%s/calendars?%s'",
"%",
"(",
"config",
"(",
")",
"[",
"'service-scheduler'",
"]",
"[",
"0",
"]",
",",
"urlencode",
"(",
"params",
")",
")",
"try",
":",
"vcal",
"=",
"http_request",
"(",
"uri",
")",
"except",
"pycurl",
".",
"error",
"as",
"e",
":",
"logger",
".",
"error",
"(",
"'Could not get schedule: %s'",
"%",
"e",
")",
"return",
"try",
":",
"cal",
"=",
"parse_ical",
"(",
"vcal",
".",
"decode",
"(",
"'utf-8'",
")",
")",
"except",
"Exception",
":",
"logger",
".",
"error",
"(",
"'Could not parse ical'",
")",
"logger",
".",
"error",
"(",
"traceback",
".",
"format_exc",
"(",
")",
")",
"return",
"db",
"=",
"get_session",
"(",
")",
"db",
".",
"query",
"(",
"UpcomingEvent",
")",
".",
"delete",
"(",
")",
"for",
"event",
"in",
"cal",
":",
"# Ignore events that have already ended",
"if",
"event",
"[",
"'dtend'",
"]",
"<=",
"timestamp",
"(",
")",
":",
"continue",
"e",
"=",
"UpcomingEvent",
"(",
")",
"e",
".",
"start",
"=",
"event",
"[",
"'dtstart'",
"]",
"e",
".",
"end",
"=",
"event",
"[",
"'dtend'",
"]",
"e",
".",
"uid",
"=",
"event",
".",
"get",
"(",
"'uid'",
")",
"e",
".",
"title",
"=",
"event",
".",
"get",
"(",
"'summary'",
")",
"e",
".",
"set_data",
"(",
"event",
")",
"db",
".",
"add",
"(",
"e",
")",
"db",
".",
"commit",
"(",
")"
] |
c89b168d4780d157e1b3f7676628c1b131956a88
|
test
|
control_loop
|
Main loop, retrieving the schedule.
|
pyca/schedule.py
|
def control_loop():
'''Main loop, retrieving the schedule.
'''
set_service_status(Service.SCHEDULE, ServiceStatus.BUSY)
notify.notify('READY=1')
while not terminate():
notify.notify('WATCHDOG=1')
# Try getting an updated schedule
get_schedule()
session = get_session()
next_event = session.query(UpcomingEvent)\
.filter(UpcomingEvent.end > timestamp())\
.order_by(UpcomingEvent.start)\
.first()
if next_event:
logger.info('Next scheduled recording: %s',
datetime.fromtimestamp(next_event.start))
notify.notify('STATUS=Next scheduled recording: %s' %
datetime.fromtimestamp(next_event.start))
else:
logger.info('No scheduled recording')
notify.notify('STATUS=No scheduled recording')
session.close()
next_update = timestamp() + config()['agent']['update_frequency']
while not terminate() and timestamp() < next_update:
time.sleep(0.1)
logger.info('Shutting down schedule service')
set_service_status(Service.SCHEDULE, ServiceStatus.STOPPED)
|
def control_loop():
'''Main loop, retrieving the schedule.
'''
set_service_status(Service.SCHEDULE, ServiceStatus.BUSY)
notify.notify('READY=1')
while not terminate():
notify.notify('WATCHDOG=1')
# Try getting an updated schedule
get_schedule()
session = get_session()
next_event = session.query(UpcomingEvent)\
.filter(UpcomingEvent.end > timestamp())\
.order_by(UpcomingEvent.start)\
.first()
if next_event:
logger.info('Next scheduled recording: %s',
datetime.fromtimestamp(next_event.start))
notify.notify('STATUS=Next scheduled recording: %s' %
datetime.fromtimestamp(next_event.start))
else:
logger.info('No scheduled recording')
notify.notify('STATUS=No scheduled recording')
session.close()
next_update = timestamp() + config()['agent']['update_frequency']
while not terminate() and timestamp() < next_update:
time.sleep(0.1)
logger.info('Shutting down schedule service')
set_service_status(Service.SCHEDULE, ServiceStatus.STOPPED)
|
[
"Main",
"loop",
"retrieving",
"the",
"schedule",
"."
] |
opencast/pyCA
|
python
|
https://github.com/opencast/pyCA/blob/c89b168d4780d157e1b3f7676628c1b131956a88/pyca/schedule.py#L102-L131
|
[
"def",
"control_loop",
"(",
")",
":",
"set_service_status",
"(",
"Service",
".",
"SCHEDULE",
",",
"ServiceStatus",
".",
"BUSY",
")",
"notify",
".",
"notify",
"(",
"'READY=1'",
")",
"while",
"not",
"terminate",
"(",
")",
":",
"notify",
".",
"notify",
"(",
"'WATCHDOG=1'",
")",
"# Try getting an updated schedule",
"get_schedule",
"(",
")",
"session",
"=",
"get_session",
"(",
")",
"next_event",
"=",
"session",
".",
"query",
"(",
"UpcomingEvent",
")",
".",
"filter",
"(",
"UpcomingEvent",
".",
"end",
">",
"timestamp",
"(",
")",
")",
".",
"order_by",
"(",
"UpcomingEvent",
".",
"start",
")",
".",
"first",
"(",
")",
"if",
"next_event",
":",
"logger",
".",
"info",
"(",
"'Next scheduled recording: %s'",
",",
"datetime",
".",
"fromtimestamp",
"(",
"next_event",
".",
"start",
")",
")",
"notify",
".",
"notify",
"(",
"'STATUS=Next scheduled recording: %s'",
"%",
"datetime",
".",
"fromtimestamp",
"(",
"next_event",
".",
"start",
")",
")",
"else",
":",
"logger",
".",
"info",
"(",
"'No scheduled recording'",
")",
"notify",
".",
"notify",
"(",
"'STATUS=No scheduled recording'",
")",
"session",
".",
"close",
"(",
")",
"next_update",
"=",
"timestamp",
"(",
")",
"+",
"config",
"(",
")",
"[",
"'agent'",
"]",
"[",
"'update_frequency'",
"]",
"while",
"not",
"terminate",
"(",
")",
"and",
"timestamp",
"(",
")",
"<",
"next_update",
":",
"time",
".",
"sleep",
"(",
"0.1",
")",
"logger",
".",
"info",
"(",
"'Shutting down schedule service'",
")",
"set_service_status",
"(",
"Service",
".",
"SCHEDULE",
",",
"ServiceStatus",
".",
"STOPPED",
")"
] |
c89b168d4780d157e1b3f7676628c1b131956a88
|
test
|
control_loop
|
Main loop, updating the capture agent state.
|
pyca/agentstate.py
|
def control_loop():
'''Main loop, updating the capture agent state.
'''
set_service_status(Service.AGENTSTATE, ServiceStatus.BUSY)
notify.notify('READY=1')
notify.notify('STATUS=Running')
while not terminate():
notify.notify('WATCHDOG=1')
update_agent_state()
next_update = timestamp() + config()['agent']['update_frequency']
while not terminate() and timestamp() < next_update:
time.sleep(0.1)
logger.info('Shutting down agentstate service')
set_service_status(Service.AGENTSTATE, ServiceStatus.STOPPED)
|
def control_loop():
'''Main loop, updating the capture agent state.
'''
set_service_status(Service.AGENTSTATE, ServiceStatus.BUSY)
notify.notify('READY=1')
notify.notify('STATUS=Running')
while not terminate():
notify.notify('WATCHDOG=1')
update_agent_state()
next_update = timestamp() + config()['agent']['update_frequency']
while not terminate() and timestamp() < next_update:
time.sleep(0.1)
logger.info('Shutting down agentstate service')
set_service_status(Service.AGENTSTATE, ServiceStatus.STOPPED)
|
[
"Main",
"loop",
"updating",
"the",
"capture",
"agent",
"state",
"."
] |
opencast/pyCA
|
python
|
https://github.com/opencast/pyCA/blob/c89b168d4780d157e1b3f7676628c1b131956a88/pyca/agentstate.py#L22-L37
|
[
"def",
"control_loop",
"(",
")",
":",
"set_service_status",
"(",
"Service",
".",
"AGENTSTATE",
",",
"ServiceStatus",
".",
"BUSY",
")",
"notify",
".",
"notify",
"(",
"'READY=1'",
")",
"notify",
".",
"notify",
"(",
"'STATUS=Running'",
")",
"while",
"not",
"terminate",
"(",
")",
":",
"notify",
".",
"notify",
"(",
"'WATCHDOG=1'",
")",
"update_agent_state",
"(",
")",
"next_update",
"=",
"timestamp",
"(",
")",
"+",
"config",
"(",
")",
"[",
"'agent'",
"]",
"[",
"'update_frequency'",
"]",
"while",
"not",
"terminate",
"(",
")",
"and",
"timestamp",
"(",
")",
"<",
"next_update",
":",
"time",
".",
"sleep",
"(",
"0.1",
")",
"logger",
".",
"info",
"(",
"'Shutting down agentstate service'",
")",
"set_service_status",
"(",
"Service",
".",
"AGENTSTATE",
",",
"ServiceStatus",
".",
"STOPPED",
")"
] |
c89b168d4780d157e1b3f7676628c1b131956a88
|
test
|
make_error_response
|
Return a response with a jsonapi error object
|
pyca/ui/jsonapi.py
|
def make_error_response(error, status=500):
''' Return a response with a jsonapi error object
'''
content = {
'errors': [{
'status': status,
'title': error
}]
}
return make_response(jsonify(content), status)
|
def make_error_response(error, status=500):
''' Return a response with a jsonapi error object
'''
content = {
'errors': [{
'status': status,
'title': error
}]
}
return make_response(jsonify(content), status)
|
[
"Return",
"a",
"response",
"with",
"a",
"jsonapi",
"error",
"object"
] |
opencast/pyCA
|
python
|
https://github.com/opencast/pyCA/blob/c89b168d4780d157e1b3f7676628c1b131956a88/pyca/ui/jsonapi.py#L14-L23
|
[
"def",
"make_error_response",
"(",
"error",
",",
"status",
"=",
"500",
")",
":",
"content",
"=",
"{",
"'errors'",
":",
"[",
"{",
"'status'",
":",
"status",
",",
"'title'",
":",
"error",
"}",
"]",
"}",
"return",
"make_response",
"(",
"jsonify",
"(",
"content",
")",
",",
"status",
")"
] |
c89b168d4780d157e1b3f7676628c1b131956a88
|
test
|
make_data_response
|
Return a response with a list of jsonapi data objects
|
pyca/ui/jsonapi.py
|
def make_data_response(data, status=200):
''' Return a response with a list of jsonapi data objects
'''
content = {'data': ensurelist(data)}
return make_response(jsonify(content), status)
|
def make_data_response(data, status=200):
''' Return a response with a list of jsonapi data objects
'''
content = {'data': ensurelist(data)}
return make_response(jsonify(content), status)
|
[
"Return",
"a",
"response",
"with",
"a",
"list",
"of",
"jsonapi",
"data",
"objects"
] |
opencast/pyCA
|
python
|
https://github.com/opencast/pyCA/blob/c89b168d4780d157e1b3f7676628c1b131956a88/pyca/ui/jsonapi.py#L26-L30
|
[
"def",
"make_data_response",
"(",
"data",
",",
"status",
"=",
"200",
")",
":",
"content",
"=",
"{",
"'data'",
":",
"ensurelist",
"(",
"data",
")",
"}",
"return",
"make_response",
"(",
"jsonify",
"(",
"content",
")",
",",
"status",
")"
] |
c89b168d4780d157e1b3f7676628c1b131956a88
|
test
|
internal_state
|
Serve a json representation of internal agentstate as meta data
|
pyca/ui/jsonapi.py
|
def internal_state():
'''Serve a json representation of internal agentstate as meta data
'''
data = {'services': {
'capture': ServiceStatus.str(get_service_status(Service.CAPTURE)),
'ingest': ServiceStatus.str(get_service_status(Service.INGEST)),
'schedule': ServiceStatus.str(get_service_status(Service.SCHEDULE)),
'agentstate': ServiceStatus.str(get_service_status(Service.AGENTSTATE))
}
}
return make_response(jsonify({'meta': data}))
|
def internal_state():
'''Serve a json representation of internal agentstate as meta data
'''
data = {'services': {
'capture': ServiceStatus.str(get_service_status(Service.CAPTURE)),
'ingest': ServiceStatus.str(get_service_status(Service.INGEST)),
'schedule': ServiceStatus.str(get_service_status(Service.SCHEDULE)),
'agentstate': ServiceStatus.str(get_service_status(Service.AGENTSTATE))
}
}
return make_response(jsonify({'meta': data}))
|
[
"Serve",
"a",
"json",
"representation",
"of",
"internal",
"agentstate",
"as",
"meta",
"data"
] |
opencast/pyCA
|
python
|
https://github.com/opencast/pyCA/blob/c89b168d4780d157e1b3f7676628c1b131956a88/pyca/ui/jsonapi.py#L36-L46
|
[
"def",
"internal_state",
"(",
")",
":",
"data",
"=",
"{",
"'services'",
":",
"{",
"'capture'",
":",
"ServiceStatus",
".",
"str",
"(",
"get_service_status",
"(",
"Service",
".",
"CAPTURE",
")",
")",
",",
"'ingest'",
":",
"ServiceStatus",
".",
"str",
"(",
"get_service_status",
"(",
"Service",
".",
"INGEST",
")",
")",
",",
"'schedule'",
":",
"ServiceStatus",
".",
"str",
"(",
"get_service_status",
"(",
"Service",
".",
"SCHEDULE",
")",
")",
",",
"'agentstate'",
":",
"ServiceStatus",
".",
"str",
"(",
"get_service_status",
"(",
"Service",
".",
"AGENTSTATE",
")",
")",
"}",
"}",
"return",
"make_response",
"(",
"jsonify",
"(",
"{",
"'meta'",
":",
"data",
"}",
")",
")"
] |
c89b168d4780d157e1b3f7676628c1b131956a88
|
test
|
events
|
Serve a JSON representation of events
|
pyca/ui/jsonapi.py
|
def events():
'''Serve a JSON representation of events
'''
db = get_session()
upcoming_events = db.query(UpcomingEvent)\
.order_by(UpcomingEvent.start)
recorded_events = db.query(RecordedEvent)\
.order_by(RecordedEvent.start.desc())
result = [event.serialize() for event in upcoming_events]
result += [event.serialize() for event in recorded_events]
return make_data_response(result)
|
def events():
'''Serve a JSON representation of events
'''
db = get_session()
upcoming_events = db.query(UpcomingEvent)\
.order_by(UpcomingEvent.start)
recorded_events = db.query(RecordedEvent)\
.order_by(RecordedEvent.start.desc())
result = [event.serialize() for event in upcoming_events]
result += [event.serialize() for event in recorded_events]
return make_data_response(result)
|
[
"Serve",
"a",
"JSON",
"representation",
"of",
"events"
] |
opencast/pyCA
|
python
|
https://github.com/opencast/pyCA/blob/c89b168d4780d157e1b3f7676628c1b131956a88/pyca/ui/jsonapi.py#L52-L63
|
[
"def",
"events",
"(",
")",
":",
"db",
"=",
"get_session",
"(",
")",
"upcoming_events",
"=",
"db",
".",
"query",
"(",
"UpcomingEvent",
")",
".",
"order_by",
"(",
"UpcomingEvent",
".",
"start",
")",
"recorded_events",
"=",
"db",
".",
"query",
"(",
"RecordedEvent",
")",
".",
"order_by",
"(",
"RecordedEvent",
".",
"start",
".",
"desc",
"(",
")",
")",
"result",
"=",
"[",
"event",
".",
"serialize",
"(",
")",
"for",
"event",
"in",
"upcoming_events",
"]",
"result",
"+=",
"[",
"event",
".",
"serialize",
"(",
")",
"for",
"event",
"in",
"recorded_events",
"]",
"return",
"make_data_response",
"(",
"result",
")"
] |
c89b168d4780d157e1b3f7676628c1b131956a88
|
test
|
event
|
Return a specific events JSON
|
pyca/ui/jsonapi.py
|
def event(uid):
'''Return a specific events JSON
'''
db = get_session()
event = db.query(RecordedEvent).filter(RecordedEvent.uid == uid).first() \
or db.query(UpcomingEvent).filter(UpcomingEvent.uid == uid).first()
if event:
return make_data_response(event.serialize())
return make_error_response('No event with specified uid', 404)
|
def event(uid):
'''Return a specific events JSON
'''
db = get_session()
event = db.query(RecordedEvent).filter(RecordedEvent.uid == uid).first() \
or db.query(UpcomingEvent).filter(UpcomingEvent.uid == uid).first()
if event:
return make_data_response(event.serialize())
return make_error_response('No event with specified uid', 404)
|
[
"Return",
"a",
"specific",
"events",
"JSON"
] |
opencast/pyCA
|
python
|
https://github.com/opencast/pyCA/blob/c89b168d4780d157e1b3f7676628c1b131956a88/pyca/ui/jsonapi.py#L69-L78
|
[
"def",
"event",
"(",
"uid",
")",
":",
"db",
"=",
"get_session",
"(",
")",
"event",
"=",
"db",
".",
"query",
"(",
"RecordedEvent",
")",
".",
"filter",
"(",
"RecordedEvent",
".",
"uid",
"==",
"uid",
")",
".",
"first",
"(",
")",
"or",
"db",
".",
"query",
"(",
"UpcomingEvent",
")",
".",
"filter",
"(",
"UpcomingEvent",
".",
"uid",
"==",
"uid",
")",
".",
"first",
"(",
")",
"if",
"event",
":",
"return",
"make_data_response",
"(",
"event",
".",
"serialize",
"(",
")",
")",
"return",
"make_error_response",
"(",
"'No event with specified uid'",
",",
"404",
")"
] |
c89b168d4780d157e1b3f7676628c1b131956a88
|
test
|
delete_event
|
Delete a specific event identified by its uid. Note that only recorded
events can be deleted. Events in the buffer for upcoming events are
regularly replaced anyway and a manual removal could have unpredictable
effects.
Use ?hard=true parameter to delete the recorded files on disk as well.
Returns 204 if the action was successful.
Returns 404 if event does not exist
|
pyca/ui/jsonapi.py
|
def delete_event(uid):
'''Delete a specific event identified by its uid. Note that only recorded
events can be deleted. Events in the buffer for upcoming events are
regularly replaced anyway and a manual removal could have unpredictable
effects.
Use ?hard=true parameter to delete the recorded files on disk as well.
Returns 204 if the action was successful.
Returns 404 if event does not exist
'''
logger.info('deleting event %s via api', uid)
db = get_session()
events = db.query(RecordedEvent).filter(RecordedEvent.uid == uid)
if not events.count():
return make_error_response('No event with specified uid', 404)
hard_delete = request.args.get('hard', 'false')
if hard_delete == 'true':
logger.info('deleting recorded files at %s', events[0].directory())
shutil.rmtree(events[0].directory())
events.delete()
db.commit()
return make_response('', 204)
|
def delete_event(uid):
'''Delete a specific event identified by its uid. Note that only recorded
events can be deleted. Events in the buffer for upcoming events are
regularly replaced anyway and a manual removal could have unpredictable
effects.
Use ?hard=true parameter to delete the recorded files on disk as well.
Returns 204 if the action was successful.
Returns 404 if event does not exist
'''
logger.info('deleting event %s via api', uid)
db = get_session()
events = db.query(RecordedEvent).filter(RecordedEvent.uid == uid)
if not events.count():
return make_error_response('No event with specified uid', 404)
hard_delete = request.args.get('hard', 'false')
if hard_delete == 'true':
logger.info('deleting recorded files at %s', events[0].directory())
shutil.rmtree(events[0].directory())
events.delete()
db.commit()
return make_response('', 204)
|
[
"Delete",
"a",
"specific",
"event",
"identified",
"by",
"its",
"uid",
".",
"Note",
"that",
"only",
"recorded",
"events",
"can",
"be",
"deleted",
".",
"Events",
"in",
"the",
"buffer",
"for",
"upcoming",
"events",
"are",
"regularly",
"replaced",
"anyway",
"and",
"a",
"manual",
"removal",
"could",
"have",
"unpredictable",
"effects",
"."
] |
opencast/pyCA
|
python
|
https://github.com/opencast/pyCA/blob/c89b168d4780d157e1b3f7676628c1b131956a88/pyca/ui/jsonapi.py#L84-L106
|
[
"def",
"delete_event",
"(",
"uid",
")",
":",
"logger",
".",
"info",
"(",
"'deleting event %s via api'",
",",
"uid",
")",
"db",
"=",
"get_session",
"(",
")",
"events",
"=",
"db",
".",
"query",
"(",
"RecordedEvent",
")",
".",
"filter",
"(",
"RecordedEvent",
".",
"uid",
"==",
"uid",
")",
"if",
"not",
"events",
".",
"count",
"(",
")",
":",
"return",
"make_error_response",
"(",
"'No event with specified uid'",
",",
"404",
")",
"hard_delete",
"=",
"request",
".",
"args",
".",
"get",
"(",
"'hard'",
",",
"'false'",
")",
"if",
"hard_delete",
"==",
"'true'",
":",
"logger",
".",
"info",
"(",
"'deleting recorded files at %s'",
",",
"events",
"[",
"0",
"]",
".",
"directory",
"(",
")",
")",
"shutil",
".",
"rmtree",
"(",
"events",
"[",
"0",
"]",
".",
"directory",
"(",
")",
")",
"events",
".",
"delete",
"(",
")",
"db",
".",
"commit",
"(",
")",
"return",
"make_response",
"(",
"''",
",",
"204",
")"
] |
c89b168d4780d157e1b3f7676628c1b131956a88
|
test
|
modify_event
|
Modify an event specified by its uid. The modifications for the event
are expected as JSON with the content type correctly set in the request.
Note that this method works for recorded events only. Upcoming events part
of the scheduler cache cannot be modified.
|
pyca/ui/jsonapi.py
|
def modify_event(uid):
'''Modify an event specified by its uid. The modifications for the event
are expected as JSON with the content type correctly set in the request.
Note that this method works for recorded events only. Upcoming events part
of the scheduler cache cannot be modified.
'''
try:
data = request.get_json()['data'][0]
if data['type'] != 'event' or data['id'] != uid:
return make_error_response('Invalid data', 400)
# Check attributes
for key in data['attributes'].keys():
if key not in ('status', 'start', 'end'):
return make_error_response('Invalid data', 400)
# Check new status
new_status = data['attributes'].get('status')
if new_status:
new_status = new_status.upper().replace(' ', '_')
data['attributes']['status'] = int(getattr(Status, new_status))
except Exception:
return make_error_response('Invalid data', 400)
db = get_session()
event = db.query(RecordedEvent).filter(RecordedEvent.uid == uid).first()
if not event:
return make_error_response('No event with specified uid', 404)
event.start = data['attributes'].get('start', event.start)
event.end = data['attributes'].get('end', event.end)
event.status = data['attributes'].get('status', event.status)
logger.debug('Updating event %s via api', uid)
db.commit()
return make_data_response(event.serialize())
|
def modify_event(uid):
'''Modify an event specified by its uid. The modifications for the event
are expected as JSON with the content type correctly set in the request.
Note that this method works for recorded events only. Upcoming events part
of the scheduler cache cannot be modified.
'''
try:
data = request.get_json()['data'][0]
if data['type'] != 'event' or data['id'] != uid:
return make_error_response('Invalid data', 400)
# Check attributes
for key in data['attributes'].keys():
if key not in ('status', 'start', 'end'):
return make_error_response('Invalid data', 400)
# Check new status
new_status = data['attributes'].get('status')
if new_status:
new_status = new_status.upper().replace(' ', '_')
data['attributes']['status'] = int(getattr(Status, new_status))
except Exception:
return make_error_response('Invalid data', 400)
db = get_session()
event = db.query(RecordedEvent).filter(RecordedEvent.uid == uid).first()
if not event:
return make_error_response('No event with specified uid', 404)
event.start = data['attributes'].get('start', event.start)
event.end = data['attributes'].get('end', event.end)
event.status = data['attributes'].get('status', event.status)
logger.debug('Updating event %s via api', uid)
db.commit()
return make_data_response(event.serialize())
|
[
"Modify",
"an",
"event",
"specified",
"by",
"its",
"uid",
".",
"The",
"modifications",
"for",
"the",
"event",
"are",
"expected",
"as",
"JSON",
"with",
"the",
"content",
"type",
"correctly",
"set",
"in",
"the",
"request",
"."
] |
opencast/pyCA
|
python
|
https://github.com/opencast/pyCA/blob/c89b168d4780d157e1b3f7676628c1b131956a88/pyca/ui/jsonapi.py#L112-L144
|
[
"def",
"modify_event",
"(",
"uid",
")",
":",
"try",
":",
"data",
"=",
"request",
".",
"get_json",
"(",
")",
"[",
"'data'",
"]",
"[",
"0",
"]",
"if",
"data",
"[",
"'type'",
"]",
"!=",
"'event'",
"or",
"data",
"[",
"'id'",
"]",
"!=",
"uid",
":",
"return",
"make_error_response",
"(",
"'Invalid data'",
",",
"400",
")",
"# Check attributes",
"for",
"key",
"in",
"data",
"[",
"'attributes'",
"]",
".",
"keys",
"(",
")",
":",
"if",
"key",
"not",
"in",
"(",
"'status'",
",",
"'start'",
",",
"'end'",
")",
":",
"return",
"make_error_response",
"(",
"'Invalid data'",
",",
"400",
")",
"# Check new status",
"new_status",
"=",
"data",
"[",
"'attributes'",
"]",
".",
"get",
"(",
"'status'",
")",
"if",
"new_status",
":",
"new_status",
"=",
"new_status",
".",
"upper",
"(",
")",
".",
"replace",
"(",
"' '",
",",
"'_'",
")",
"data",
"[",
"'attributes'",
"]",
"[",
"'status'",
"]",
"=",
"int",
"(",
"getattr",
"(",
"Status",
",",
"new_status",
")",
")",
"except",
"Exception",
":",
"return",
"make_error_response",
"(",
"'Invalid data'",
",",
"400",
")",
"db",
"=",
"get_session",
"(",
")",
"event",
"=",
"db",
".",
"query",
"(",
"RecordedEvent",
")",
".",
"filter",
"(",
"RecordedEvent",
".",
"uid",
"==",
"uid",
")",
".",
"first",
"(",
")",
"if",
"not",
"event",
":",
"return",
"make_error_response",
"(",
"'No event with specified uid'",
",",
"404",
")",
"event",
".",
"start",
"=",
"data",
"[",
"'attributes'",
"]",
".",
"get",
"(",
"'start'",
",",
"event",
".",
"start",
")",
"event",
".",
"end",
"=",
"data",
"[",
"'attributes'",
"]",
".",
"get",
"(",
"'end'",
",",
"event",
".",
"end",
")",
"event",
".",
"status",
"=",
"data",
"[",
"'attributes'",
"]",
".",
"get",
"(",
"'status'",
",",
"event",
".",
"status",
")",
"logger",
".",
"debug",
"(",
"'Updating event %s via api'",
",",
"uid",
")",
"db",
".",
"commit",
"(",
")",
"return",
"make_data_response",
"(",
"event",
".",
"serialize",
"(",
")",
")"
] |
c89b168d4780d157e1b3f7676628c1b131956a88
|
test
|
get_config_params
|
Extract the set of configuration parameters from the properties attached
to the schedule
|
pyca/ingest.py
|
def get_config_params(properties):
'''Extract the set of configuration parameters from the properties attached
to the schedule
'''
param = []
wdef = ''
for prop in properties.split('\n'):
if prop.startswith('org.opencastproject.workflow.config'):
key, val = prop.split('=', 1)
key = key.split('.')[-1]
param.append((key, val))
elif prop.startswith('org.opencastproject.workflow.definition'):
wdef = prop.split('=', 1)[-1]
return wdef, param
|
def get_config_params(properties):
'''Extract the set of configuration parameters from the properties attached
to the schedule
'''
param = []
wdef = ''
for prop in properties.split('\n'):
if prop.startswith('org.opencastproject.workflow.config'):
key, val = prop.split('=', 1)
key = key.split('.')[-1]
param.append((key, val))
elif prop.startswith('org.opencastproject.workflow.definition'):
wdef = prop.split('=', 1)[-1]
return wdef, param
|
[
"Extract",
"the",
"set",
"of",
"configuration",
"parameters",
"from",
"the",
"properties",
"attached",
"to",
"the",
"schedule"
] |
opencast/pyCA
|
python
|
https://github.com/opencast/pyCA/blob/c89b168d4780d157e1b3f7676628c1b131956a88/pyca/ingest.py#L26-L39
|
[
"def",
"get_config_params",
"(",
"properties",
")",
":",
"param",
"=",
"[",
"]",
"wdef",
"=",
"''",
"for",
"prop",
"in",
"properties",
".",
"split",
"(",
"'\\n'",
")",
":",
"if",
"prop",
".",
"startswith",
"(",
"'org.opencastproject.workflow.config'",
")",
":",
"key",
",",
"val",
"=",
"prop",
".",
"split",
"(",
"'='",
",",
"1",
")",
"key",
"=",
"key",
".",
"split",
"(",
"'.'",
")",
"[",
"-",
"1",
"]",
"param",
".",
"append",
"(",
"(",
"key",
",",
"val",
")",
")",
"elif",
"prop",
".",
"startswith",
"(",
"'org.opencastproject.workflow.definition'",
")",
":",
"wdef",
"=",
"prop",
".",
"split",
"(",
"'='",
",",
"1",
")",
"[",
"-",
"1",
"]",
"return",
"wdef",
",",
"param"
] |
c89b168d4780d157e1b3f7676628c1b131956a88
|
test
|
ingest
|
Ingest a finished recording to the Opencast server.
|
pyca/ingest.py
|
def ingest(event):
'''Ingest a finished recording to the Opencast server.
'''
# Update status
set_service_status(Service.INGEST, ServiceStatus.BUSY)
notify.notify('STATUS=Uploading')
recording_state(event.uid, 'uploading')
update_event_status(event, Status.UPLOADING)
# Select ingest service
# The ingest service to use is selected at random from the available
# ingest services to ensure that not every capture agent uses the same
# service at the same time
service = config('service-ingest')
service = service[randrange(0, len(service))]
logger.info('Selecting ingest service to use: ' + service)
# create mediapackage
logger.info('Creating new mediapackage')
mediapackage = http_request(service + '/createMediaPackage')
# extract workflow_def, workflow_config and add DC catalogs
prop = 'org.opencastproject.capture.agent.properties'
dcns = 'http://www.opencastproject.org/xsd/1.0/dublincore/'
for attachment in event.get_data().get('attach'):
data = attachment.get('data')
if attachment.get('x-apple-filename') == prop:
workflow_def, workflow_config = get_config_params(data)
# Check for dublincore catalogs
elif attachment.get('fmttype') == 'application/xml' and dcns in data:
name = attachment.get('x-apple-filename', '').rsplit('.', 1)[0]
logger.info('Adding %s DC catalog' % name)
fields = [('mediaPackage', mediapackage),
('flavor', 'dublincore/%s' % name),
('dublinCore', data.encode('utf-8'))]
mediapackage = http_request(service + '/addDCCatalog', fields)
# add track
for (flavor, track) in event.get_tracks():
logger.info('Adding track ({0} -> {1})'.format(flavor, track))
track = track.encode('ascii', 'ignore')
fields = [('mediaPackage', mediapackage), ('flavor', flavor),
('BODY1', (pycurl.FORM_FILE, track))]
mediapackage = http_request(service + '/addTrack', fields)
# ingest
logger.info('Ingest recording')
fields = [('mediaPackage', mediapackage)]
if workflow_def:
fields.append(('workflowDefinitionId', workflow_def))
if event.uid:
fields.append(('workflowInstanceId',
event.uid.encode('ascii', 'ignore')))
fields += workflow_config
mediapackage = http_request(service + '/ingest', fields)
# Update status
recording_state(event.uid, 'upload_finished')
update_event_status(event, Status.FINISHED_UPLOADING)
notify.notify('STATUS=Running')
set_service_status_immediate(Service.INGEST, ServiceStatus.IDLE)
logger.info('Finished ingest')
|
def ingest(event):
'''Ingest a finished recording to the Opencast server.
'''
# Update status
set_service_status(Service.INGEST, ServiceStatus.BUSY)
notify.notify('STATUS=Uploading')
recording_state(event.uid, 'uploading')
update_event_status(event, Status.UPLOADING)
# Select ingest service
# The ingest service to use is selected at random from the available
# ingest services to ensure that not every capture agent uses the same
# service at the same time
service = config('service-ingest')
service = service[randrange(0, len(service))]
logger.info('Selecting ingest service to use: ' + service)
# create mediapackage
logger.info('Creating new mediapackage')
mediapackage = http_request(service + '/createMediaPackage')
# extract workflow_def, workflow_config and add DC catalogs
prop = 'org.opencastproject.capture.agent.properties'
dcns = 'http://www.opencastproject.org/xsd/1.0/dublincore/'
for attachment in event.get_data().get('attach'):
data = attachment.get('data')
if attachment.get('x-apple-filename') == prop:
workflow_def, workflow_config = get_config_params(data)
# Check for dublincore catalogs
elif attachment.get('fmttype') == 'application/xml' and dcns in data:
name = attachment.get('x-apple-filename', '').rsplit('.', 1)[0]
logger.info('Adding %s DC catalog' % name)
fields = [('mediaPackage', mediapackage),
('flavor', 'dublincore/%s' % name),
('dublinCore', data.encode('utf-8'))]
mediapackage = http_request(service + '/addDCCatalog', fields)
# add track
for (flavor, track) in event.get_tracks():
logger.info('Adding track ({0} -> {1})'.format(flavor, track))
track = track.encode('ascii', 'ignore')
fields = [('mediaPackage', mediapackage), ('flavor', flavor),
('BODY1', (pycurl.FORM_FILE, track))]
mediapackage = http_request(service + '/addTrack', fields)
# ingest
logger.info('Ingest recording')
fields = [('mediaPackage', mediapackage)]
if workflow_def:
fields.append(('workflowDefinitionId', workflow_def))
if event.uid:
fields.append(('workflowInstanceId',
event.uid.encode('ascii', 'ignore')))
fields += workflow_config
mediapackage = http_request(service + '/ingest', fields)
# Update status
recording_state(event.uid, 'upload_finished')
update_event_status(event, Status.FINISHED_UPLOADING)
notify.notify('STATUS=Running')
set_service_status_immediate(Service.INGEST, ServiceStatus.IDLE)
logger.info('Finished ingest')
|
[
"Ingest",
"a",
"finished",
"recording",
"to",
"the",
"Opencast",
"server",
"."
] |
opencast/pyCA
|
python
|
https://github.com/opencast/pyCA/blob/c89b168d4780d157e1b3f7676628c1b131956a88/pyca/ingest.py#L42-L105
|
[
"def",
"ingest",
"(",
"event",
")",
":",
"# Update status",
"set_service_status",
"(",
"Service",
".",
"INGEST",
",",
"ServiceStatus",
".",
"BUSY",
")",
"notify",
".",
"notify",
"(",
"'STATUS=Uploading'",
")",
"recording_state",
"(",
"event",
".",
"uid",
",",
"'uploading'",
")",
"update_event_status",
"(",
"event",
",",
"Status",
".",
"UPLOADING",
")",
"# Select ingest service",
"# The ingest service to use is selected at random from the available",
"# ingest services to ensure that not every capture agent uses the same",
"# service at the same time",
"service",
"=",
"config",
"(",
"'service-ingest'",
")",
"service",
"=",
"service",
"[",
"randrange",
"(",
"0",
",",
"len",
"(",
"service",
")",
")",
"]",
"logger",
".",
"info",
"(",
"'Selecting ingest service to use: '",
"+",
"service",
")",
"# create mediapackage",
"logger",
".",
"info",
"(",
"'Creating new mediapackage'",
")",
"mediapackage",
"=",
"http_request",
"(",
"service",
"+",
"'/createMediaPackage'",
")",
"# extract workflow_def, workflow_config and add DC catalogs",
"prop",
"=",
"'org.opencastproject.capture.agent.properties'",
"dcns",
"=",
"'http://www.opencastproject.org/xsd/1.0/dublincore/'",
"for",
"attachment",
"in",
"event",
".",
"get_data",
"(",
")",
".",
"get",
"(",
"'attach'",
")",
":",
"data",
"=",
"attachment",
".",
"get",
"(",
"'data'",
")",
"if",
"attachment",
".",
"get",
"(",
"'x-apple-filename'",
")",
"==",
"prop",
":",
"workflow_def",
",",
"workflow_config",
"=",
"get_config_params",
"(",
"data",
")",
"# Check for dublincore catalogs",
"elif",
"attachment",
".",
"get",
"(",
"'fmttype'",
")",
"==",
"'application/xml'",
"and",
"dcns",
"in",
"data",
":",
"name",
"=",
"attachment",
".",
"get",
"(",
"'x-apple-filename'",
",",
"''",
")",
".",
"rsplit",
"(",
"'.'",
",",
"1",
")",
"[",
"0",
"]",
"logger",
".",
"info",
"(",
"'Adding %s DC catalog'",
"%",
"name",
")",
"fields",
"=",
"[",
"(",
"'mediaPackage'",
",",
"mediapackage",
")",
",",
"(",
"'flavor'",
",",
"'dublincore/%s'",
"%",
"name",
")",
",",
"(",
"'dublinCore'",
",",
"data",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"]",
"mediapackage",
"=",
"http_request",
"(",
"service",
"+",
"'/addDCCatalog'",
",",
"fields",
")",
"# add track",
"for",
"(",
"flavor",
",",
"track",
")",
"in",
"event",
".",
"get_tracks",
"(",
")",
":",
"logger",
".",
"info",
"(",
"'Adding track ({0} -> {1})'",
".",
"format",
"(",
"flavor",
",",
"track",
")",
")",
"track",
"=",
"track",
".",
"encode",
"(",
"'ascii'",
",",
"'ignore'",
")",
"fields",
"=",
"[",
"(",
"'mediaPackage'",
",",
"mediapackage",
")",
",",
"(",
"'flavor'",
",",
"flavor",
")",
",",
"(",
"'BODY1'",
",",
"(",
"pycurl",
".",
"FORM_FILE",
",",
"track",
")",
")",
"]",
"mediapackage",
"=",
"http_request",
"(",
"service",
"+",
"'/addTrack'",
",",
"fields",
")",
"# ingest",
"logger",
".",
"info",
"(",
"'Ingest recording'",
")",
"fields",
"=",
"[",
"(",
"'mediaPackage'",
",",
"mediapackage",
")",
"]",
"if",
"workflow_def",
":",
"fields",
".",
"append",
"(",
"(",
"'workflowDefinitionId'",
",",
"workflow_def",
")",
")",
"if",
"event",
".",
"uid",
":",
"fields",
".",
"append",
"(",
"(",
"'workflowInstanceId'",
",",
"event",
".",
"uid",
".",
"encode",
"(",
"'ascii'",
",",
"'ignore'",
")",
")",
")",
"fields",
"+=",
"workflow_config",
"mediapackage",
"=",
"http_request",
"(",
"service",
"+",
"'/ingest'",
",",
"fields",
")",
"# Update status",
"recording_state",
"(",
"event",
".",
"uid",
",",
"'upload_finished'",
")",
"update_event_status",
"(",
"event",
",",
"Status",
".",
"FINISHED_UPLOADING",
")",
"notify",
".",
"notify",
"(",
"'STATUS=Running'",
")",
"set_service_status_immediate",
"(",
"Service",
".",
"INGEST",
",",
"ServiceStatus",
".",
"IDLE",
")",
"logger",
".",
"info",
"(",
"'Finished ingest'",
")"
] |
c89b168d4780d157e1b3f7676628c1b131956a88
|
test
|
safe_start_ingest
|
Start a capture process but make sure to catch any errors during this
process, log them but otherwise ignore them.
|
pyca/ingest.py
|
def safe_start_ingest(event):
'''Start a capture process but make sure to catch any errors during this
process, log them but otherwise ignore them.
'''
try:
ingest(event)
except Exception:
logger.error('Something went wrong during the upload')
logger.error(traceback.format_exc())
# Update state if something went wrong
recording_state(event.uid, 'upload_error')
update_event_status(event, Status.FAILED_UPLOADING)
set_service_status_immediate(Service.INGEST, ServiceStatus.IDLE)
|
def safe_start_ingest(event):
'''Start a capture process but make sure to catch any errors during this
process, log them but otherwise ignore them.
'''
try:
ingest(event)
except Exception:
logger.error('Something went wrong during the upload')
logger.error(traceback.format_exc())
# Update state if something went wrong
recording_state(event.uid, 'upload_error')
update_event_status(event, Status.FAILED_UPLOADING)
set_service_status_immediate(Service.INGEST, ServiceStatus.IDLE)
|
[
"Start",
"a",
"capture",
"process",
"but",
"make",
"sure",
"to",
"catch",
"any",
"errors",
"during",
"this",
"process",
"log",
"them",
"but",
"otherwise",
"ignore",
"them",
"."
] |
opencast/pyCA
|
python
|
https://github.com/opencast/pyCA/blob/c89b168d4780d157e1b3f7676628c1b131956a88/pyca/ingest.py#L108-L120
|
[
"def",
"safe_start_ingest",
"(",
"event",
")",
":",
"try",
":",
"ingest",
"(",
"event",
")",
"except",
"Exception",
":",
"logger",
".",
"error",
"(",
"'Something went wrong during the upload'",
")",
"logger",
".",
"error",
"(",
"traceback",
".",
"format_exc",
"(",
")",
")",
"# Update state if something went wrong",
"recording_state",
"(",
"event",
".",
"uid",
",",
"'upload_error'",
")",
"update_event_status",
"(",
"event",
",",
"Status",
".",
"FAILED_UPLOADING",
")",
"set_service_status_immediate",
"(",
"Service",
".",
"INGEST",
",",
"ServiceStatus",
".",
"IDLE",
")"
] |
c89b168d4780d157e1b3f7676628c1b131956a88
|
test
|
control_loop
|
Main loop of the capture agent, retrieving and checking the schedule as
well as starting the capture process if necessry.
|
pyca/ingest.py
|
def control_loop():
'''Main loop of the capture agent, retrieving and checking the schedule as
well as starting the capture process if necessry.
'''
set_service_status(Service.INGEST, ServiceStatus.IDLE)
notify.notify('READY=1')
notify.notify('STATUS=Running')
while not terminate():
notify.notify('WATCHDOG=1')
# Get next recording
event = get_session().query(RecordedEvent)\
.filter(RecordedEvent.status ==
Status.FINISHED_RECORDING).first()
if event:
safe_start_ingest(event)
time.sleep(1.0)
logger.info('Shutting down ingest service')
set_service_status(Service.INGEST, ServiceStatus.STOPPED)
|
def control_loop():
'''Main loop of the capture agent, retrieving and checking the schedule as
well as starting the capture process if necessry.
'''
set_service_status(Service.INGEST, ServiceStatus.IDLE)
notify.notify('READY=1')
notify.notify('STATUS=Running')
while not terminate():
notify.notify('WATCHDOG=1')
# Get next recording
event = get_session().query(RecordedEvent)\
.filter(RecordedEvent.status ==
Status.FINISHED_RECORDING).first()
if event:
safe_start_ingest(event)
time.sleep(1.0)
logger.info('Shutting down ingest service')
set_service_status(Service.INGEST, ServiceStatus.STOPPED)
|
[
"Main",
"loop",
"of",
"the",
"capture",
"agent",
"retrieving",
"and",
"checking",
"the",
"schedule",
"as",
"well",
"as",
"starting",
"the",
"capture",
"process",
"if",
"necessry",
"."
] |
opencast/pyCA
|
python
|
https://github.com/opencast/pyCA/blob/c89b168d4780d157e1b3f7676628c1b131956a88/pyca/ingest.py#L123-L140
|
[
"def",
"control_loop",
"(",
")",
":",
"set_service_status",
"(",
"Service",
".",
"INGEST",
",",
"ServiceStatus",
".",
"IDLE",
")",
"notify",
".",
"notify",
"(",
"'READY=1'",
")",
"notify",
".",
"notify",
"(",
"'STATUS=Running'",
")",
"while",
"not",
"terminate",
"(",
")",
":",
"notify",
".",
"notify",
"(",
"'WATCHDOG=1'",
")",
"# Get next recording",
"event",
"=",
"get_session",
"(",
")",
".",
"query",
"(",
"RecordedEvent",
")",
".",
"filter",
"(",
"RecordedEvent",
".",
"status",
"==",
"Status",
".",
"FINISHED_RECORDING",
")",
".",
"first",
"(",
")",
"if",
"event",
":",
"safe_start_ingest",
"(",
"event",
")",
"time",
".",
"sleep",
"(",
"1.0",
")",
"logger",
".",
"info",
"(",
"'Shutting down ingest service'",
")",
"set_service_status",
"(",
"Service",
".",
"INGEST",
",",
"ServiceStatus",
".",
"STOPPED",
")"
] |
c89b168d4780d157e1b3f7676628c1b131956a88
|
test
|
sigterm_handler
|
Intercept sigterm and terminate all processes.
|
pyca/capture.py
|
def sigterm_handler(signum, frame):
'''Intercept sigterm and terminate all processes.
'''
if captureproc and captureproc.poll() is None:
captureproc.terminate()
terminate(True)
sys.exit(0)
|
def sigterm_handler(signum, frame):
'''Intercept sigterm and terminate all processes.
'''
if captureproc and captureproc.poll() is None:
captureproc.terminate()
terminate(True)
sys.exit(0)
|
[
"Intercept",
"sigterm",
"and",
"terminate",
"all",
"processes",
"."
] |
opencast/pyCA
|
python
|
https://github.com/opencast/pyCA/blob/c89b168d4780d157e1b3f7676628c1b131956a88/pyca/capture.py#L32-L38
|
[
"def",
"sigterm_handler",
"(",
"signum",
",",
"frame",
")",
":",
"if",
"captureproc",
"and",
"captureproc",
".",
"poll",
"(",
")",
"is",
"None",
":",
"captureproc",
".",
"terminate",
"(",
")",
"terminate",
"(",
"True",
")",
"sys",
".",
"exit",
"(",
"0",
")"
] |
c89b168d4780d157e1b3f7676628c1b131956a88
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.