sentence1
stringlengths 52
3.87M
| sentence2
stringlengths 1
47.2k
| label
stringclasses 1
value |
|---|---|---|
def depth_september_average_ground_temperature(self, value=None):
    """Set IDD Field `depth_september_average_ground_temperature` (Unit: C).

    Args:
        value (float): new field value; None marks the value as missing
            and skips validation.

    Raises:
        ValueError: if `value` cannot be converted to float
    """
    if value is None:
        self._depth_september_average_ground_temperature = None
        return
    try:
        converted = float(value)
    except ValueError:
        raise ValueError(
            'value {} need to be of type float '
            'for field `depth_september_average_ground_temperature`'.format(value))
    self._depth_september_average_ground_temperature = converted
|
Corresponds to IDD Field
`depth_september_average_ground_temperature`
Args:
value (float): value for IDD Field `depth_september_average_ground_temperature`
Unit: C
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
entailment
|
def depth_october_average_ground_temperature(self, value=None):
    """Set IDD Field `depth_october_average_ground_temperature` (Unit: C).

    Args:
        value (float): new field value; None marks the value as missing
            and skips validation.

    Raises:
        ValueError: if `value` cannot be converted to float
    """
    if value is None:
        self._depth_october_average_ground_temperature = None
        return
    try:
        converted = float(value)
    except ValueError:
        raise ValueError(
            'value {} need to be of type float '
            'for field `depth_october_average_ground_temperature`'.format(value))
    self._depth_october_average_ground_temperature = converted
|
Corresponds to IDD Field `depth_october_average_ground_temperature`
Args:
value (float): value for IDD Field `depth_october_average_ground_temperature`
Unit: C
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
entailment
|
def depth_november_average_ground_temperature(self, value=None):
    """Set IDD Field `depth_november_average_ground_temperature` (Unit: C).

    Args:
        value (float): new field value; None marks the value as missing
            and skips validation.

    Raises:
        ValueError: if `value` cannot be converted to float
    """
    if value is None:
        self._depth_november_average_ground_temperature = None
        return
    try:
        converted = float(value)
    except ValueError:
        raise ValueError(
            'value {} need to be of type float '
            'for field `depth_november_average_ground_temperature`'.format(value))
    self._depth_november_average_ground_temperature = converted
|
Corresponds to IDD Field `depth_november_average_ground_temperature`
Args:
value (float): value for IDD Field `depth_november_average_ground_temperature`
Unit: C
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
entailment
|
def depth_december_average_ground_temperature(self, value=None):
    """Set IDD Field `depth_december_average_ground_temperature` (Unit: C).

    Args:
        value (float): new field value; None marks the value as missing
            and skips validation.

    Raises:
        ValueError: if `value` cannot be converted to float
    """
    if value is None:
        self._depth_december_average_ground_temperature = None
        return
    try:
        converted = float(value)
    except ValueError:
        raise ValueError(
            'value {} need to be of type float '
            'for field `depth_december_average_ground_temperature`'.format(value))
    self._depth_december_average_ground_temperature = converted
|
Corresponds to IDD Field `depth_december_average_ground_temperature`
Args:
value (float): value for IDD Field `depth_december_average_ground_temperature`
Unit: C
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
entailment
|
def export(self, top=True):
    """Export this object as a comma-separated string.

    Args:
        top (bool): when True the object's `internal_name` is prepended;
            non-list objects should use top=True, list objects embedded
            as fields in other objects should use `top`=False.

    Returns:
        str: The objects string representation
    """
    months = ["january", "february", "march", "april", "may", "june",
              "july", "august", "september", "october", "november",
              "december"]
    # Field order mirrors the IDD layout: four depth/soil fields,
    # then one average ground temperature per month.
    fields = [self.ground_temperature_depth,
              self.depth_soil_conductivity,
              self.depth_soil_density,
              self.depth_soil_specific_heat]
    fields += [getattr(self, "depth_%s_average_ground_temperature" % m)
               for m in months]
    parts = [self._internal_name] if top else []
    parts += [self._to_str(f) for f in fields]
    return ",".join(parts)
|
Exports object to its string representation.
Args:
top (bool): if True appends `internal_name` before values.
All non list objects should be exported with value top=True,
all list objects, that are embedded in as fields inlist objects
should be exported with `top`=False
Returns:
str: The objects string representation
|
entailment
|
def read(self, vals):
    """Read a count-prefixed list of ground-temperature records.

    Args:
        vals (list): list of strings representing values; vals[0] is the
            record count, followed by `field_count` values per record.
    """
    pos = 0
    count = int(vals[pos])
    pos += 1
    for _ in range(count):
        record = GroundTemperature()
        record.read(vals[pos:pos + record.field_count])
        self.add_ground_temperature(record)
        pos += record.field_count
|
Read values.
Args:
vals (list): list of strings representing values
|
entailment
|
def read(self, vals):
    """Read this object's holiday fields from *vals*.

    Empty strings are stored as None (missing value).

    Args:
        vals (list): list of strings representing values
    """
    self.holiday_name = vals[0] if len(vals[0]) > 0 else None
    self.holiday_day = vals[1] if len(vals[1]) > 0 else None
|
Read values.
Args:
vals (list): list of strings representing values
|
entailment
|
def holiday_name(self, value=None):
    """Set IDD Field `holiday_name`.

    Args:
        value (str): new field value; None marks the value as missing
            and skips validation. Commas are rejected because they are
            the field separator in the export format.

    Raises:
        ValueError: if `value` cannot be converted to str or contains a comma
    """
    if value is None:
        self._holiday_name = None
        return
    try:
        value = str(value)
    except ValueError:
        raise ValueError('value {} need to be of type str '
                         'for field `holiday_name`'.format(value))
    if ',' in value:
        raise ValueError('value should not contain a comma '
                         'for field `holiday_name`')
    self._holiday_name = value
|
Corresponds to IDD Field `holiday_name`
Args:
value (str): value for IDD Field `holiday_name`
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
entailment
|
def holiday_day(self, value=None):
    """Set IDD Field `holiday_day`.

    Args:
        value (str): new field value; None marks the value as missing
            and skips validation. Commas are rejected because they are
            the field separator in the export format.

    Raises:
        ValueError: if `value` cannot be converted to str or contains a comma
    """
    if value is None:
        self._holiday_day = None
        return
    try:
        value = str(value)
    except ValueError:
        raise ValueError('value {} need to be of type str '
                         'for field `holiday_day`'.format(value))
    if ',' in value:
        raise ValueError('value should not contain a comma '
                         'for field `holiday_day`')
    self._holiday_day = value
|
Corresponds to IDD Field `holiday_day`
Args:
value (str): value for IDD Field `holiday_day`
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
entailment
|
def export(self, top=True):
    """Export this object as a comma-separated string.

    Args:
        top (bool): when True the object's `internal_name` is prepended;
            non-list objects should use top=True, list objects embedded
            as fields in other objects should use `top`=False.

    Returns:
        str: The objects string representation
    """
    parts = [self._internal_name] if top else []
    parts += [self._to_str(v) for v in (self.holiday_name,
                                        self.holiday_day)]
    return ",".join(parts)
|
Exports object to its string representation.
Args:
top (bool): if True appends `internal_name` before values.
All non list objects should be exported with value top=True,
all list objects, that are embedded in as fields inlist objects
should be exported with `top`=False
Returns:
str: The objects string representation
|
entailment
|
def read(self, vals):
    """Read leap-year/daylight-saving fields plus nested holidays.

    Empty scalar strings are stored as None (missing value). After the
    three scalar fields comes a record count, then `field_count` values
    per holiday record.

    Args:
        vals (list): list of strings representing values
    """
    scalar_fields = ["leapyear_observed",
                     "daylight_saving_start_day",
                     "daylight_saving_end_day"]
    pos = 0
    for name in scalar_fields:
        raw = vals[pos]
        setattr(self, name, raw if len(raw) > 0 else None)
        pos += 1
    count = int(vals[pos])
    pos += 1
    for _ in range(count):
        holiday = Holiday()
        holiday.read(vals[pos:pos + holiday.field_count])
        self.add_holiday(holiday)
        pos += holiday.field_count
|
Read values.
Args:
vals (list): list of strings representing values
|
entailment
|
def leapyear_observed(self, value=None):
    """Set IDD Field `leapyear_observed`.

    "Yes" means Leap Year will be observed for this file; "No" means
    Leap Year days (29 Feb) should be ignored in this file.

    Args:
        value (str): either "Yes" or "No"; None marks the value as
            missing and skips validation.

    Raises:
        ValueError: if `value` is not a valid value
    """
    if value is None:
        self._leapyear_observed = None
        return
    try:
        value = str(value)
    except ValueError:
        raise ValueError('value {} need to be of type str '
                         'for field `leapyear_observed`'.format(value))
    if ',' in value:
        raise ValueError('value should not contain a comma '
                         'for field `leapyear_observed`')
    if value not in ("Yes", "No"):
        raise ValueError('value {} is not an accepted value for '
                         'field `leapyear_observed`'.format(value))
    self._leapyear_observed = value
|
Corresponds to IDD Field `leapyear_observed` Yes if Leap Year will
be observed for this file No if Leap Year days (29 Feb) should be
ignored in this file.
Args:
value (str): value for IDD Field `leapyear_observed`
Accepted values are:
- Yes
- No
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
entailment
|
def daylight_saving_start_day(self, value=None):
    """Set IDD Field `daylight_saving_start_day`.

    Args:
        value (str): new field value; None marks the value as missing
            and skips validation. Commas are rejected because they are
            the field separator in the export format.

    Raises:
        ValueError: if `value` cannot be converted to str or contains a comma
    """
    if value is None:
        self._daylight_saving_start_day = None
        return
    try:
        value = str(value)
    except ValueError:
        raise ValueError(
            'value {} need to be of type str '
            'for field `daylight_saving_start_day`'.format(value))
    if ',' in value:
        raise ValueError('value should not contain a comma '
                         'for field `daylight_saving_start_day`')
    self._daylight_saving_start_day = value
|
Corresponds to IDD Field `daylight_saving_start_day`
Args:
value (str): value for IDD Field `daylight_saving_start_day`
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
entailment
|
def daylight_saving_end_day(self, value=None):
    """Set IDD Field `daylight_saving_end_day`.

    Args:
        value (str): new field value; None marks the value as missing
            and skips validation. Commas are rejected because they are
            the field separator in the export format.

    Raises:
        ValueError: if `value` cannot be converted to str or contains a comma
    """
    if value is None:
        self._daylight_saving_end_day = None
        return
    try:
        value = str(value)
    except ValueError:
        raise ValueError(
            'value {} need to be of type str '
            'for field `daylight_saving_end_day`'.format(value))
    if ',' in value:
        raise ValueError('value should not contain a comma '
                         'for field `daylight_saving_end_day`')
    self._daylight_saving_end_day = value
|
Corresponds to IDD Field `daylight_saving_end_day`
Args:
value (str): value for IDD Field `daylight_saving_end_day`
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
entailment
|
def export(self, top=True):
    """Export this object as a comma-separated string.

    Nested holiday records are appended after a count field, each
    exported with top=False.

    Args:
        top (bool): when True the object's `internal_name` is prepended;
            non-list objects should use top=True, list objects embedded
            as fields in other objects should use `top`=False.

    Returns:
        str: The objects string representation
    """
    parts = [self._internal_name] if top else []
    parts += [self._to_str(v) for v in (self.leapyear_observed,
                                        self.daylight_saving_start_day,
                                        self.daylight_saving_end_day)]
    parts.append(str(len(self.holidays)))
    parts += [holiday.export(top=False) for holiday in self.holidays]
    return ",".join(parts)
|
Exports object to its string representation.
Args:
top (bool): if True appends `internal_name` before values.
All non list objects should be exported with value top=True,
all list objects, that are embedded in as fields inlist objects
should be exported with `top`=False
Returns:
str: The objects string representation
|
entailment
|
def read(self, vals):
    """Read this object's single comment field from *vals*.

    An empty string is stored as None (missing value).

    Args:
        vals (list): list of strings representing values
    """
    self.comments_1 = vals[0] if len(vals[0]) > 0 else None
|
Read values.
Args:
vals (list): list of strings representing values
|
entailment
|
def comments_1(self, value=None):
    """Set IDD Field `comments_1`.

    Args:
        value (str): new field value; None marks the value as missing
            and skips validation. Commas are rejected because they are
            the field separator in the export format.

    Raises:
        ValueError: if `value` cannot be converted to str or contains a comma
    """
    if value is None:
        self._comments_1 = None
        return
    try:
        value = str(value)
    except ValueError:
        raise ValueError('value {} need to be of type str '
                         'for field `comments_1`'.format(value))
    if ',' in value:
        raise ValueError('value should not contain a comma '
                         'for field `comments_1`')
    self._comments_1 = value
|
Corresponds to IDD Field `comments_1`
Args:
value (str): value for IDD Field `comments_1`
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
entailment
|
def read(self, vals):
    """Read this object's single comment field from *vals*.

    An empty string is stored as None (missing value).

    Args:
        vals (list): list of strings representing values
    """
    self.comments_2 = vals[0] if len(vals[0]) > 0 else None
|
Read values.
Args:
vals (list): list of strings representing values
|
entailment
|
def comments_2(self, value=None):
    """Set IDD Field `comments_2`.

    Args:
        value (str): new field value; None marks the value as missing
            and skips validation. Commas are rejected because they are
            the field separator in the export format.

    Raises:
        ValueError: if `value` cannot be converted to str or contains a comma
    """
    if value is None:
        self._comments_2 = None
        return
    try:
        value = str(value)
    except ValueError:
        raise ValueError('value {} need to be of type str '
                         'for field `comments_2`'.format(value))
    if ',' in value:
        raise ValueError('value should not contain a comma '
                         'for field `comments_2`')
    self._comments_2 = value
|
Corresponds to IDD Field `comments_2`
Args:
value (str): value for IDD Field `comments_2`
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
entailment
|
def export(self, top=True):
    """Export this object as a comma-separated string.

    Args:
        top (bool): when True the object's `internal_name` is prepended;
            non-list objects should use top=True, list objects embedded
            as fields in other objects should use `top`=False.

    Returns:
        str: The objects string representation
    """
    parts = [self._internal_name] if top else []
    parts.append(self._to_str(self.comments_2))
    return ",".join(parts)
|
Exports object to its string representation.
Args:
top (bool): if True appends `internal_name` before values.
All non list objects should be exported with value top=True,
all list objects, that are embedded in as fields inlist objects
should be exported with `top`=False
Returns:
str: The objects string representation
|
entailment
|
def read(self, vals):
    """Read this object's data-period fields from *vals*.

    Empty strings are stored as None (missing value). Field order
    mirrors the IDD layout.

    Args:
        vals (list): list of strings representing values
    """
    field_names = [
        "number_of_records_per_hour",
        "data_period_name_or_description",
        "data_period_start_day_of_week",
        "data_period_start_day",
        "data_period_end_day",
    ]
    for pos, name in enumerate(field_names):
        raw = vals[pos]
        setattr(self, name, raw if len(raw) > 0 else None)
|
Read values.
Args:
vals (list): list of strings representing values
|
entailment
|
def number_of_records_per_hour(self, value=None):
    """Set IDD Field `number_of_records_per_hour`.

    Args:
        value (int): new field value; None marks the value as missing
            and skips validation.

    Raises:
        ValueError: if `value` cannot be converted to int
    """
    if value is None:
        self._number_of_records_per_hour = None
        return
    try:
        converted = int(value)
    except ValueError:
        raise ValueError(
            'value {} need to be of type int '
            'for field `number_of_records_per_hour`'.format(value))
    self._number_of_records_per_hour = converted
|
Corresponds to IDD Field `number_of_records_per_hour`
Args:
value (int): value for IDD Field `number_of_records_per_hour`
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
entailment
|
def data_period_name_or_description(self, value=None):
    """Set IDD Field `data_period_name_or_description`.

    Args:
        value (str): new field value; None marks the value as missing
            and skips validation. Commas are rejected because they are
            the field separator in the export format.

    Raises:
        ValueError: if `value` cannot be converted to str or contains a comma
    """
    if value is None:
        self._data_period_name_or_description = None
        return
    try:
        value = str(value)
    except ValueError:
        raise ValueError(
            'value {} need to be of type str '
            'for field `data_period_name_or_description`'.format(value))
    if ',' in value:
        raise ValueError('value should not contain a comma '
                         'for field `data_period_name_or_description`')
    self._data_period_name_or_description = value
|
Corresponds to IDD Field `data_period_name_or_description`
Args:
value (str): value for IDD Field `data_period_name_or_description`
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
entailment
|
def data_period_start_day_of_week(self, value=None):
    """Set IDD Field `data_period_start_day_of_week`.

    Args:
        value (str): one of the English weekday names ("Sunday" through
            "Saturday"); None marks the value as missing and skips
            validation.

    Raises:
        ValueError: if `value` is not a valid value
    """
    if value is None:
        self._data_period_start_day_of_week = None
        return
    try:
        value = str(value)
    except ValueError:
        raise ValueError(
            'value {} need to be of type str '
            'for field `data_period_start_day_of_week`'.format(value))
    if ',' in value:
        raise ValueError('value should not contain a comma '
                         'for field `data_period_start_day_of_week`')
    weekdays = ("Sunday", "Monday", "Tuesday", "Wednesday",
                "Thursday", "Friday", "Saturday")
    if value not in weekdays:
        raise ValueError(
            'value {} is not an accepted value for '
            'field `data_period_start_day_of_week`'.format(value))
    self._data_period_start_day_of_week = value
|
Corresponds to IDD Field `data_period_start_day_of_week`
Args:
value (str): value for IDD Field `data_period_start_day_of_week`
Accepted values are:
- Sunday
- Monday
- Tuesday
- Wednesday
- Thursday
- Friday
- Saturday
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
entailment
|
def data_period_start_day(self, value=None):
    """Set IDD Field `data_period_start_day`.

    Args:
        value (str): new field value; None marks the value as missing
            and skips validation. Commas are rejected because they are
            the field separator in the export format.

    Raises:
        ValueError: if `value` cannot be converted to str or contains a comma
    """
    if value is None:
        self._data_period_start_day = None
        return
    try:
        value = str(value)
    except ValueError:
        raise ValueError(
            'value {} need to be of type str '
            'for field `data_period_start_day`'.format(value))
    if ',' in value:
        raise ValueError('value should not contain a comma '
                         'for field `data_period_start_day`')
    self._data_period_start_day = value
|
Corresponds to IDD Field `data_period_start_day`
Args:
value (str): value for IDD Field `data_period_start_day`
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
entailment
|
def data_period_end_day(self, value=None):
    """Set IDD Field `data_period_end_day`.

    Args:
        value (str): new field value; None marks the value as missing
            and skips validation. Commas are rejected because they are
            the field separator in the export format.

    Raises:
        ValueError: if `value` cannot be converted to str or contains a comma
    """
    if value is None:
        self._data_period_end_day = None
        return
    try:
        value = str(value)
    except ValueError:
        raise ValueError(
            'value {} need to be of type str '
            'for field `data_period_end_day`'.format(value))
    if ',' in value:
        raise ValueError('value should not contain a comma '
                         'for field `data_period_end_day`')
    self._data_period_end_day = value
|
Corresponds to IDD Field `data_period_end_day`
Args:
value (str): value for IDD Field `data_period_end_day`
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
entailment
|
def export(self, top=True):
    """Export this object as a comma-separated string.

    Args:
        top (bool): when True the object's `internal_name` is prepended;
            non-list objects should use top=True, list objects embedded
            as fields in other objects should use `top`=False.

    Returns:
        str: The objects string representation
    """
    parts = [self._internal_name] if top else []
    parts += [self._to_str(v) for v in (self.number_of_records_per_hour,
                                        self.data_period_name_or_description,
                                        self.data_period_start_day_of_week,
                                        self.data_period_start_day,
                                        self.data_period_end_day)]
    return ",".join(parts)
|
Exports object to its string representation.
Args:
top (bool): if True appends `internal_name` before values.
All non list objects should be exported with value top=True,
all list objects, that are embedded in as fields inlist objects
should be exported with `top`=False
Returns:
str: The objects string representation
|
entailment
|
def read(self, vals):
    """Read a count-prefixed list of data-period records.

    Args:
        vals (list): list of strings representing values; vals[0] is the
            record count, followed by `field_count` values per record.
    """
    pos = 0
    count = int(vals[pos])
    pos += 1
    for _ in range(count):
        record = DataPeriod()
        record.read(vals[pos:pos + record.field_count])
        self.add_data_period(record)
        pos += record.field_count
|
Read values.
Args:
vals (list): list of strings representing values
|
entailment
|
def read(self, vals):
    """Read this object's weather-record fields from *vals*.

    Empty strings are stored as None (missing value). Field order
    mirrors the EPW data-record layout, one value per field.

    Args:
        vals (list): list of strings representing values
    """
    field_names = [
        "year", "month", "day", "hour", "minute",
        "data_source_and_uncertainty_flags",
        "dry_bulb_temperature", "dew_point_temperature",
        "relative_humidity", "atmospheric_station_pressure",
        "extraterrestrial_horizontal_radiation",
        "extraterrestrial_direct_normal_radiation",
        "horizontal_infrared_radiation_intensity",
        "global_horizontal_radiation", "direct_normal_radiation",
        "diffuse_horizontal_radiation",
        "global_horizontal_illuminance", "direct_normal_illuminance",
        "diffuse_horizontal_illuminance", "zenith_luminance",
        "wind_direction", "wind_speed",
        "total_sky_cover", "opaque_sky_cover",
        "visibility", "ceiling_height",
        "present_weather_observation", "present_weather_codes",
        "precipitable_water", "aerosol_optical_depth",
        "snow_depth", "days_since_last_snowfall",
        "albedo", "liquid_precipitation_depth",
        "liquid_precipitation_quantity",
    ]
    # Index explicitly (not zip) so a too-short vals still raises
    # IndexError exactly like the original field-by-field reads.
    for pos, name in enumerate(field_names):
        raw = vals[pos]
        setattr(self, name, raw if len(raw) > 0 else None)
|
Read values.
Args:
vals (list): list of strings representing values
|
entailment
|
def year(self, value=None):
    """Set IDD Field `year`.

    Args:
        value (int): new field value; None marks the value as missing
            and skips validation.

    Raises:
        ValueError: if `value` cannot be converted to int
    """
    if value is None:
        self._year = None
        return
    try:
        converted = int(value)
    except ValueError:
        raise ValueError('value {} need to be of type int '
                         'for field `year`'.format(value))
    self._year = converted
|
Corresponds to IDD Field `year`
Args:
value (int): value for IDD Field `year`
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
entailment
|
def month(self, value=None):
    """Set IDD Field `month`.

    Args:
        value (int): value for IDD Field `month`; must satisfy
            1 <= value <= 12. If `value` is None it will not be checked
            against the specification and is assumed to be a missing value.

    Raises:
        ValueError: if `value` is not a valid value
    """
    if value is not None:
        try:
            value = int(value)
        except ValueError:
            raise ValueError('value {} need to be of type int '
                             'for field `month`'.format(value))
        if value < 1:
            raise ValueError('value need to be greater or equal 1 '
                             'for field `month`')
        if value > 12:
            # Upper bound is inclusive (12 itself is accepted); the old
            # message "smaller 12" misstated the constraint.
            raise ValueError('value need to be smaller or equal 12 '
                             'for field `month`')
    self._month = value
|
Corresponds to IDD Field `month`
Args:
value (int): value for IDD Field `month`
value >= 1
value <= 12
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
entailment
|
def day(self, value=None):
    """Set IDD Field `day`.

    Args:
        value (int): value for IDD Field `day`; must satisfy
            1 <= value <= 31. If `value` is None it will not be checked
            against the specification and is assumed to be a missing value.

    Raises:
        ValueError: if `value` is not a valid value
    """
    if value is not None:
        try:
            value = int(value)
        except ValueError:
            raise ValueError('value {} need to be of type int '
                             'for field `day`'.format(value))
        if value < 1:
            raise ValueError('value need to be greater or equal 1 '
                             'for field `day`')
        if value > 31:
            # Upper bound is inclusive (31 itself is accepted); the old
            # message "smaller 31" misstated the constraint.
            raise ValueError('value need to be smaller or equal 31 '
                             'for field `day`')
    self._day = value
|
Corresponds to IDD Field `day`
Args:
value (int): value for IDD Field `day`
value >= 1
value <= 31
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
entailment
|
def hour(self, value=None):
    """Set IDD Field `hour`.

    Args:
        value (int): value for IDD Field `hour`; must satisfy
            1 <= value <= 24 (EPW hours are 1-based). If `value` is None
            it will not be checked against the specification and is
            assumed to be a missing value.

    Raises:
        ValueError: if `value` is not a valid value
    """
    if value is not None:
        try:
            value = int(value)
        except ValueError:
            raise ValueError('value {} need to be of type int '
                             'for field `hour`'.format(value))
        if value < 1:
            raise ValueError('value need to be greater or equal 1 '
                             'for field `hour`')
        if value > 24:
            # Upper bound is inclusive (24 itself is accepted); the old
            # message "smaller 24" misstated the constraint.
            raise ValueError('value need to be smaller or equal 24 '
                             'for field `hour`')
    self._hour = value
|
Corresponds to IDD Field `hour`
Args:
value (int): value for IDD Field `hour`
value >= 1
value <= 24
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
entailment
|
def minute(self, value=None):
    """Set IDD Field `minute`.

    Args:
        value (int): value for IDD Field `minute`; must satisfy
            0 <= value <= 60. If `value` is None it will not be checked
            against the specification and is assumed to be a missing value.

    Raises:
        ValueError: if `value` is not a valid value
    """
    if value is not None:
        try:
            value = int(value)
        except ValueError:
            raise ValueError('value {} need to be of type int '
                             'for field `minute`'.format(value))
        if value < 0:
            raise ValueError('value need to be greater or equal 0 '
                             'for field `minute`')
        if value > 60:
            # Upper bound is inclusive (60 itself is accepted); the old
            # message "smaller 60" misstated the constraint.
            raise ValueError('value need to be smaller or equal 60 '
                             'for field `minute`')
    self._minute = value
|
Corresponds to IDD Field `minute`
Args:
value (int): value for IDD Field `minute`
value >= 0
value <= 60
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
entailment
|
def data_source_and_uncertainty_flags(self, value=None):
"""Corresponds to IDD Field `data_source_and_uncertainty_flags` Initial
day of weather file is checked by EnergyPlus for validity (as shown
below) Each field is checked for "missing" as shown below. Reasonable
values, calculated values or the last "good" value is substituted.
Args:
value (str): value for IDD Field `data_source_and_uncertainty_flags`
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
"""
if value is not None:
try:
value = str(value)
except ValueError:
raise ValueError(
'value {} need to be of type str '
'for field `data_source_and_uncertainty_flags`'.format(value))
if ',' in value:
raise ValueError(
'value should not contain a comma '
'for field `data_source_and_uncertainty_flags`')
self._data_source_and_uncertainty_flags = value
|
Corresponds to IDD Field `data_source_and_uncertainty_flags` Initial
day of weather file is checked by EnergyPlus for validity (as shown
below) Each field is checked for "missing" as shown below. Reasonable
values, calculated values or the last "good" value is substituted.
Args:
value (str): value for IDD Field `data_source_and_uncertainty_flags`
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
entailment
|
def dry_bulb_temperature(self, value=99.9):
"""Corresponds to IDD Field `dry_bulb_temperature`
Args:
value (float): value for IDD Field `dry_bulb_temperature`
Unit: C
value > -70.0
value < 70.0
Missing value: 99.9
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
"""
if value is not None:
try:
value = float(value)
except ValueError:
raise ValueError(
'value {} need to be of type float '
'for field `dry_bulb_temperature`'.format(value))
if value <= -70.0:
raise ValueError('value need to be greater -70.0 '
'for field `dry_bulb_temperature`')
if value >= 70.0:
raise ValueError('value need to be smaller 70.0 '
'for field `dry_bulb_temperature`')
self._dry_bulb_temperature = value
|
Corresponds to IDD Field `dry_bulb_temperature`
Args:
value (float): value for IDD Field `dry_bulb_temperature`
Unit: C
value > -70.0
value < 70.0
Missing value: 99.9
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
entailment
|
def dew_point_temperature(self, value=99.9):
"""Corresponds to IDD Field `dew_point_temperature`
Args:
value (float): value for IDD Field `dew_point_temperature`
Unit: C
value > -70.0
value < 70.0
Missing value: 99.9
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
"""
if value is not None:
try:
value = float(value)
except ValueError:
raise ValueError(
'value {} need to be of type float '
'for field `dew_point_temperature`'.format(value))
if value <= -70.0:
raise ValueError('value need to be greater -70.0 '
'for field `dew_point_temperature`')
if value >= 70.0:
raise ValueError('value need to be smaller 70.0 '
'for field `dew_point_temperature`')
self._dew_point_temperature = value
|
Corresponds to IDD Field `dew_point_temperature`
Args:
value (float): value for IDD Field `dew_point_temperature`
Unit: C
value > -70.0
value < 70.0
Missing value: 99.9
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
entailment
|
def relative_humidity(self, value=999):
"""Corresponds to IDD Field `relative_humidity`
Args:
value (int): value for IDD Field `relative_humidity`
value >= 0
value <= 110
Missing value: 999
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
"""
if value is not None:
try:
value = int(value)
except ValueError:
raise ValueError('value {} need to be of type int '
'for field `relative_humidity`'.format(value))
if value < 0:
raise ValueError('value need to be greater or equal 0 '
'for field `relative_humidity`')
if value > 110:
raise ValueError('value need to be smaller 110 '
'for field `relative_humidity`')
self._relative_humidity = value
|
Corresponds to IDD Field `relative_humidity`
Args:
value (int): value for IDD Field `relative_humidity`
value >= 0
value <= 110
Missing value: 999
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
entailment
|
def atmospheric_station_pressure(self, value=999999):
"""Corresponds to IDD Field `atmospheric_station_pressure`
Args:
value (int): value for IDD Field `atmospheric_station_pressure`
Unit: Pa
value > 31000
value < 120000
Missing value: 999999
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
"""
if value is not None:
try:
value = int(value)
except ValueError:
raise ValueError(
'value {} need to be of type int '
'for field `atmospheric_station_pressure`'.format(value))
if value <= 31000:
raise ValueError('value need to be greater 31000 '
'for field `atmospheric_station_pressure`')
if value >= 120000:
raise ValueError('value need to be smaller 120000 '
'for field `atmospheric_station_pressure`')
self._atmospheric_station_pressure = value
|
Corresponds to IDD Field `atmospheric_station_pressure`
Args:
value (int): value for IDD Field `atmospheric_station_pressure`
Unit: Pa
value > 31000
value < 120000
Missing value: 999999
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
entailment
|
def extraterrestrial_horizontal_radiation(self, value=9999.0):
"""Corresponds to IDD Field `extraterrestrial_horizontal_radiation`
Args:
value (float): value for IDD Field `extraterrestrial_horizontal_radiation`
Unit: Wh/m2
value >= 0.0
Missing value: 9999.0
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
"""
if value is not None:
try:
value = float(value)
except ValueError:
raise ValueError(
'value {} need to be of type float '
'for field `extraterrestrial_horizontal_radiation`'.format(value))
if value < 0.0:
raise ValueError(
'value need to be greater or equal 0.0 '
'for field `extraterrestrial_horizontal_radiation`')
self._extraterrestrial_horizontal_radiation = value
|
Corresponds to IDD Field `extraterrestrial_horizontal_radiation`
Args:
value (float): value for IDD Field `extraterrestrial_horizontal_radiation`
Unit: Wh/m2
value >= 0.0
Missing value: 9999.0
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
entailment
|
def extraterrestrial_direct_normal_radiation(self, value=9999.0):
"""Corresponds to IDD Field `extraterrestrial_direct_normal_radiation`
Args:
value (float): value for IDD Field `extraterrestrial_direct_normal_radiation`
Unit: Wh/m2
value >= 0.0
Missing value: 9999.0
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
"""
if value is not None:
try:
value = float(value)
except ValueError:
raise ValueError(
'value {} need to be of type float '
'for field `extraterrestrial_direct_normal_radiation`'.format(value))
if value < 0.0:
raise ValueError(
'value need to be greater or equal 0.0 '
'for field `extraterrestrial_direct_normal_radiation`')
self._extraterrestrial_direct_normal_radiation = value
|
Corresponds to IDD Field `extraterrestrial_direct_normal_radiation`
Args:
value (float): value for IDD Field `extraterrestrial_direct_normal_radiation`
Unit: Wh/m2
value >= 0.0
Missing value: 9999.0
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
entailment
|
def horizontal_infrared_radiation_intensity(self, value=9999.0):
"""Corresponds to IDD Field `horizontal_infrared_radiation_intensity`
Args:
value (float): value for IDD Field `horizontal_infrared_radiation_intensity`
Unit: Wh/m2
value >= 0.0
Missing value: 9999.0
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
"""
if value is not None:
try:
value = float(value)
except ValueError:
raise ValueError(
'value {} need to be of type float '
'for field `horizontal_infrared_radiation_intensity`'.format(value))
if value < 0.0:
raise ValueError(
'value need to be greater or equal 0.0 '
'for field `horizontal_infrared_radiation_intensity`')
self._horizontal_infrared_radiation_intensity = value
|
Corresponds to IDD Field `horizontal_infrared_radiation_intensity`
Args:
value (float): value for IDD Field `horizontal_infrared_radiation_intensity`
Unit: Wh/m2
value >= 0.0
Missing value: 9999.0
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
entailment
|
def global_horizontal_radiation(self, value=9999.0):
"""Corresponds to IDD Field `global_horizontal_radiation`
Args:
value (float): value for IDD Field `global_horizontal_radiation`
Unit: Wh/m2
value >= 0.0
Missing value: 9999.0
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
"""
if value is not None:
try:
value = float(value)
except ValueError:
raise ValueError(
'value {} need to be of type float '
'for field `global_horizontal_radiation`'.format(value))
if value < 0.0:
raise ValueError('value need to be greater or equal 0.0 '
'for field `global_horizontal_radiation`')
self._global_horizontal_radiation = value
|
Corresponds to IDD Field `global_horizontal_radiation`
Args:
value (float): value for IDD Field `global_horizontal_radiation`
Unit: Wh/m2
value >= 0.0
Missing value: 9999.0
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
entailment
|
def direct_normal_radiation(self, value=9999.0):
"""Corresponds to IDD Field `direct_normal_radiation`
Args:
value (float): value for IDD Field `direct_normal_radiation`
Unit: Wh/m2
value >= 0.0
Missing value: 9999.0
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
"""
if value is not None:
try:
value = float(value)
except ValueError:
raise ValueError(
'value {} need to be of type float '
'for field `direct_normal_radiation`'.format(value))
if value < 0.0:
raise ValueError('value need to be greater or equal 0.0 '
'for field `direct_normal_radiation`')
self._direct_normal_radiation = value
|
Corresponds to IDD Field `direct_normal_radiation`
Args:
value (float): value for IDD Field `direct_normal_radiation`
Unit: Wh/m2
value >= 0.0
Missing value: 9999.0
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
entailment
|
def diffuse_horizontal_radiation(self, value=9999.0):
"""Corresponds to IDD Field `diffuse_horizontal_radiation`
Args:
value (float): value for IDD Field `diffuse_horizontal_radiation`
Unit: Wh/m2
value >= 0.0
Missing value: 9999.0
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
"""
if value is not None:
try:
value = float(value)
except ValueError:
raise ValueError(
'value {} need to be of type float '
'for field `diffuse_horizontal_radiation`'.format(value))
if value < 0.0:
raise ValueError('value need to be greater or equal 0.0 '
'for field `diffuse_horizontal_radiation`')
self._diffuse_horizontal_radiation = value
|
Corresponds to IDD Field `diffuse_horizontal_radiation`
Args:
value (float): value for IDD Field `diffuse_horizontal_radiation`
Unit: Wh/m2
value >= 0.0
Missing value: 9999.0
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
entailment
|
def global_horizontal_illuminance(self, value=999999.0):
""" Corresponds to IDD Field `global_horizontal_illuminance`
will be missing if >= 999900
Args:
value (float): value for IDD Field `global_horizontal_illuminance`
Unit: lux
value >= 0.0
Missing value: 999999.0
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
"""
if value is not None:
try:
value = float(value)
except ValueError:
raise ValueError(
'value {} need to be of type float '
'for field `global_horizontal_illuminance`'.format(value))
if value < 0.0:
raise ValueError('value need to be greater or equal 0.0 '
'for field `global_horizontal_illuminance`')
self._global_horizontal_illuminance = value
|
Corresponds to IDD Field `global_horizontal_illuminance`
will be missing if >= 999900
Args:
value (float): value for IDD Field `global_horizontal_illuminance`
Unit: lux
value >= 0.0
Missing value: 999999.0
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
entailment
|
def direct_normal_illuminance(self, value=999999.0):
""" Corresponds to IDD Field `direct_normal_illuminance`
will be missing if >= 999900
Args:
value (float): value for IDD Field `direct_normal_illuminance`
Unit: lux
value >= 0.0
Missing value: 999999.0
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
"""
if value is not None:
try:
value = float(value)
except ValueError:
raise ValueError(
'value {} need to be of type float '
'for field `direct_normal_illuminance`'.format(value))
if value < 0.0:
raise ValueError('value need to be greater or equal 0.0 '
'for field `direct_normal_illuminance`')
self._direct_normal_illuminance = value
|
Corresponds to IDD Field `direct_normal_illuminance`
will be missing if >= 999900
Args:
value (float): value for IDD Field `direct_normal_illuminance`
Unit: lux
value >= 0.0
Missing value: 999999.0
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
entailment
|
def diffuse_horizontal_illuminance(self, value=999999.0):
""" Corresponds to IDD Field `diffuse_horizontal_illuminance`
will be missing if >= 999900
Args:
value (float): value for IDD Field `diffuse_horizontal_illuminance`
Unit: lux
value >= 0.0
Missing value: 999999.0
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
"""
if value is not None:
try:
value = float(value)
except ValueError:
raise ValueError(
'value {} need to be of type float '
'for field `diffuse_horizontal_illuminance`'.format(value))
if value < 0.0:
raise ValueError('value need to be greater or equal 0.0 '
'for field `diffuse_horizontal_illuminance`')
self._diffuse_horizontal_illuminance = value
|
Corresponds to IDD Field `diffuse_horizontal_illuminance`
will be missing if >= 999900
Args:
value (float): value for IDD Field `diffuse_horizontal_illuminance`
Unit: lux
value >= 0.0
Missing value: 999999.0
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
entailment
|
def zenith_luminance(self, value=9999.0):
""" Corresponds to IDD Field `zenith_luminance`
will be missing if >= 9999
Args:
value (float): value for IDD Field `zenith_luminance`
Unit: Cd/m2
value >= 0.0
Missing value: 9999.0
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
"""
if value is not None:
try:
value = float(value)
except ValueError:
raise ValueError('value {} need to be of type float '
'for field `zenith_luminance`'.format(value))
if value < 0.0:
raise ValueError('value need to be greater or equal 0.0 '
'for field `zenith_luminance`')
self._zenith_luminance = value
|
Corresponds to IDD Field `zenith_luminance`
will be missing if >= 9999
Args:
value (float): value for IDD Field `zenith_luminance`
Unit: Cd/m2
value >= 0.0
Missing value: 9999.0
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
entailment
|
def wind_direction(self, value=999.0):
"""Corresponds to IDD Field `wind_direction`
Args:
value (float): value for IDD Field `wind_direction`
Unit: degrees
value >= 0.0
value <= 360.0
Missing value: 999.0
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
"""
if value is not None:
try:
value = float(value)
except ValueError:
raise ValueError('value {} need to be of type float '
'for field `wind_direction`'.format(value))
if value < 0.0:
raise ValueError('value need to be greater or equal 0.0 '
'for field `wind_direction`')
if value > 360.0:
raise ValueError('value need to be smaller 360.0 '
'for field `wind_direction`')
self._wind_direction = value
|
Corresponds to IDD Field `wind_direction`
Args:
value (float): value for IDD Field `wind_direction`
Unit: degrees
value >= 0.0
value <= 360.0
Missing value: 999.0
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
entailment
|
def wind_speed(self, value=999.0):
"""Corresponds to IDD Field `wind_speed`
Args:
value (float): value for IDD Field `wind_speed`
Unit: m/s
value >= 0.0
value <= 40.0
Missing value: 999.0
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
"""
if value is not None:
try:
value = float(value)
except ValueError:
raise ValueError('value {} need to be of type float '
'for field `wind_speed`'.format(value))
if value < 0.0:
raise ValueError('value need to be greater or equal 0.0 '
'for field `wind_speed`')
if value > 40.0:
raise ValueError('value need to be smaller 40.0 '
'for field `wind_speed`')
self._wind_speed = value
|
Corresponds to IDD Field `wind_speed`
Args:
value (float): value for IDD Field `wind_speed`
Unit: m/s
value >= 0.0
value <= 40.0
Missing value: 999.0
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
entailment
|
def total_sky_cover(self, value=99.0):
"""Corresponds to IDD Field `total_sky_cover` This is the value for
total sky cover (tenths of coverage). (i.e. 1 is 1/10 covered. 10 is
total coverage). (Amount of sky dome in tenths covered by clouds or
obscuring phenomena at the hour indicated at the time indicated.)
Args:
value (float): value for IDD Field `total_sky_cover`
value >= 0.0
value <= 10.0
Missing value: 99.0
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
"""
if value is not None:
try:
value = float(value)
except ValueError:
raise ValueError('value {} need to be of type float '
'for field `total_sky_cover`'.format(value))
if value < 0.0:
raise ValueError('value need to be greater or equal 0.0 '
'for field `total_sky_cover`')
if value > 10.0:
raise ValueError('value need to be smaller 10.0 '
'for field `total_sky_cover`')
self._total_sky_cover = value
|
Corresponds to IDD Field `total_sky_cover` This is the value for
total sky cover (tenths of coverage). (i.e. 1 is 1/10 covered. 10 is
total coverage). (Amount of sky dome in tenths covered by clouds or
obscuring phenomena at the hour indicated at the time indicated.)
Args:
value (float): value for IDD Field `total_sky_cover`
value >= 0.0
value <= 10.0
Missing value: 99.0
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
entailment
|
def opaque_sky_cover(self, value=99.0):
"""Corresponds to IDD Field `opaque_sky_cover` This is the value for
opaque sky cover (tenths of coverage). (i.e. 1 is 1/10 covered. 10 is
total coverage). (Amount of sky dome in tenths covered by clouds or
obscuring phenomena that prevent observing the sky or higher cloud
layers at the time indicated.) This is not used unless the field for
Horizontal Infrared Radiation Intensity is missing and then it is used
to calculate Horizontal Infrared Radiation Intensity.
Args:
value (float): value for IDD Field `opaque_sky_cover`
value >= 0.0
value <= 10.0
Missing value: 99.0
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
"""
if value is not None:
try:
value = float(value)
except ValueError:
raise ValueError('value {} need to be of type float '
'for field `opaque_sky_cover`'.format(value))
if value < 0.0:
raise ValueError('value need to be greater or equal 0.0 '
'for field `opaque_sky_cover`')
if value > 10.0:
raise ValueError('value need to be smaller 10.0 '
'for field `opaque_sky_cover`')
self._opaque_sky_cover = value
|
Corresponds to IDD Field `opaque_sky_cover` This is the value for
opaque sky cover (tenths of coverage). (i.e. 1 is 1/10 covered. 10 is
total coverage). (Amount of sky dome in tenths covered by clouds or
obscuring phenomena that prevent observing the sky or higher cloud
layers at the time indicated.) This is not used unless the field for
Horizontal Infrared Radiation Intensity is missing and then it is used
to calculate Horizontal Infrared Radiation Intensity.
Args:
value (float): value for IDD Field `opaque_sky_cover`
value >= 0.0
value <= 10.0
Missing value: 99.0
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
entailment
|
def visibility(self, value=9999.0):
"""Corresponds to IDD Field `visibility` This is the value for
visibility in km. (Horizontal visibility at the time indicated.)
Args:
value (float): value for IDD Field `visibility`
Unit: km
Missing value: 9999.0
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
"""
if value is not None:
try:
value = float(value)
except ValueError:
raise ValueError('value {} need to be of type float '
'for field `visibility`'.format(value))
self._visibility = value
|
Corresponds to IDD Field `visibility` This is the value for
visibility in km. (Horizontal visibility at the time indicated.)
Args:
value (float): value for IDD Field `visibility`
Unit: km
Missing value: 9999.0
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
entailment
|
def ceiling_height(self, value=99999.0):
"""Corresponds to IDD Field `ceiling_height` This is the value for
ceiling height in m. (77777 is unlimited ceiling height. 88888 is
cirroform ceiling.) It is not currently used in EnergyPlus
calculations.
Args:
value (float): value for IDD Field `ceiling_height`
Unit: m
Missing value: 99999.0
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
"""
if value is not None:
try:
value = float(value)
except ValueError:
raise ValueError('value {} need to be of type float '
'for field `ceiling_height`'.format(value))
self._ceiling_height = value
|
Corresponds to IDD Field `ceiling_height` This is the value for
ceiling height in m. (77777 is unlimited ceiling height. 88888 is
cirroform ceiling.) It is not currently used in EnergyPlus
calculations.
Args:
value (float): value for IDD Field `ceiling_height`
Unit: m
Missing value: 99999.0
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
entailment
|
def present_weather_observation(self, value=None):
"""Corresponds to IDD Field `present_weather_observation` If the value
of the field is 0, then the observed weather codes are taken from the
following field. If the value of the field is 9, then "missing" weather
is assumed. Since the primary use of these fields (Present Weather
Observation and Present Weather Codes) is for rain/wet surfaces, a
missing observation field or a missing weather code implies "no rain".
Args:
value (int): value for IDD Field `present_weather_observation`
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
"""
if value is not None:
try:
value = int(value)
except ValueError:
raise ValueError(
'value {} need to be of type int '
'for field `present_weather_observation`'.format(value))
self._present_weather_observation = value
|
Corresponds to IDD Field `present_weather_observation` If the value
of the field is 0, then the observed weather codes are taken from the
following field. If the value of the field is 9, then "missing" weather
is assumed. Since the primary use of these fields (Present Weather
Observation and Present Weather Codes) is for rain/wet surfaces, a
missing observation field or a missing weather code implies "no rain".
Args:
value (int): value for IDD Field `present_weather_observation`
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
entailment
|
def present_weather_codes(self, value=None):
"""Corresponds to IDD Field `present_weather_codes`
Args:
value (int): value for IDD Field `present_weather_codes`
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
"""
if value is not None:
try:
value = int(value)
except ValueError:
raise ValueError(
'value {} need to be of type int '
'for field `present_weather_codes`'.format(value))
self._present_weather_codes = value
|
Corresponds to IDD Field `present_weather_codes`
Args:
value (int): value for IDD Field `present_weather_codes`
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
entailment
|
def precipitable_water(self, value=999.0):
"""Corresponds to IDD Field `precipitable_water`
Args:
value (float): value for IDD Field `precipitable_water`
Unit: mm
Missing value: 999.0
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
"""
if value is not None:
try:
value = float(value)
except ValueError:
raise ValueError(
'value {} need to be of type float '
'for field `precipitable_water`'.format(value))
self._precipitable_water = value
|
Corresponds to IDD Field `precipitable_water`
Args:
value (float): value for IDD Field `precipitable_water`
Unit: mm
Missing value: 999.0
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
entailment
|
def aerosol_optical_depth(self, value=0.999):
"""Corresponds to IDD Field `aerosol_optical_depth`
Args:
value (float): value for IDD Field `aerosol_optical_depth`
Unit: thousandths
Missing value: 0.999
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
"""
if value is not None:
try:
value = float(value)
except ValueError:
raise ValueError(
'value {} need to be of type float '
'for field `aerosol_optical_depth`'.format(value))
self._aerosol_optical_depth = value
|
Corresponds to IDD Field `aerosol_optical_depth`
Args:
value (float): value for IDD Field `aerosol_optical_depth`
Unit: thousandths
Missing value: 0.999
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
entailment
|
def snow_depth(self, value=999.0):
"""Corresponds to IDD Field `snow_depth`
Args:
value (float): value for IDD Field `snow_depth`
Unit: cm
Missing value: 999.0
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
"""
if value is not None:
try:
value = float(value)
except ValueError:
raise ValueError('value {} need to be of type float '
'for field `snow_depth`'.format(value))
self._snow_depth = value
|
Corresponds to IDD Field `snow_depth`
Args:
value (float): value for IDD Field `snow_depth`
Unit: cm
Missing value: 999.0
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
entailment
|
def days_since_last_snowfall(self, value=99):
"""Corresponds to IDD Field `days_since_last_snowfall`
Args:
value (int): value for IDD Field `days_since_last_snowfall`
Missing value: 99
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
"""
if value is not None:
try:
value = int(value)
except ValueError:
raise ValueError(
'value {} need to be of type int '
'for field `days_since_last_snowfall`'.format(value))
self._days_since_last_snowfall = value
|
Corresponds to IDD Field `days_since_last_snowfall`
Args:
value (int): value for IDD Field `days_since_last_snowfall`
Missing value: 99
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
entailment
|
def albedo(self, value=999.0):
"""Corresponds to IDD Field `albedo`
Args:
value (float): value for IDD Field `albedo`
Missing value: 999.0
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
"""
if value is not None:
try:
value = float(value)
except ValueError:
raise ValueError('value {} need to be of type float '
'for field `albedo`'.format(value))
self._albedo = value
|
Corresponds to IDD Field `albedo`
Args:
value (float): value for IDD Field `albedo`
Missing value: 999.0
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
entailment
|
def liquid_precipitation_depth(self, value=999.0):
"""Corresponds to IDD Field `liquid_precipitation_depth`
Args:
value (float): value for IDD Field `liquid_precipitation_depth`
Unit: mm
Missing value: 999.0
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
"""
if value is not None:
try:
value = float(value)
except ValueError:
raise ValueError(
'value {} need to be of type float '
'for field `liquid_precipitation_depth`'.format(value))
self._liquid_precipitation_depth = value
|
Corresponds to IDD Field `liquid_precipitation_depth`
Args:
value (float): value for IDD Field `liquid_precipitation_depth`
Unit: mm
Missing value: 999.0
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
entailment
|
def liquid_precipitation_quantity(self, value=99.0):
    """Set the IDD field `liquid_precipitation_quantity`.

    Args:
        value (float): value for IDD Field `liquid_precipitation_quantity`.
            Unit: hr. Missing value: 99.0. ``None`` is stored unchecked
            and is treated as a missing value.

    Raises:
        ValueError: if `value` cannot be converted to ``float``
    """
    if value is not None:
        try:
            value = float(value)
        except ValueError:
            # Re-raise with the field name so bulk EPW parsing errors
            # can be traced to the offending column.
            raise ValueError(
                'value {} need to be of type float '
                'for field `liquid_precipitation_quantity`'.format(value))
    self._liquid_precipitation_quantity = value
|
Corresponds to IDD Field `liquid_precipitation_quantity`
Args:
value (float): value for IDD Field `liquid_precipitation_quantity`
Unit: hr
Missing value: 99.0
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
entailment
|
def export(self, top=True):
    """Serialize this weather record to its comma-separated EPW string.

    Args:
        top (bool): if True appends `internal_name` before values.
            All non list objects should be exported with top=True,
            all list objects, that are embedded in as fields in list
            objects should be exported with `top`=False.

    Returns:
        str: The object's string representation.
    """
    # EPW column order; each name is converted through _to_str in turn.
    field_order = (
        'year', 'month', 'day', 'hour', 'minute',
        'data_source_and_uncertainty_flags', 'dry_bulb_temperature',
        'dew_point_temperature', 'relative_humidity',
        'atmospheric_station_pressure',
        'extraterrestrial_horizontal_radiation',
        'extraterrestrial_direct_normal_radiation',
        'horizontal_infrared_radiation_intensity',
        'global_horizontal_radiation', 'direct_normal_radiation',
        'diffuse_horizontal_radiation', 'global_horizontal_illuminance',
        'direct_normal_illuminance', 'diffuse_horizontal_illuminance',
        'zenith_luminance', 'wind_direction', 'wind_speed',
        'total_sky_cover', 'opaque_sky_cover', 'visibility',
        'ceiling_height', 'present_weather_observation',
        'present_weather_codes', 'precipitable_water',
        'aerosol_optical_depth', 'snow_depth',
        'days_since_last_snowfall', 'albedo',
        'liquid_precipitation_depth', 'liquid_precipitation_quantity')
    out = [self._internal_name] if top else []
    out.extend(self._to_str(getattr(self, name)) for name in field_order)
    return ",".join(out)
|
Exports object to its string representation.
Args:
top (bool): if True appends `internal_name` before values.
All non list objects should be exported with value top=True,
all list objects, that are embedded in as fields inlist objects
should be exported with `top`=False
Returns:
str: The objects string representation
|
entailment
|
def add_weatherdata(self, data):
    """Append one weather data record to this file's record list.

    Args:
        data (WeatherData): weather data object

    Raises:
        ValueError: if `data` is not a WeatherData instance
    """
    if isinstance(data, WeatherData):
        self._data["WEATHER DATA"].append(data)
    else:
        raise ValueError('Weather data need to be of type WeatherData')
|
Appends weather data.
Args:
data (WeatherData): weather data object
|
entailment
|
def save(self, path, check=True):
    """Save WeatherData in EPW format to path.

    Args:
        path (str): path where EPW file should be saved
        check (bool): when True, verify that every EPW header section
            is present before writing anything.

    Raises:
        ValueError: if `check` is True and any header section is
            missing or None.
    """
    with open(path, 'w') as f:
        if check:
            # Validate all mandatory header sections up front so the
            # file is not written with missing headers.
            if ("LOCATION" not in self._data or
                    self._data["LOCATION"] is None):
                raise ValueError('location is not valid.')
            if ("DESIGN CONDITIONS" not in self._data or
                    self._data["DESIGN CONDITIONS"] is None):
                raise ValueError('design_conditions is not valid.')
            if ("TYPICAL/EXTREME PERIODS" not in self._data or
                    self._data["TYPICAL/EXTREME PERIODS"] is None):
                raise ValueError(
                    'typical_or_extreme_periods is not valid.')
            if ("GROUND TEMPERATURES" not in self._data or
                    self._data["GROUND TEMPERATURES"] is None):
                raise ValueError('ground_temperatures is not valid.')
            if ("HOLIDAYS/DAYLIGHT SAVINGS" not in self._data or
                    self._data["HOLIDAYS/DAYLIGHT SAVINGS"] is None):
                raise ValueError(
                    'holidays_or_daylight_savings is not valid.')
            if ("COMMENTS 1" not in self._data or
                    self._data["COMMENTS 1"] is None):
                raise ValueError('comments_1 is not valid.')
            if ("COMMENTS 2" not in self._data or
                    self._data["COMMENTS 2"] is None):
                raise ValueError('comments_2 is not valid.')
            if ("DATA PERIODS" not in self._data or
                    self._data["DATA PERIODS"] is None):
                raise ValueError('data_periods is not valid.')
        # Headers are written in the fixed EPW order; each section that
        # is present exports itself as one line.
        if ("LOCATION" in self._data and
                self._data["LOCATION"] is not None):
            f.write(self._data["LOCATION"].export() + "\n")
        if ("DESIGN CONDITIONS" in self._data and
                self._data["DESIGN CONDITIONS"] is not None):
            f.write(self._data["DESIGN CONDITIONS"].export() + "\n")
        if ("TYPICAL/EXTREME PERIODS" in self._data and
                self._data["TYPICAL/EXTREME PERIODS"] is not None):
            f.write(self._data["TYPICAL/EXTREME PERIODS"].export() + "\n")
        if ("GROUND TEMPERATURES" in self._data and
                self._data["GROUND TEMPERATURES"] is not None):
            f.write(self._data["GROUND TEMPERATURES"].export() + "\n")
        if ("HOLIDAYS/DAYLIGHT SAVINGS" in self._data and
                self._data["HOLIDAYS/DAYLIGHT SAVINGS"] is not None):
            f.write(
                self._data["HOLIDAYS/DAYLIGHT SAVINGS"].export() +
                "\n")
        if ("COMMENTS 1" in self._data and
                self._data["COMMENTS 1"] is not None):
            f.write(self._data["COMMENTS 1"].export() + "\n")
        if ("COMMENTS 2" in self._data and
                self._data["COMMENTS 2"] is not None):
            f.write(self._data["COMMENTS 2"].export() + "\n")
        if ("DATA PERIODS" in self._data and
                self._data["DATA PERIODS"] is not None):
            f.write(self._data["DATA PERIODS"].export() + "\n")
        # Weather records follow the headers; export(False) omits the
        # internal name so each line is a bare value list.
        for item in self._data["WEATHER DATA"]:
            f.write(item.export(False) + "\n")
|
Save WeatherData in EPW format to path.
Args:
path (str): path where EPW file should be saved
|
entailment
|
def _create_datadict(cls, internal_name):
    """Factory returning a fresh data-dictionary object for an EPW header.

    Args:
        internal_name (str): IDD name

    Raises:
        ValueError: if `internal_name` cannot be matched to a data dictionary object
    """
    # Map header names to their constructors; instantiation is deferred
    # until after the lookup succeeds.
    factories = {
        "LOCATION": Location,
        "DESIGN CONDITIONS": DesignConditions,
        "TYPICAL/EXTREME PERIODS": TypicalOrExtremePeriods,
        "GROUND TEMPERATURES": GroundTemperatures,
        "HOLIDAYS/DAYLIGHT SAVINGS": HolidaysOrDaylightSavings,
        "COMMENTS 1": Comments1,
        "COMMENTS 2": Comments2,
        "DATA PERIODS": DataPeriods,
    }
    if internal_name not in factories:
        raise ValueError(
            "No DataDictionary known for {}".format(internal_name))
    return factories[internal_name]()
|
Creates an object depending on `internal_name`
Args:
internal_name (str): IDD name
Raises:
ValueError: if `internal_name` cannot be matched to a data dictionary object
|
entailment
|
def read(self, path):
    """Read EPW weather data from path.

    Args:
        path (str): path to read weather data from
    """
    with open(path, "r") as f:
        for line in f:
            line = line.strip()
            # Header sections start with an upper-case IDD name followed
            # by a comma (e.g. "LOCATION,...", "DATA PERIODS,...").
            match_obj_name = re.search(r"^([A-Z][A-Z/ \d]+),", line)
            if match_obj_name is not None:
                internal_name = match_obj_name.group(1)
                if internal_name in self._data:
                    # Replace any prior section object with a freshly
                    # parsed one; unknown header names are skipped.
                    self._data[internal_name] = self._create_datadict(
                        internal_name)
                    data_line = line[len(internal_name) + 1:]
                    vals = data_line.strip().split(',')
                    self._data[internal_name].read(vals)
            else:
                # Non-header lines are hourly weather records.
                wd = WeatherData()
                wd.read(line.strip().split(','))
                self.add_weatherdata(wd)
|
Read EPW weather data from path.
Args:
path (str): path to read weather data from
|
entailment
|
def display_url(target):
    """Display a clickable URL in an IPython notebook.

    With thanks to Fernando Perez for putting together the implementation!

    :param target: the url to display.
    :type target: string."""
    # Prepend a scheme only when one is missing.
    if not target.startswith("http"):
        target = u"http://" + target
    display(HTML(u'<a href="{t}" target=_blank>{t}</a>'.format(t=target)))
|
Displaying URL in an IPython notebook to allow the user to click and check on information. With thanks to Fernando Perez for putting together the implementation!
:param target: the url to display.
:type target: string.
|
entailment
|
def iframe_url(target, width=500, height=400, scrolling=True, border=0, frameborder=0):
    """Produce an iframe tag for displaying an item in an HTML window.

    :param target: the target url.
    :type target: string
    :param width: the width of the iframe (default 500).
    :type width: int
    :param height: the height of the iframe (default 400).
    :type height: int
    :param scrolling: whether or not to allow scrolling (default True).
    :type scrolling: bool
    :param border: width of the border.
    :type border: int
    :param frameborder: width of the frameborder.
    :type frameborder: int"""
    if not target.startswith("http"):
        target = u"http://" + target
    scroll_val = 'yes' if scrolling else 'no'
    template = (u'<iframe frameborder="{frameborder}" scrolling="{scrolling}" '
                u'style="border:{border}px" src="{url}", width={width} '
                u'height={height}></iframe>')
    return template.format(frameborder=frameborder, scrolling=scroll_val,
                           border=border, url=target,
                           width=width, height=height)
|
Produce an iframe for displaying an item in HTML window.
:param target: the target url.
:type target: string
:param width: the width of the iframe (default 500).
:type width: int
:param height: the height of the iframe (default 400).
:type height: int
:param scrolling: whether or not to allow scrolling (default True).
:type scrolling: bool
:param border: width of the border.
:type border: int
:param frameborder: width of the frameborder.
:type frameborder: int
|
entailment
|
def display_iframe_url(target, **kwargs):
    """Render the contents of a URL inline in an IPython notebook.

    :param target: the target url.
    :type target: string
    .. seealso:: `iframe_url()` for additional arguments."""
    display(HTML(iframe_url(target, **kwargs)))
|
Display the contents of a URL in an IPython notebook.
:param target: the target url.
:type target: string
.. seealso:: `iframe_url()` for additional arguments.
|
entailment
|
def display_google_book(id, page=None, width=700, height=500, **kwargs):
    """Display an embedded version of a Google book.

    :param id: the id of the google book to display.
    :type id: string
    :param page: the start page for the book.
    :type page: string or int."""
    # Integer pages use Google's "PA<n>" page-anchor form; any other
    # value is substituted verbatim.
    page_part = 'pg=PA{page}' if isinstance(page, int) else 'pg={page}'
    url = ('http://books.google.co.uk/books?id={id}&' + page_part
           + '&output=embed').format(id=id, page=page)
    display_iframe_url(url, width=width, height=height, **kwargs)
|
Display an embedded version of a Google book.
:param id: the id of the google book to display.
:type id: string
:param page: the start page for the book.
:type id: string or int.
|
entailment
|
def code_toggle(start_show=False, message=None):
    """Toggling on and off code in a notebook.

    :param start_show: Whether to display the code or not on first load (default is False).
    :type start_show: bool
    :param message: the message used to toggle display of the code.
    :type message: string

    The tip that this idea is
    based on is from Damian Kao (http://blog.nextgenetics.net/?e=102)."""
    # Build an HTML <script> block that toggles the visibility of all
    # notebook input cells via jQuery.
    html ='<script>\n'
    if message is None:
        message = u'The raw code for this jupyter notebook can be hidden for easier reading.'
    # Initial state of the JS code_show flag decides whether cells start
    # visible or hidden.
    if start_show:
        html += u'code_show=true;\n'
    else:
        html += u'code_show=false;\n'
    html+='''function code_toggle() {
 if (code_show){
 $('div.input').show();
 } else {
 $('div.input').hide();
 }
 code_show = !code_show
 }
 $( document ).ready(code_toggle);
</script>
'''
    # The visible message carries the toggle link itself.
    html += message + ' To toggle on/off the raw code, click <a href="javascript:code_toggle()">here</a>.'
    display(HTML(html))
|
Toggling on and off code in a notebook.
:param start_show: Whether to display the code or not on first load (default is False).
:type start_show: bool
:param message: the message used to toggle display of the code.
:type message: string
The tip that this idea is
based on is from Damian Kao (http://blog.nextgenetics.net/?e=102).
|
entailment
|
def display_prediction(basis, num_basis=4, wlim=(-1.,1.), fig=None, ax=None, xlim=None, ylim=None, num_points=1000, offset=0.0, **kwargs):
    """Interactive widget for displaying a prediction function based on summing separate basis functions.

    :param basis: a function handle that calls the basis functions, or a
        dict of such handles (the widget then offers a basis selector).
    :type basis: function handle.
    :param num_basis: number of basis functions (one weight slider each).
    :param xlim: limits of the x axis to use.
    :param ylim: limits of the y axis to use.
    :param wlim: limits for the basis function weights.
    :param num_points: number of x samples used to draw the curves.
    :param offset: added to the slider range of the first weight only.
    :param kwargs: forwarded to the basis function.
    """
    import numpy as np
    import pylab as plt
    # Derive missing axis limits from the supplied axes, else defaults.
    if fig is not None:
        if ax is None:
            ax = fig.gca()
    if xlim is None:
        if ax is not None:
            xlim = ax.get_xlim()
        else:
            xlim = (-2., 2.)
    if ylim is None:
        if ax is not None:
            ylim = ax.get_ylim()
        else:
            ylim = (-1., 1.)
    # initialise X and set up W arguments.
    x = np.zeros((num_points, 1))
    x[:, 0] = np.linspace(xlim[0], xlim[1], num_points)
    param_args = {}
    for i in range(num_basis):
        lim = list(wlim)
        # Only the first weight's slider range is shifted by `offset`.
        if i ==0:
            lim[0] += offset
            lim[1] += offset
        param_args['w_' + str(i)] = tuple(lim)
    # helper function for making basis prediction.
    def predict_basis(w, basis, x, num_basis, **kwargs):
        Phi = basis(x, num_basis, **kwargs)
        f = np.dot(Phi, w)
        return f, Phi
    # When a dict of bases is given, draw the initial plot with the
    # first entry; the widget switches between them later.
    if type(basis) is dict:
        use_basis = basis[list(basis.keys())[0]]
    else:
        use_basis = basis
    f, Phi = predict_basis(np.zeros((num_basis, 1)),
                           use_basis, x, num_basis,
                           **kwargs)
    if fig is None:
        fig, ax=plt.subplots(figsize=(12,4))
    ax.set_ylim(ylim)
    ax.set_xlim(xlim)
    # One line for the summed prediction, one per basis function.
    predline = ax.plot(x, f, linewidth=2)[0]
    basislines = []
    for i in range(num_basis):
        basislines.append(ax.plot(x, Phi[:, i], 'r')[0])
    ax.set_ylim(ylim)
    ax.set_xlim(xlim)
    def generate_function(basis, num_basis, predline, basislines, basis_args, display_basis, offset, **kwargs):
        # Collect the current slider weights and redraw all lines.
        w = np.zeros((num_basis, 1))
        for i in range(num_basis):
            w[i] = kwargs['w_'+ str(i)]
        f, Phi = predict_basis(w, basis, x, num_basis, **basis_args)
        predline.set_xdata(x[:, 0])
        predline.set_ydata(f)
        for i in range(num_basis):
            basislines[i].set_xdata(x[:, 0])
            basislines[i].set_ydata(Phi[:, i])
        # Toggle individual basis curves via their alpha channel.
        if display_basis:
            for i in range(num_basis):
                basislines[i].set_alpha(1) # make visible
        else:
            for i in range(num_basis):
                basislines[i].set_alpha(0)
        display(fig)
    # A plain function handle becomes a fixed (non-interactive) widget
    # argument; a dict stays selectable.
    if type(basis) is not dict:
        basis = fixed(basis)
    plt.close(fig)
    interact(generate_function,
             basis=basis,
             num_basis=fixed(num_basis),
             predline=fixed(predline),
             basislines=fixed(basislines),
             basis_args=fixed(kwargs),
             offset = fixed(offset),
             display_basis = False,
             **param_args)
|
Interactive widget for displaying a prediction function based on summing separate basis functions.
:param basis: a function handle that calls the basis functions.
:type basis: function handle.
:param xlim: limits of the x axis to use.
:param ylim: limits of the y axis to use.
:param wlim: limits for the basis function weights.
|
entailment
|
def display_plots(filebase, directory=None, width=700, height=500, **kwargs):
    """Display a series of plots controlled by sliders.

    The function relies on Python string format functionality to index
    through a series of plots.

    :param filebase: file name template formatted with the slider values,
        e.g. ``'plot_{frame}.svg'``.
    :param directory: optional directory prepended to the formatted name.
    :param kwargs: slider specifications forwarded to ``interact``.
    """
    def show_figure(filebase, directory, **kwargs):
        """Helper function to load in the relevant plot for display."""
        filename = filebase.format(**kwargs)
        if directory is not None:
            filename = directory + '/' + filename
        # The src attribute must interpolate the computed filename; a
        # fixed literal here would break every image load.
        display(HTML("<img src='{filename}'>".format(filename=filename)))
    interact(show_figure, filebase=fixed(filebase), directory=fixed(directory), **kwargs)
|
Display a series of plots controlled by sliders. The function relies on Python string format functionality to index through a series of plots.
|
entailment
|
def answer(part, module='mlai2014.json'):
    """Return the answers to the lab classes.

    Args:
        part (int): zero-based lab number; looked up as ``'Lab <part+1>'``.
        module (str): JSON file inside ``data_directory`` holding the marks.

    Returns:
        The stored entry for the requested lab class.
    """
    # Context manager closes the file promptly; the original left the
    # handle open until garbage collection.
    with open(os.path.join(data_directory, module), 'rb') as fh:
        marks = json.load(fh)
    return marks['Lab ' + str(part+1)]
|
Returns the answers to the lab classes.
|
entailment
|
def latex(self):
    """Gives a latex representation of the assessment."""
    # Preamble + body + postamble, concatenated in document order.
    return self.latex_preamble + self._repr_latex_() + self.latex_post
|
Gives a latex representation of the assessment.
|
entailment
|
def html(self):
    """Gives an html representation of the assessment."""
    parts = [self.html_preamble, self._repr_html_(), self.html_post]
    return ''.join(parts)
|
Gives an html representation of the assessment.
|
entailment
|
def marksheet(self):
    """Return an empty pandas marking frame for this assessment.

    One row is produced per markable part (entry 0 of each question is
    treated as the question preamble and skipped, as are parts with a
    non-positive maximum mark). The frame can then be passed to a
    google doc distributed to markers, who fill in the mark for each
    section.

    Returns:
        pandas.DataFrame: rows indexed and numbered ``<question>_<part>``
        with 'Question' and 'Max Mark' filled in; 'Correct (a fraction)'
        and 'Comments' left blank, sorted by 'Number'.
    """
    columns = ['Number', 'Question', 'Correct (a fraction)', 'Max Mark', 'Comments']
    rows = []
    index = []
    for qu_number, question in enumerate(self.answers):
        part_no = 0
        for number, part in enumerate(question):
            # part is (question text, ..., max mark); only positive-mark
            # parts after the preamble get a row.
            if number > 0 and part[2] > 0:
                part_no += 1
                label = str(qu_number + 1) + '_' + str(part_no)
                rows.append({'Number': label,
                             'Question': part[0],
                             'Max Mark': part[2]})
                index.append(label)
    # Build the frame in one shot: chained .loc[...][...] assignment is
    # unreliable, and DataFrame.append/DataFrame.sort were removed from
    # pandas; sort_values is the supported replacement.
    mark_sheet = pd.DataFrame(rows, columns=columns, index=index)
    return mark_sheet.sort_values(by='Number')
|
Returns an empty pandas dataframe object containing rows and columns for marking. This can then be passed to a google doc that is distributed to markers for editing with the mark for each section.
|
entailment
|
def total_marks(self):
    """Compute the total mark for the assessment."""
    # Entry 0 of each answer is the question preamble; only parts with
    # a positive maximum mark contribute.
    return sum(part[2]
               for answer in self.answers
               for part in answer[1:]
               if part[2] > 0)
|
Compute the total mark for the assessment.
|
entailment
|
def download(name, course, github='SheffieldML/notebook/master/lab_classes/'):
    """Download a lab class from the relevant course.

    :param name: notebook name ('.ipynb' is appended when absent).
    :type name: string
    :param course: the course short name to download the class from;
        also used as the local store directory.
    :type course: string
    :param github: github repo path for downloading the course from.
    :type github: string"""
    github_stub = 'https://raw.githubusercontent.com/'
    filename = name if name.endswith('.ipynb') else name + '.ipynb'
    from pods.util import download_url
    download_url(os.path.join(github_stub, github, course, filename),
                 store_directory=course)
|
Download a lab class from the relevant course
:param course: the course short name to download the class from.
:type course: string
:param reference: reference to the course for downloading the class.
:type reference: string
:param github: github repo for downloading the course from.
:type string: github repo for downloading the lab.
|
entailment
|
def read(self, vals):
    """ Read values

    NOTE: this body is a Jinja2 template ({%- ... %} tags); the actual
    Python is produced when the template is rendered per IDD object.

    Args:
        vals (list): list of strings representing values
    """
    i = 0
    {%- for field in fields %}
    {%- if field.is_list %}
    # List field: the first raw value is the element count, then each
    # element consumes `field_count` raw values.
    count = int(vals[i])
    i += 1
    for _ in range(count):
        obj = {{field.object_name}}()
        obj.read(vals[i:i + obj.field_count])
        self.add_{{field.field_name}}(obj)
        i += obj.field_count
    {%- else %}
    # Scalar field: an empty string encodes a missing value (None).
    if len(vals[i]) == 0:
        self.{{field.field_name}} = None
    else:
        self.{{field.field_name}} = vals[i]
    i += 1
    {%- endif %}
    {%- endfor %}
|
Read values
Args:
vals (list): list of strings representing values
|
entailment
|
def permute(num):
    "Permutation for randomizing data order."
    # `permute_data` is a flag from the enclosing scope; when disabled we
    # warn and keep the natural ordering.
    if not permute_data:
        logging.warning("Warning not permuting data")
        return np.arange(num)
    return np.random.permutation(num)
|
Permutation for randomizing data order.
|
entailment
|
def discrete(cats, name='discrete'):
    """Return a class category that shows the encoding"""
    import json
    # Re-key any bytes keys as str, in place, so the mapping serializes
    # cleanly to JSON (callers see the mutated dict).
    for key in list(cats):
        if isinstance(key, bytes):
            cats[key.decode('utf-8')] = cats.pop(key)
    return 'discrete(' + json.dumps([cats, name]) + ')'
|
Return a class category that shows the encoding
|
entailment
|
def prompt_stdin(prompt):
    """Ask user for agreeing to data set licenses.

    Args:
        prompt (str): text shown before reading a response from stdin.

    Returns:
        bool: True for 'yes'/'y', False for 'no'/'n'. For any other
        response the user is asked to retry and None is returned
        implicitly (callers should treat that as "ask again").

    Raises:
        Exception: re-raises whatever the interactive read raised, e.g.
        when stdin is not available in the running environment.
    """
    # raw_input returns the empty string for "enter"
    yes = set(['yes', 'y'])
    no = set(['no', 'n'])
    try:
        print(prompt)
        if sys.version_info >= (3, 0):
            choice = input().lower()
        else:
            choice = raw_input().lower()
    # The original bare `except:` also swallowed KeyboardInterrupt and
    # SystemExit before re-raising; Exception is the widest sensible net
    # for a failed interactive read.
    except Exception:
        print('Stdin is not implemented.')
        print('You need to set')
        print('overide_manual_authorize=True')
        print('to proceed with the download. Please set that variable and continue.')
        raise
    if choice in yes:
        return True
    elif choice in no:
        return False
    else:
        print("Your response was a " + choice)
        print("Please respond with 'yes', 'y' or 'no', 'n'")
|
Ask user for agreeing to data set licenses.
|
entailment
|
def clear_cache(dataset_name=None):
    """Remove a data set from the cache"""
    dr = data_resources[dataset_name]
    base = os.path.join(data_path, dataset_name)
    if 'dirs' in dr:
        # Directory-structured data set: delete each file, then remove
        # the now-empty sub-directories.
        for dirs, files in zip(dr['dirs'], dr['files']):
            for dir, file in zip(dirs, files):
                path = os.path.join(base, dir, file)
                if os.path.exists(path):
                    logging.info("clear_cache: removing " + path)
                    os.unlink(path)
            for dir in dirs:
                path = os.path.join(base, dir)
                if os.path.exists(path):
                    logging.info("clear_cache: remove directory " + path)
                    os.rmdir(path)
    else:
        # Flat data set: every cached file lives directly under base.
        for file_list in dr['files']:
            for file in file_list:
                path = os.path.join(base, file)
                if os.path.exists(path):
                    logging.info("clear_cache: remove " + path)
                    os.unlink(path)
|
Remove a data set from the cache
|
entailment
|
def data_available(dataset_name=None):
    """Check if the data set is available on the local machine already."""
    dr = data_resources[dataset_name]
    base = os.path.join(data_path, dataset_name)
    # Enumerate every expected cached path, honoring the optional
    # per-directory layout, and require all of them to exist.
    if 'dirs' in dr:
        paths = (os.path.join(base, dir, file)
                 for dirs, files in zip(dr['dirs'], dr['files'])
                 for dir, file in zip(dirs, files))
    else:
        paths = (os.path.join(base, file)
                 for file_list in dr['files']
                 for file in file_list)
    return all(os.path.exists(path) for path in paths)
|
Check if the data set is available on the local machine already.
|
entailment
|
def download_data(dataset_name=None, prompt=prompt_stdin):
    """Check with the user that they are happy with terms and conditions for the data set, then download it.

    Args:
        dataset_name (str): key into ``data_resources``.
        prompt (callable): used by ``authorize_download`` to ask the user.

    Returns:
        bool: True once every file has been downloaded.

    Raises:
        Exception: if the user does not authorize the download.
    """
    dr = data_resources[dataset_name]
    if not authorize_download(dataset_name, prompt=prompt):
        raise Exception("Permission to download data set denied.")
    # Three layouts are supported: per-file suffices, per-file
    # sub-directories, or a flat list of files per URL.
    if 'suffices' in dr:
        for url, files, suffices in zip(dr['urls'], dr['files'], dr['suffices']):
            for file, suffix in zip(files, suffices):
                download_url(url=os.path.join(url,file),
                             dir_name = data_path,
                             store_directory=dataset_name,
                             suffix=suffix)
    elif 'dirs' in dr:
        for url, dirs, files in zip(dr['urls'], dr['dirs'], dr['files']):
            for file, dir in zip(files, dirs):
                print(file, dir)
                download_url(
                    url=os.path.join(url,dir,file),
                    dir_name = data_path,
                    store_directory=os.path.join(dataset_name,dir)
                )
    else:
        for url, files in zip(dr['urls'], dr['files']):
            for file in files:
                download_url(
                    url=os.path.join(url,file),
                    dir_name = data_path,
                    store_directory=dataset_name
                )
    return True
|
Check with the user that they are happy with the terms and conditions for the data set, then download it.
|
entailment
|
def df2arff(df, dataset_name, pods_data):
    """Write an arff file from a data set loaded in from pods.

    Args:
        df (pandas.DataFrame): data to serialize. Column names may carry
            type prefixes -- ``discrete(...)``, ``integer(...)``,
            ``datenum(...)``, ``timestamp(...)``, ``datetime64(...)``,
            ``decimalyear(...)`` -- which drive the ARFF attribute
            encoding below.
        dataset_name (str): used for the relation name and the output
            file name (``<dataset_name>.arff``).
        pods_data (dict): pods metadata; ``info``/``details``/``citation``
            entries are folded into the ARFF description.

    Side effects:
        Mutates `df` columns in place and writes ``<dataset_name>.arff``
        into the current working directory.
    """
    def java_simple_date(date_format):
        # Translate Python strftime directives into Java SimpleDateFormat
        # tokens, which ARFF DATE attributes expect.
        date_format = date_format.replace('%Y', 'yyyy').replace('%m', 'MM').replace('%d', 'dd').replace('%H', 'HH')
        return date_format.replace('%h', 'hh').replace('%M', 'mm').replace('%S', 'ss').replace('%f', 'SSSSSS')
    def tidy_field(atr):
        # Normalize attribute names: drop spaces around '/' and replace
        # remaining spaces with underscores.
        return str(atr).replace(' / ', '/').replace(' ', '_')
    types = {'STRING': [str], 'INTEGER': [int, np.int64, np.uint8], 'REAL': [np.float64]}
    d = {}
    d['attributes'] = []
    for atr in df.columns:
        if isinstance(atr, str):
            # discrete(...) columns: decode the JSON-embedded category
            # mapping and replace encoded values with their labels.
            if len(atr)>8 and atr[:9] == 'discrete(':
                import json
                elements = json.loads(atr[9:-1])
                d['attributes'].append((tidy_field(elements[1]),
                                        list(elements[0].keys())))
                mask = {}
                c = pd.Series(index=df.index)
                for key, val in elements[0].items():
                    mask = df[atr]==val
                    c[mask] = key
                df[atr] = c
                continue
            if len(atr)>7 and atr[:8] == 'integer(':
                name = atr[8:-1]
                d['attributes'].append((tidy_field(name), 'INTEGER'))
                df[atr] = df[atr].astype(int)
                continue
            # datenum(...) columns hold matplotlib date numbers; the
            # attribute is declared STRING here and rewritten to DATE
            # by the regex at the end of this function.
            if len(atr)>7 and atr[:8]=='datenum(':
                from matplotlib.dates import num2date
                elements = atr[8:-1].split(',')
                d['attributes'].append((elements[0] + '_datenum_' + java_simple_date(elements[1]), 'STRING'))
                df[atr] = num2date(df[atr].values) #
                df[atr] = df[atr].dt.strftime(elements[1])
                continue
            if len(atr)>9 and atr[:10]=='timestamp(':
                def timestamp2date(values):
                    import datetime
                    """Convert timestamp into a date object"""
                    new = []
                    for value in values:
                        new.append(np.datetime64(datetime.datetime.fromtimestamp(value)))
                    return np.asarray(new)
                elements = atr[10:-1].split(',')
                d['attributes'].append((elements[0] + '_datenum_' + java_simple_date(elements[1]), 'STRING'))
                df[atr] = timestamp2date(df[atr].values) #
                df[atr] = df[atr].dt.strftime(elements[1])
                continue
            if len(atr)>10 and atr[:11]=='datetime64(':
                elements = atr[11:-1].split(',')
                d['attributes'].append((elements[0] + '_datenum_' + java_simple_date(elements[1]), 'STRING'))
                df[atr] = df[atr].dt.strftime(elements[1])
                continue
            if len(atr)>11 and atr[:12]=='decimalyear(':
                def decyear2date(values):
                    """Convert decimal year into a date object"""
                    new = []
                    for i, decyear in enumerate(values):
                        year = int(np.floor(decyear))
                        dec = decyear-year
                        end = np.datetime64(str(year+1)+'-01-01')
                        start = np.datetime64(str(year)+'-01-01')
                        diff=end-start
                        days = dec*(diff/np.timedelta64(1, 'D'))
                        # round to nearest day
                        add = np.timedelta64(int(np.round(days)), 'D')
                        new.append(start+add)
                    return np.asarray(new)
                elements = atr[12:-1].split(',')
                d['attributes'].append((elements[0] + '_datenum_' + java_simple_date(elements[1]), 'STRING'))
                df[atr] = decyear2date(df[atr].values) #
                df[atr] = df[atr].dt.strftime(elements[1])
                continue
        # Untyped column: infer the ARFF type from the first value;
        # anything unrecognized is JSON-encoded as a STRING column.
        field = tidy_field(atr)
        el = df[atr][0]
        type_assigned=False
        for t in types:
            if isinstance(el, tuple(types[t])):
                d['attributes'].append((field, t))
                type_assigned=True
                break
        if not type_assigned:
            import json
            d['attributes'].append((field+'_json', 'STRING'))
            df[atr] = df[atr].apply(json.dumps)
    d['data'] = []
    for ind, row in df.iterrows():
        d['data'].append(list(row))
    import textwrap as tw
    width = 78
    # Assemble the free-text description from the pods metadata.
    d['description'] = dataset_name + "\n\n"
    if 'info' in pods_data and pods_data['info']:
        d['description'] += "\n".join(tw.wrap(pods_data['info'], width)) + "\n\n"
    if 'details' in pods_data and pods_data['details']:
        d['description'] += "\n".join(tw.wrap(pods_data['details'], width))
    if 'citation' in pods_data and pods_data['citation']:
        d['description'] += "\n\n" + "Citation" "\n\n" + "\n".join(tw.wrap(pods_data['citation'], width))
    d['relation'] = dataset_name
    import arff
    string = arff.dumps(d)
    import re
    # Rewrite the sentinel "<name>_datenum_<fmt>" STRING attributes into
    # proper ARFF DATE attributes with a Java date format.
    string = re.sub(r'\@ATTRIBUTE "?(.*)_datenum_(.*)"? STRING',
                    r'@ATTRIBUTE "\1" DATE [\2]',
                    string)
    f = open(dataset_name + '.arff', 'w')
    f.write(string)
    f.close()
|
Write an arff file from a data set loaded in from pods
|
entailment
|
def to_arff(dataset, **kwargs):
    """Take a pods data set and write it as an ARFF file.

    Args:
        dataset (callable): pods dataset-loading function; its
            ``__name__`` seeds the output file name.
        **kwargs: forwarded to ``dataset``; the values are also folded
            into the output file name (lists joined with '|').
    """
    pods_data = dataset(**kwargs)
    # Encode the call arguments into the output file name.
    vals = list(kwargs.values())
    for i, v in enumerate(vals):
        if isinstance(v, list):
            vals[i] = '|'.join(v)
        else:
            vals[i] = str(v)
    args = '_'.join(vals)
    n = dataset.__name__
    if len(args) > 0:
        n += '_' + args
    n = n.replace(' ', '-')
    ks = pods_data.keys()
    d = None
    if 'Y' in ks and 'X' in ks:
        d = pd.DataFrame(pods_data['X'])
        if 'Xtest' in ks:
            # DataFrame.append was removed in pandas 2.0; pd.concat is
            # the supported equivalent.
            d = pd.concat([d, pd.DataFrame(pods_data['Xtest'])],
                          ignore_index=True)
        if 'covariates' in ks:
            d.columns = pods_data['covariates']
        dy = pd.DataFrame(pods_data['Y'])
        if 'Ytest' in ks:
            dy = pd.concat([dy, pd.DataFrame(pods_data['Ytest'])],
                           ignore_index=True)
        if 'response' in ks:
            dy.columns = pods_data['response']
        # Merge responses into the covariate frame, prefixing with 'y'
        # on a column-name clash.
        for c in dy.columns:
            if c not in d.columns:
                d[c] = dy[c]
            else:
                d['y' + str(c)] = dy[c]
    elif 'Y' in ks:
        d = pd.DataFrame(pods_data['Y'])
        if 'Ytest' in ks:
            d = pd.concat([d, pd.DataFrame(pods_data['Ytest'])],
                          ignore_index=True)
    elif 'data' in ks:
        d = pd.DataFrame(pods_data['data'])
    if d is not None:
        df2arff(d, n, pods_data)
|
Take a pods data set and write it as an ARFF file
|
entailment
|
def epomeo_gpx(data_set='epomeo_gpx', sample_every=4):
    """Data set of three GPS traces of the same movement on Mt Epomeo in Ischia. Requires gpxpy to run.

    Args:
        data_set (str): cache key / directory for the download.
        sample_every (int): keep every ``sample_every``-th GPS point.

    Returns:
        dict: pods data dictionary whose 'X' entry holds seconds,
        latitude, longitude and elevation.
    """
    import gpxpy
    import gpxpy.gpx
    if not data_available(data_set):
        download_data(data_set)
    files = ['endomondo_1', 'endomondo_2', 'garmin_watch_via_endomondo','viewranger_phone', 'viewranger_tablet']
    X = []
    for file in files:
        gpx_file = open(os.path.join(data_path, 'epomeo_gpx', file + '.gpx'), 'r')
        gpx = gpxpy.parse(gpx_file)
        # NOTE(review): `segment` is assigned but never used; the
        # comprehension below re-binds the name while flattening all
        # tracks/segments.
        segment = gpx.tracks[0].segments[0]
        points = [point for track in gpx.tracks for segment in track.segments for point in segment.points]
        # Time column is seconds relative to 2013-08-21.
        data = [[(point.time-datetime.datetime(2013,8,21)).total_seconds(), point.latitude, point.longitude, point.elevation] for point in points]
        X.append(np.asarray(data)[::sample_every, :])
        gpx_file.close()
    if pandas_available:
        # NOTE(review): when pandas is available only the first trace
        # (X[0]) is returned as a DataFrame -- confirm this is intended.
        X = pd.DataFrame(X[0], columns=['seconds', 'latitude', 'longitude', 'elevation'])
        X.set_index(keys='seconds', inplace=True)
    return data_details_return({'X' : X, 'info' : 'Data is an array containing time in seconds, latitude, longitude and elevation in that order.'}, data_set)
|
Data set of three GPS traces of the same movement on Mt Epomeo in Ischia. Requires gpxpy to run.
|
entailment
|
def pmlr(volumes='all', data_set='pmlr'):
    """Abstracts from the Proceedings of Machine Learning Research.

    Args:
        volumes: 'all' or a collection of volume numbers to fetch.
        data_set (str): cache key / directory for the download.

    Returns:
        dict: pods data dictionary whose 'Y' entry holds one record per
        paper (a pandas DataFrame when pandas is available).
    """
    if not data_available(data_set):
        download_data(data_set)
    proceedings_file = open(os.path.join(data_path, data_set, 'proceedings.yaml'), 'r')
    import yaml
    # NOTE(review): yaml.load without an explicit Loader is unsafe on
    # untrusted input and requires Loader= in modern PyYAML; consider
    # yaml.safe_load here and below.
    proceedings = yaml.load(proceedings_file)
    # Create a new resources entry for downloading contents of proceedings.
    data_name_full = 'pmlr_volumes'
    data_resources[data_name_full] = data_resources[data_set].copy()
    data_resources[data_name_full]['files'] = []
    data_resources[data_name_full]['dirs'] = []
    data_resources[data_name_full]['urls'] = []
    for entry in proceedings:
        if volumes=='all' or entry['volume'] in volumes:
            file = entry['yaml'].split('/')[-1]
            dir = 'v' + str(entry['volume'])
            data_resources[data_name_full]['files'].append([file])
            data_resources[data_name_full]['dirs'].append([dir])
            data_resources[data_name_full]['urls'].append(data_resources[data_set]['urls'][0])
    Y = []
    # Download the volume data
    if not data_available(data_name_full):
        download_data(data_name_full)
    for entry in reversed(proceedings):
        volume = entry['volume']
        if volumes == 'all' or volume in volumes:
            file = entry['yaml'].split('/')[-1]
            volume_file = open(os.path.join(
                data_path, data_name_full,
                'v'+str(volume), file
            ), 'r')
            Y+=yaml.load(volume_file)
    if pandas_available:
        Y = pd.DataFrame(Y)
        Y['published'] = pd.to_datetime(Y['published'])
        #Y.columns.values[4] = json_object('authors')
        #Y.columns.values[7] = json_object('editors')
        # Flatten CSL-style structured fields into plain values.
        Y['issued'] = Y['issued'].apply(lambda x: np.datetime64(datetime.datetime(*x['date-parts'])))
        Y['author'] = Y['author'].apply(lambda x: [str(author['given']) + ' ' + str(author['family']) for author in x])
        Y['editor'] = Y['editor'].apply(lambda x: [str(editor['given']) + ' ' + str(editor['family']) for editor in x])
        # Tag the date columns with the datetime64_ type marker used by
        # the ARFF exporter; positions 14/11 correspond to 'published'
        # and 'issued' in this frame.
        columns = list(Y.columns)
        columns[14] = datetime64_('published')
        columns[11] = datetime64_('issued')
        Y.columns = columns
    return data_details_return({'Y' : Y, 'info' : 'Data is a pandas data frame containing each paper, its abstract, authors, volumes and venue.'}, data_set)
|
Abstracts from the Proceedings of Machine Learning Research
|
entailment
|
def football_data(season='1617', data_set='football_data'):
    """Football data from English games since 1993. This downloads data from football-data.co.uk for the given season.

    Args:
        season (str): four digits, two-digit start and end year,
            e.g. '1617' for 2016/17.
        data_set (str): cache key / directory for the download.

    Returns:
        dict: pods data dictionary with X = (league, match day, home
        team, away team) and Y = (home score, away score).
    """
    league_dict = {'E0':0, 'E1':1, 'E2': 2, 'E3': 3, 'EC':4}
    def league2num(string):
        # Map the league code column to a small integer.
        if isinstance(string, bytes):
            string = string.decode('utf-8')
        return league_dict[string]
    def football2num(string):
        # Incrementally assign an integer id to each team name,
        # recorded in the module-level football_dict.
        if isinstance(string, bytes):
            string = string.decode('utf-8')
        if string in football_dict:
            return football_dict[string]
        else:
            football_dict[string] = len(football_dict)+1
            return len(football_dict)+1
    def datestr2num(s):
        # Convert 'dd/mm/yy' into a matplotlib date number.
        import datetime
        from matplotlib.dates import date2num
        return date2num(datetime.datetime.strptime(s.decode('utf-8'),'%d/%m/%y'))
    # Derive a season-specific resource entry from the base one.
    data_set_season = data_set + '_' + season
    data_resources[data_set_season] = copy.deepcopy(data_resources[data_set])
    data_resources[data_set_season]['urls'][0]+=season + '/'
    start_year = int(season[0:2])
    end_year = int(season[2:4])
    files = ['E0.csv', 'E1.csv', 'E2.csv', 'E3.csv']
    # NOTE(review): the EC (Conference) file is only added for this
    # start-year window -- presumably the seasons where it exists on
    # football-data.co.uk; confirm against the site.
    if start_year>4 and start_year < 93:
        files += ['EC.csv']
    data_resources[data_set_season]['files'] = [files]
    if not data_available(data_set_season):
        download_data(data_set_season)
    start = True
    for file in reversed(files):
        filename = os.path.join(data_path, data_set_season, file)
        # rewrite files removing blank rows.
        writename = os.path.join(data_path, data_set_season, 'temp.csv')
        input = open(filename, encoding='ISO-8859-1')
        output = open(writename, 'w')
        writer = csv.writer(output)
        for row in csv.reader(input):
            if any(field.strip() for field in row):
                writer.writerow(row)
        input.close()
        output.close()
        # Columns 0-5: league, date, home team, away team, home score,
        # away score -- converted to numeric via the helpers above.
        table = np.loadtxt(writename,skiprows=1, usecols=(0, 1, 2, 3, 4, 5), converters = {0: league2num, 1: datestr2num, 2:football2num, 3:football2num}, delimiter=',')
        if start:
            X = table[:, :4]
            Y = table[:, 4:]
            start=False
        else:
            X = np.append(X, table[:, :4], axis=0)
            Y = np.append(Y, table[:, 4:], axis=0)
    return data_details_return({'X': X, 'Y': Y, 'covariates': [discrete(league_dict, 'league'), datenum('match_day'), discrete(football_dict, 'home team'), discrete(football_dict, 'away team')], 'response': [integer('home score'), integer('away score')]}, data_set)
|
Football data from English games since 1993. This downloads data from football-data.co.uk for the given season.
|
entailment
|
def lee_yeast_ChIP(data_set='lee_yeast_ChIP'):
    """Yeast ChIP data from Lee et al."""
    from pandas import read_csv
    if not data_available(data_set):
        download_data(data_set)
    filename = os.path.join(data_path, data_set, 'binding_by_gene.tsv')
    S = read_csv(filename, header=1, index_col=0, sep='\t')
    # Named columns hold transcription factors; spill-over columns from the
    # TSV header come through as 'Unnamed: k'.
    transcription_factors = [col for col in S.columns if col[:7] != 'Unnamed']
    # The first three unnamed columns carry per-gene annotation text.
    annotations = S[['Unnamed: 1', 'Unnamed: 2', 'Unnamed: 3']]
    S = S[transcription_factors]
    return data_details_return({'annotations': annotations, 'Y': S, 'transcription_factors': transcription_factors}, data_set)
|
Yeast ChIP data from Lee et al.
|
entailment
|
def google_trends(query_terms=['big data', 'machine learning', 'data science'], data_set='google_trends', refresh_data=False):
    """
    Data downloaded from Google trends for given query terms. Warning,
    if you use this function multiple times in a row you get blocked
    due to terms of service violations.

    The function will cache the result of any query in an attempt to
    avoid this. If you wish to refresh an old query set refresh_data
    to True. The function is inspired by this notebook:

    http://nbviewer.ipython.org/github/sahuguet/notebooks/blob/master/GoogleTrends%20meet%20Notebook.ipynb
    """
    # Sort a *copy*: the old in-place sort mutated both the caller's list and
    # the shared mutable default argument.
    query_terms = sorted(query_terms)
    import pandas as pd
    # Cache directory name is derived from the sorted query terms.
    dir_path = os.path.join(data_path, 'google_trends')
    if not os.path.isdir(dir_path):
        os.makedirs(dir_path)
    dir_name = '-'.join(query_terms).replace(' ', '_')
    dir_path = os.path.join(dir_path, dir_name)
    file_name = os.path.join(dir_path, 'data.csv')
    if not os.path.exists(file_name) or refresh_data:
        print("Accessing Google trends to acquire the data. Note that repeated accesses will result in a block due to a google terms of service violation. Failure at this point may be due to such blocks.")
        # quote the query terms.
        quoted_terms = [quote(term) for term in query_terms]
        print("Query terms: ", ', '.join(query_terms))
        print("Fetching query:")
        query = 'http://www.google.com/trends/fetchComponent?q=%s&cid=TIMESERIES_GRAPH_0&export=3' % ",".join(quoted_terms)
        data = urlopen(query).read().decode('utf8')
        print("Done.")
        # Strip the Javascript header+footer and translate new Date(....,..,..)
        # into YYYY-MM-DD (JS months are zero-based, hence the +1).
        header = """// Data table response\ngoogle.visualization.Query.setResponse("""
        data = data[len(header):-2]
        data = re.sub(r'new Date\((\d+),(\d+),(\d+)\)', (lambda m: '"%s-%02d-%02d"' % (m.group(1).strip(), 1 + int(m.group(2)), int(m.group(3)))), data)
        timeseries = json.loads(data)
        columns = [k['label'] for k in timeseries['table']['cols']]
        rows = list(map(lambda x: [k['v'] for k in x['c']], timeseries['table']['rows']))
        df = pd.DataFrame(rows, columns=columns)
        if not os.path.isdir(dir_path):
            os.makedirs(dir_path)
        df.to_csv(file_name)
    else:
        print("Reading cached data for google trends. To refresh the cache set 'refresh_data=True' when calling this function.")
        print("Query terms: ", ', '.join(query_terms))
        df = pd.read_csv(file_name, parse_dates=[0])
    columns = df.columns
    terms = len(query_terms)
    import datetime
    from matplotlib.dates import date2num
    # Fix: DataFrame.ix was deprecated and removed from pandas; use .loc.
    X = np.asarray([(date2num(datetime.datetime.strptime(df.loc[row, 'Date'], '%Y-%m-%d')), i) for i in range(terms) for row in df.index])
    Y = np.asarray([[df.loc[row, query_terms[i]]] for i in range(terms) for row in df.index])
    cats = {}
    for i in range(terms):
        cats[query_terms[i]] = i
    return data_details_return({'data frame': df, 'X': X, 'Y': Y, 'query_terms': query_terms, 'info': "Data downloaded from google trends with query terms: " + ', '.join(query_terms) + '.', 'covariates': [datenum('date'), discrete(cats, 'query_terms')], 'response': ['normalized interest']}, data_set)
|
Data downloaded from Google trends for given query terms. Warning,
if you use this function multiple times in a row you get blocked
due to terms of service violations.
The function will cache the result of any query in an attempt to
avoid this. If you wish to refresh an old query set refresh_data
to True. The function is inspired by this notebook:
http://nbviewer.ipython.org/github/sahuguet/notebooks/blob/master/GoogleTrends%20meet%20Notebook.ipynb
|
entailment
|
def osu_run1(data_set='osu_run1', sample_every=4):
    """Ohio State University's Run1 motion capture data set.

    Args:
        data_set (str): key of the entry in ``data_resources``.
        sample_every (int): keep every ``sample_every``-th frame.
    """
    path = os.path.join(data_path, data_set)
    if not data_available(data_set):
        import zipfile
        download_data(data_set)
        # Use a context manager so the archive handle is always closed
        # (the old code never closed it); extractall replaces the manual
        # per-member extraction loop.
        with zipfile.ZipFile(os.path.join(data_path, data_set, 'run1TXT.ZIP'), 'r') as archive:
            archive.extractall(path)
    from . import mocap
    Y, connect = mocap.load_text_data('Aug210106', path)
    # Subsample the frames; note the -1 stop drops the final frame.
    Y = Y[0:-1:sample_every, :]
    return data_details_return({'Y': Y, 'connect': connect}, data_set)
|
Ohio State University's Run1 motion capture data set.
|
entailment
|
def toy_linear_1d_classification(seed=default_seed):
    """Simple classification data in one dimension for illustrating models."""
    def sample_class(f):
        # Squash the latent function through a sigmoid, draw Bernoulli
        # labels and recode them as +/-1.
        prob = 1. / (1. + np.exp(-f))
        draws = np.random.binomial(1, prob)
        return np.where(draws, 1, -1)
    np.random.seed(seed=seed)
    # Two overlapping Gaussian clusters of 20 points each, centred at -3 and +3.
    left_cluster = np.random.normal(-3, 5, 20)
    right_cluster = np.random.normal(3, 5, 20)
    X = (np.r_[left_cluster, right_cluster])[:, None]
    latent = 2. * X
    return {'X': X,
            'Y': sample_class(latent),
            'F': latent,
            'covariates': ['X'],
            'response': [discrete({'positive': 1, 'negative': -1})],
            'seed': seed}
|
Simple classification data in one dimension for illustrating models.
|
entailment
|
def airline_delay(data_set='airline_delay', num_train=700000, num_test=100000, seed=default_seed):
    """Airline delay data used in Gaussian Processes for Big Data by Hensman, Fusi and Lawrence"""
    import pandas as pd
    if not data_available(data_set):
        download_data(data_set)
    filename = os.path.join(data_path, data_set, 'filtered_data.pickle')
    # 1. Load the dataset
    data = pd.read_pickle(filename)
    # WARNING: the year column is discarded.
    data.pop('Year')
    # Split response (arrival delay) from the remaining covariates.
    Yall = data.pop('ArrDelay').values[:, None]
    Xall = data.values
    # Only keep as many rows as are needed (memory!!).
    total = num_train + num_test
    Xall = Xall[:total]
    Yall = Yall[:total]
    # Shuffle and carve off the test points.
    np.random.seed(seed=seed)
    shuffled = permute(Yall.shape[0])
    test_idx = shuffled[:num_test]
    train_idx = shuffled[num_test:]
    X, Y = Xall[train_idx], Yall[train_idx]
    Xtest, Ytest = Xall[test_idx], Yall[test_idx]
    covariates = ['month', 'day of month', 'day of week', 'departure time', 'arrival time', 'air time', 'distance to travel', 'age of aircraft / years']
    response = ['delay']
    return data_details_return({'X': X, 'Y': Y, 'Xtest': Xtest, 'Ytest': Ytest, 'seed': seed, 'info': "Airline delay data used for demonstrating Gaussian processes for big data.", 'covariates': covariates, 'response': response}, data_set)
|
Airline delay data used in Gaussian Processes for Big Data by Hensman, Fusi and Lawrence
|
entailment
|
def olympic_sprints(data_set='rogers_girolami_data'):
    """All olympics sprint winning times for multiple output prediction.

    Stacks the men's and women's 100m/200m/400m winning times into one data
    set, encoding the event as a second input column (index 0-5, see
    ``output_info`` in the returned dict).
    """
    X = np.zeros((0, 2))
    Y = np.zeros((0, 1))
    cats = {}
    for i, dataset in enumerate([olympic_100m_men,
                                 olympic_100m_women,
                                 olympic_200m_men,
                                 olympic_200m_women,
                                 olympic_400m_men,
                                 olympic_400m_women]):
        data = dataset()
        year = data['X']
        time = data['Y']
        # Append the event index as a second input column alongside the year.
        X = np.vstack((X, np.hstack((year, np.ones_like(year) * i))))
        Y = np.vstack((Y, time))
        cats[dataset.__name__] = i
    # NOTE: the old code also wrote X/Y/info back into the last iteration's
    # 'data' dict — dead stores on a discarded object, removed here.
    return data_details_return({
        'X': X,
        'Y': Y,
        'covariates': [decimalyear('year', '%Y'), discrete(cats, 'event')],
        'response': ['time'],
        'info': "Olympics sprint event winning for men and women to 2008. Data is from Rogers and Girolami's First Course in Machine Learning.",
        'output_info': {
            0: '100m Men',
            1: '100m Women',
            2: '200m Men',
            3: '200m Women',
            4: '400m Men',
            5: '400m Women'}
        }, data_set)
|
All olympics sprint winning times for multiple output prediction.
|
entailment
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.