sentence1 stringlengths 52 3.87M | sentence2 stringlengths 1 47.2k | label stringclasses 1 value |
|---|---|---|
def to_local(self, dt):
"""Convert any timestamp to local time (with tzinfo)."""
if dt.tzinfo is None:
dt = dt.replace(tzinfo=self.utc)
return dt.astimezone(self.local) | Convert any timestamp to local time (with tzinfo). | entailment |
def to_utc(self, dt):
"""Convert any timestamp to UTC (with tzinfo)."""
if dt.tzinfo is None:
return dt.replace(tzinfo=self.utc)
return dt.astimezone(self.utc) | Convert any timestamp to UTC (with tzinfo). | entailment |
def to_naive(self, dt):
"""Convert any timestamp to pywws (utc, no tzinfo)."""
if dt.tzinfo is None:
return dt
return dt.astimezone(self.utc).replace(tzinfo=None) | Convert any timestamp to pywws (utc, no tzinfo). | entailment |
def local_replace(self, dt, use_dst=True, _recurse=False, **kwds):
"""Return pywws timestamp (utc, no tzinfo) for the most recent
local time before the pywws timestamp dt, with datetime replace
applied.
"""
local_time = dt + self.standard_offset
if use_dst:
dst_offset = self.dst(local_time)
if dst_offset:
local_time += dst_offset
adjusted_time = local_time.replace(**kwds)
if adjusted_time > local_time and not _recurse:
return self.local_replace(
dt - DAY, use_dst=use_dst, _recurse=True, **kwds)
adjusted_time -= dst_offset
if self.dst(adjusted_time):
return adjusted_time - self.standard_offset
adjusted_time = local_time.replace(**kwds)
if use_dst:
dst_offset = self.dst(adjusted_time)
adjusted_time -= dst_offset
if adjusted_time > local_time and not _recurse:
return self.local_replace(
dt - DAY, use_dst=use_dst, _recurse=True, **kwds)
return adjusted_time - self.standard_offset | Return pywws timestamp (utc, no tzinfo) for the most recent
local time before the pywws timestamp dt, with datetime replace
applied. | entailment |
def get(self, section, option, default=None):
"""Get a parameter value and return a string.
If default is specified and section or option are not defined
in the file, they are created and set to default, which is
then the return value.
"""
with self._lock:
if not self._config.has_option(section, option):
if default is not None:
self._set(section, option, default)
return default
return self._config.get(section, option) | Get a parameter value and return a string.
If default is specified and section or option are not defined
in the file, they are created and set to default, which is
then the return value. | entailment |
def set(self, section, option, value):
"""Set option in section to string value."""
with self._lock:
self._set(section, option, value) | Set option in section to string value. | entailment |
def unset(self, section, option):
"""Remove option from section."""
with self._lock:
if not self._config.has_section(section):
return
if self._config.has_option(section, option):
self._config.remove_option(section, option)
self._dirty = True
if not self._config.options(section):
self._config.remove_section(section)
self._dirty = True | Remove option from section. | entailment |
def check_params(self, *keys):
"""Ensure user has set required values in weather.ini.
Normally the :py:data:`~ServiceBase.config` names with
``required`` set are checked, but if your uploader has a
``register`` method you may need to check for other data.
:param str keys: the :py:data:`~ServiceBase.config` names to
verify.
"""
for key in keys:
if not self.params[key]:
raise RuntimeError('"{}" not set in weather.ini'.format(key)) | Ensure user has set required values in weather.ini.
Normally the :py:data:`~ServiceBase.config` names with
``required`` set are checked, but if your uploader has a
``register`` method you may need to check for other data.
:param str keys: the :py:data:`~ServiceBase.config` names to
verify. | entailment |
def monitor(i):
"""Given an iterator, yields data from it
but prints progress every 10,000 records"""
count = 0
for x in i:
count+=1
if count % 10000 == 0:
logger.info("%d records so far, current record is %s",
count, x["idx"])
yield x | Given an iterator, yields data from it
but prints progress every 10,000 records | entailment |
def calibrate_data(params, raw_data, calib_data):
"""'Calibrate' raw data, using a user-supplied function."""
start = calib_data.before(datetime.max)
if start is None:
start = datetime.min
start = raw_data.after(start + SECOND)
if start is None:
return start
del calib_data[start:]
calibrator = Calib(params, raw_data)
def calibgen(inputdata):
"""Internal generator function"""
count = 0
for data in inputdata:
idx = data['idx']
count += 1
if count % 10000 == 0:
logger.info("calib: %s", idx.isoformat(' '))
elif count % 500 == 0:
logger.debug("calib: %s", idx.isoformat(' '))
for key in ('rain', 'abs_pressure', 'temp_in'):
if data[key] is None:
logger.error('Ignoring invalid data at %s', idx.isoformat(' '))
break
else:
yield calibrator.calib(data)
calib_data.update(calibgen(raw_data[start:]))
return start | Calibrate' raw data, using a user-supplied function. | entailment |
def generate_hourly(calib_data, hourly_data, process_from):
"""Generate hourly summaries from calibrated data."""
start = hourly_data.before(datetime.max)
if start is None:
start = datetime.min
start = calib_data.after(start + SECOND)
if process_from:
if start:
start = min(start, process_from)
else:
start = process_from
if start is None:
return start
# set start of hour in local time (not all time offsets are integer hours)
start += timezone.standard_offset
start = start.replace(minute=0, second=0)
start -= timezone.standard_offset
del hourly_data[start:]
# preload pressure history, and find last valid rain
prev = None
pressure_history = deque()
last_rain = None
for data in calib_data[start - HOURx3:start]:
if data['rel_pressure']:
pressure_history.append((data['idx'], data['rel_pressure']))
if data['rain'] is not None:
last_rain = data['rain']
prev = data
# iterate over data in one hour chunks
stop = calib_data.before(datetime.max)
acc = HourAcc(last_rain)
def hourlygen(inputdata, prev):
"""Internal generator function"""
hour_start = start
count = 0
while hour_start <= stop:
count += 1
if count % 1008 == 0:
logger.info("hourly: %s", hour_start.isoformat(' '))
elif count % 24 == 0:
logger.debug("hourly: %s", hour_start.isoformat(' '))
hour_end = hour_start + HOUR
acc.reset()
for data in inputdata[hour_start:hour_end]:
if data['rel_pressure']:
pressure_history.append((data['idx'], data['rel_pressure']))
if prev:
err = data['idx'] - prev['idx']
if abs(err - timedelta(minutes=data['delay'])) > TIME_ERR:
logger.info('unexpected data interval %s %s',
data['idx'].isoformat(' '), str(err))
acc.add_raw(data)
prev = data
new_data = acc.result()
if new_data and (new_data['idx'] - hour_start) >= timedelta(minutes=9):
# compute pressure trend
new_data['pressure_trend'] = None
if new_data['rel_pressure']:
target = new_data['idx'] - HOURx3
while (len(pressure_history) >= 2 and
abs(pressure_history[0][0] - target) >
abs(pressure_history[1][0] - target)):
pressure_history.popleft()
if (pressure_history and
abs(pressure_history[0][0] - target) < HOUR):
new_data['pressure_trend'] = (
new_data['rel_pressure'] - pressure_history[0][1])
# store new hourly data
yield new_data
hour_start = hour_end
hourly_data.update(hourlygen(calib_data, prev))
return start | Generate hourly summaries from calibrated data. | entailment |
def generate_daily(day_end_hour, use_dst,
calib_data, hourly_data, daily_data, process_from):
"""Generate daily summaries from calibrated and hourly data."""
start = daily_data.before(datetime.max)
if start is None:
start = datetime.min
start = calib_data.after(start + SECOND)
if process_from:
if start:
start = min(start, process_from)
else:
start = process_from
if start is None:
return start
# round to start of this day, in local time
start = timezone.local_replace(
start, use_dst=use_dst, hour=day_end_hour, minute=0, second=0)
del daily_data[start:]
stop = calib_data.before(datetime.max)
acc = DayAcc()
def dailygen(inputdata):
"""Internal generator function"""
day_start = start
count = 0
while day_start <= stop:
count += 1
if count % 30 == 0:
logger.info("daily: %s", day_start.isoformat(' '))
else:
logger.debug("daily: %s", day_start.isoformat(' '))
day_end = day_start + DAY
if use_dst:
# day might be 23 or 25 hours long
day_end = timezone.local_replace(
day_end + HOURx3, use_dst=use_dst, hour=day_end_hour)
acc.reset()
for data in inputdata[day_start:day_end]:
acc.add_raw(data)
for data in hourly_data[day_start:day_end]:
acc.add_hourly(data)
new_data = acc.result()
if new_data:
new_data['start'] = day_start
yield new_data
day_start = day_end
daily_data.update(dailygen(calib_data))
return start | Generate daily summaries from calibrated and hourly data. | entailment |
def generate_monthly(rain_day_threshold, day_end_hour, use_dst,
daily_data, monthly_data, process_from):
"""Generate monthly summaries from daily data."""
start = monthly_data.before(datetime.max)
if start is None:
start = datetime.min
start = daily_data.after(start + SECOND)
if process_from:
if start:
start = min(start, process_from)
else:
start = process_from
if start is None:
return start
# set start to start of first day of month (local time)
start = timezone.local_replace(
start, use_dst=use_dst, day=1, hour=day_end_hour, minute=0, second=0)
if day_end_hour >= 12:
# month actually starts on the last day of previous month
start -= DAY
del monthly_data[start:]
stop = daily_data.before(datetime.max)
if stop is None:
return None
acc = MonthAcc(rain_day_threshold)
def monthlygen(inputdata):
"""Internal generator function"""
month_start = start
count = 0
while month_start <= stop:
count += 1
if count % 12 == 0:
logger.info("monthly: %s", month_start.isoformat(' '))
else:
logger.debug("monthly: %s", month_start.isoformat(' '))
month_end = month_start + WEEK
if month_end.month < 12:
month_end = month_end.replace(month=month_end.month+1)
else:
month_end = month_end.replace(month=1, year=month_end.year+1)
month_end = month_end - WEEK
if use_dst:
# month might straddle summer time start or end
month_end = timezone.local_replace(
month_end + HOURx3, use_dst=use_dst, hour=day_end_hour)
acc.reset()
for data in inputdata[month_start:month_end]:
acc.add_daily(data)
new_data = acc.result()
if new_data:
new_data['start'] = month_start
yield new_data
month_start = month_end
monthly_data.update(monthlygen(daily_data))
return start | Generate monthly summaries from daily data. | entailment |
def process_data(context):
"""Generate summaries from raw weather station data.
The meteorological day end (typically 2100 or 0900 local time) is
set in the preferences file ``weather.ini``. The default value is
2100 (2200 during DST), following the historical convention for
weather station readings.
"""
logger.info('Generating summary data')
# get time of last record
last_raw = context.raw_data.before(datetime.max)
if last_raw is None:
raise IOError('No data found. Check data directory parameter.')
# get daytime end hour (in local time)
day_end_hour, use_dst = get_day_end_hour(context.params)
# get other config
rain_day_threshold = float(
context.params.get('config', 'rain day threshold', '0.2'))
# calibrate raw data
start = calibrate_data(context.params, context.raw_data, context.calib_data)
# generate hourly data
start = generate_hourly(context.calib_data, context.hourly_data, start)
# generate daily data
start = generate_daily(day_end_hour, use_dst,
context.calib_data, context.hourly_data, context.daily_data, start)
# generate monthly data
generate_monthly(rain_day_threshold, day_end_hour, use_dst,
context.daily_data, context.monthly_data, start)
return 0 | Generate summaries from raw weather station data.
The meteorological day end (typically 2100 or 0900 local time) is
set in the preferences file ``weather.ini``. The default value is
2100 (2200 during DST), following the historical convention for
weather station readings. | entailment |
def read_data(self, size):
"""Receive data from the device.
If the read fails for any reason, an :obj:`IOError` exception
is raised.
:param size: the number of bytes to read.
:type size: int
:return: the data received.
:rtype: list(int)
"""
result = self.dev.bulkRead(0x81, size, timeout=1200)
if not result or len(result) < size:
raise IOError('pywws.device_libusb1.USBDevice.read_data failed')
# Python2 libusb1 version 1.5 and earlier returns a string
if not isinstance(result[0], int):
result = map(ord, result)
return list(result) | Receive data from the device.
If the read fails for any reason, an :obj:`IOError` exception
is raised.
:param size: the number of bytes to read.
:type size: int
:return: the data received.
:rtype: list(int) | entailment |
def write_data(self, buf):
"""Send data to the device.
If the write fails for any reason, an :obj:`IOError` exception
is raised.
:param buf: the data to send.
:type buf: list(int)
:return: success status.
:rtype: bool
"""
if sys.version_info[0] < 3:
str_buf = ''.join(map(chr, buf))
else:
str_buf = bytes(buf)
result = self.dev.controlWrite(
libusb1.LIBUSB_ENDPOINT_OUT | libusb1.LIBUSB_TYPE_CLASS |
libusb1.LIBUSB_RECIPIENT_INTERFACE,
libusb1.LIBUSB_REQUEST_SET_CONFIGURATION,
0x200, 0, str_buf, timeout=50)
if result != len(buf):
raise IOError('pywws.device_libusb1.USBDevice.write_data failed')
return True | Send data to the device.
If the write fails for any reason, an :obj:`IOError` exception
is raised.
:param buf: the data to send.
:type buf: list(int)
:return: success status.
:rtype: bool | entailment |
def before(self, idx):
"""Return datetime of newest existing data record whose
datetime is < idx.
Might not even be in the same year! If no such record exists,
return None."""
if not isinstance(idx, datetime):
raise TypeError("'%s' is not %s" % (idx, datetime))
day = min(idx.date(), self._hi_limit - DAY)
while day >= self._lo_limit:
if day < self._rd_cache.lo or day >= self._rd_cache.hi:
self._load(self._rd_cache, day)
self._rd_cache.set_ptr(idx)
if self._rd_cache.ptr > 0:
return self._rd_cache.data[self._rd_cache.ptr - 1]['idx']
day = self._rd_cache.lo - DAY
return None | Return datetime of newest existing data record whose
datetime is < idx.
Might not even be in the same year! If no such record exists,
return None. | entailment |
def after(self, idx):
"""Return datetime of oldest existing data record whose
datetime is >= idx.
Might not even be in the same year! If no such record exists,
return None."""
if not isinstance(idx, datetime):
raise TypeError("'%s' is not %s" % (idx, datetime))
day = max(idx.date(), self._lo_limit)
while day < self._hi_limit:
if day < self._rd_cache.lo or day >= self._rd_cache.hi:
self._load(self._rd_cache, day)
self._rd_cache.set_ptr(idx)
if self._rd_cache.ptr < len(self._rd_cache.data):
return self._rd_cache.data[self._rd_cache.ptr]['idx']
day = self._rd_cache.hi
return None | Return datetime of oldest existing data record whose
datetime is >= idx.
Might not even be in the same year! If no such record exists,
return None. | entailment |
def nearest(self, idx):
"""Return datetime of record whose datetime is nearest idx."""
hi = self.after(idx)
lo = self.before(idx)
if hi is None:
return lo
if lo is None:
return hi
if abs(hi - idx) < abs(lo - idx):
return hi
return lo | Return datetime of record whose datetime is nearest idx. | entailment |
def clear(self):
"""Clears all data from the data store permanently"""
for root, dirs, files in os.walk(self._root_dir, topdown=False):
for file in files:
os.unlink(os.path.join(root, file))
os.rmdir(root)
# Get the root dir back and re-initialise to start again
root_dir = os.path.abspath(
os.path.join(self._root_dir, os.pardir))
self.__init__(root_dir) | Clears all data from the data store permanently | entailment |
def read_data(self, size):
"""Receive data from the device.
If the read fails for any reason, an :obj:`IOError` exception
is raised.
:param size: the number of bytes to read.
:type size: int
:return: the data received.
:rtype: list(int)
"""
result = self.dev.read(0x81, size, timeout=1200)
if not result or len(result) < size:
raise IOError('pywws.device_pyusb1.USBDevice.read_data failed')
return list(result) | Receive data from the device.
If the read fails for any reason, an :obj:`IOError` exception
is raised.
:param size: the number of bytes to read.
:type size: int
:return: the data received.
:rtype: list(int) | entailment |
def write_data(self, buf):
"""Send data to the device.
If the write fails for any reason, an :obj:`IOError` exception
is raised.
:param buf: the data to send.
:type buf: list(int)
:return: success status.
:rtype: bool
"""
bmRequestType = usb.util.build_request_type(
usb.util.ENDPOINT_OUT,
usb.util.CTRL_TYPE_CLASS,
usb.util.CTRL_RECIPIENT_INTERFACE
)
result = self.dev.ctrl_transfer(
bmRequestType=bmRequestType,
bRequest=usb.REQ_SET_CONFIGURATION,
data_or_wLength=buf,
wValue=0x200,
timeout=50)
if result != len(buf):
raise IOError('pywws.device_pyusb1.USBDevice.write_data failed')
return True | Send data to the device.
If the write fails for any reason, an :obj:`IOError` exception
is raised.
:param buf: the data to send.
:type buf: list(int)
:return: success status.
:rtype: bool | entailment |
def pressure_trend_text(trend):
"""Convert pressure trend to a string, as used by the UK met
office.
"""
_ = pywws.localisation.translation.ugettext
if trend > 6.0:
return _(u'rising very rapidly')
elif trend > 3.5:
return _(u'rising quickly')
elif trend > 1.5:
return _(u'rising')
elif trend >= 0.1:
return _(u'rising slowly')
elif trend < -6.0:
return _(u'falling very rapidly')
elif trend < -3.5:
return _(u'falling quickly')
elif trend < -1.5:
return _(u'falling')
elif trend <= -0.1:
return _(u'falling slowly')
return _(u'steady') | Convert pressure trend to a string, as used by the UK met
office. | entailment |
def winddir_average(data, threshold, min_count, decay=1.0):
"""Compute average wind direction (in degrees) for a slice of data.
The wind speed and direction of each data item is converted to a
vector before averaging, so the result reflects the dominant wind
direction during the time period covered by the data.
Setting the ``decay`` parameter converts the filter from a simple
averager to one where the most recent sample carries the highest
weight, and earlier samples have a lower weight according to how
long ago they were.
This process is an approximation of "exponential smoothing". See
`Wikipedia <http://en.wikipedia.org/wiki/Exponential_smoothing>`_
for a detailed discussion.
The parameter ``decay`` corresponds to the value ``(1 - alpha)``
in the Wikipedia description. Because the weather data being
smoothed may not be at regular intervals this parameter is the
decay over 5 minutes. Weather data at other intervals will have
its weight scaled accordingly.
:note: The return value is in degrees, not the 0..15 range used
elsewhere in pywws.
:param data: a slice of pywws raw/calib or hourly data.
:type data: pywws.storage.CoreStore
:param threshold: minimum average windspeed for there to be a
valid wind direction.
:type threshold: float
:param min_count: minimum number of data items for there to be a
valid wind direction.
:type min_count: int
:param decay: filter coefficient decay rate.
:type decay: float
:rtype: float
"""
wind_filter = pywws.process.WindFilter()
count = 0
for item in data:
wind_filter.add(item)
if item['wind_dir'] is not None:
count += 1
if count < min_count:
return None
speed, direction = wind_filter.result()
if speed is None or speed < threshold:
return None
return direction * 22.5 | Compute average wind direction (in degrees) for a slice of data.
The wind speed and direction of each data item is converted to a
vector before averaging, so the result reflects the dominant wind
direction during the time period covered by the data.
Setting the ``decay`` parameter converts the filter from a simple
averager to one where the most recent sample carries the highest
weight, and earlier samples have a lower weight according to how
long ago they were.
This process is an approximation of "exponential smoothing". See
`Wikipedia <http://en.wikipedia.org/wiki/Exponential_smoothing>`_
for a detailed discussion.
The parameter ``decay`` corresponds to the value ``(1 - alpha)``
in the Wikipedia description. Because the weather data being
smoothed may not be at regular intervals this parameter is the
decay over 5 minutes. Weather data at other intervals will have
its weight scaled accordingly.
:note: The return value is in degrees, not the 0..15 range used
elsewhere in pywws.
:param data: a slice of pywws raw/calib or hourly data.
:type data: pywws.storage.CoreStore
:param threshold: minimum average windspeed for there to be a
valid wind direction.
:type threshold: float
:param min_count: minimum number of data items for there to be a
valid wind direction.
:type min_count: int
:param decay: filter coefficient decay rate.
:type decay: float
:rtype: float | entailment |
def winddir_text(pts):
"Convert wind direction from 0..15 to compass point text"
global _winddir_text_array
if pts is None:
return None
if not isinstance(pts, int):
pts = int(pts + 0.5) % 16
if not _winddir_text_array:
_ = pywws.localisation.translation.ugettext
_winddir_text_array = (
_(u'N'), _(u'NNE'), _(u'NE'), _(u'ENE'),
_(u'E'), _(u'ESE'), _(u'SE'), _(u'SSE'),
_(u'S'), _(u'SSW'), _(u'SW'), _(u'WSW'),
_(u'W'), _(u'WNW'), _(u'NW'), _(u'NNW'),
)
return _winddir_text_array[pts] | Convert wind direction from 0..15 to compass point text | entailment |
def wind_bft(ms):
"Convert wind from metres per second to Beaufort scale"
if ms is None:
return None
for bft in range(len(_bft_threshold)):
if ms < _bft_threshold[bft]:
return bft
return len(_bft_threshold) | Convert wind from metres per second to Beaufort scale | entailment |
def dew_point(temp, hum):
"""Compute dew point, using formula from
http://en.wikipedia.org/wiki/Dew_point.
"""
if temp is None or hum is None:
return None
a = 17.27
b = 237.7
gamma = ((a * temp) / (b + temp)) + math.log(float(hum) / 100.0)
return (b * gamma) / (a - gamma) | Compute dew point, using formula from
http://en.wikipedia.org/wiki/Dew_point. | entailment |
def cadhumidex(temp, humidity):
"Calculate Humidity Index as per Canadian Weather Standards"
if temp is None or humidity is None:
return None
# Formulas are adapted to not use e^(...) with no appreciable
# change in accuracy (0.0227%)
saturation_pressure = (6.112 * (10.0**(7.5 * temp / (237.7 + temp))) *
float(humidity) / 100.0)
return temp + (0.555 * (saturation_pressure - 10.0)) | Calculate Humidity Index as per Canadian Weather Standards | entailment |
def usaheatindex(temp, humidity, dew=None):
"""Calculate Heat Index as per USA National Weather Service Standards
See http://en.wikipedia.org/wiki/Heat_index, formula 1. The
formula is not valid for T < 26.7C, Dew Point < 12C, or RH < 40%
"""
if temp is None or humidity is None:
return None
if dew is None:
dew = dew_point(temp, humidity)
if temp < 26.7 or humidity < 40 or dew < 12.0:
return temp
T = (temp * 1.8) + 32.0
R = humidity
c_1 = -42.379
c_2 = 2.04901523
c_3 = 10.14333127
c_4 = -0.22475541
c_5 = -0.00683783
c_6 = -0.05481717
c_7 = 0.00122874
c_8 = 0.00085282
c_9 = -0.00000199
return ((c_1 + (c_2 * T) + (c_3 * R) + (c_4 * T * R) + (c_5 * (T**2)) +
(c_6 * (R**2)) + (c_7 * (T**2) * R) + (c_8 * T * (R**2)) +
(c_9 * (T**2) * (R**2))) - 32.0) / 1.8 | Calculate Heat Index as per USA National Weather Service Standards
See http://en.wikipedia.org/wiki/Heat_index, formula 1. The
formula is not valid for T < 26.7C, Dew Point < 12C, or RH < 40% | entailment |
def wind_chill(temp, wind):
"""Compute wind chill, using formula from
http://en.wikipedia.org/wiki/wind_chill
"""
if temp is None or wind is None:
return None
wind_kph = wind * 3.6
if wind_kph <= 4.8 or temp > 10.0:
return temp
return min(13.12 + (temp * 0.6215) +
(((0.3965 * temp) - 11.37) * (wind_kph ** 0.16)),
temp) | Compute wind chill, using formula from
http://en.wikipedia.org/wiki/wind_chill | entailment |
def apparent_temp(temp, rh, wind):
"""Compute apparent temperature (real feel), using formula from
http://www.bom.gov.au/info/thermal_stress/
"""
if temp is None or rh is None or wind is None:
return None
vap_press = (float(rh) / 100.0) * 6.105 * math.exp(
17.27 * temp / (237.7 + temp))
return temp + (0.33 * vap_press) - (0.70 * wind) - 4.00 | Compute apparent temperature (real feel), using formula from
http://www.bom.gov.au/info/thermal_stress/ | entailment |
def cloud_base(temp, hum):
"""Calculate cumulus cloud base in metres, using formula from
https://en.wikipedia.org/wiki/Cloud_base or
https://de.wikipedia.org/wiki/Kondensationsniveau#Konvektionskondensationsniveau
"""
if temp is None or hum is None:
return None
dew_pt = dew_point(temp, hum)
spread = float(temp) - dew_pt
return spread * 125.0 | Calculate cumulus cloud base in metres, using formula from
https://en.wikipedia.org/wiki/Cloud_base or
https://de.wikipedia.org/wiki/Kondensationsniveau#Konvektionskondensationsniveau | entailment |
def upload(training_dir, algorithm_id=None, writeup=None, api_key=None, ignore_open_monitors=False):
"""Upload the results of training (as automatically recorded by your
env's monitor) to OpenAI Gym.
Args:
training_dir (Optional[str]): A directory containing the results of a training run.
algorithm_id (Optional[str]): An algorithm id indicating the particular version of the algorithm (including choices of parameters) you are running (visit https://gym.openai.com/algorithms to create an id)
writeup (Optional[str]): A Gist URL (of the form https://gist.github.com/<user>/<id>) containing your writeup for this evaluation.
api_key (Optional[str]): Your OpenAI API key. Can also be provided as an environment variable (OPENAI_GYM_API_KEY).
"""
if not ignore_open_monitors:
open_monitors = monitoring._open_monitors()
if len(open_monitors) > 0:
envs = [m.env.spec.id if m.env.spec else '(unknown)' for m in open_monitors]
raise error.Error("Still have an open monitor on {}. You must run 'env.monitor.close()' before uploading.".format(', '.join(envs)))
env_info, training_episode_batch, training_video = upload_training_data(training_dir, api_key=api_key)
env_id = env_info['env_id']
training_episode_batch_id = training_video_id = None
if training_episode_batch:
training_episode_batch_id = training_episode_batch.id
if training_video:
training_video_id = training_video.id
if logger.level <= logging.INFO:
if training_episode_batch_id is not None and training_video_id is not None:
logger.info('[%s] Creating evaluation object from %s with learning curve and training video', env_id, training_dir)
elif training_episode_batch_id is not None:
logger.info('[%s] Creating evaluation object from %s with learning curve', env_id, training_dir)
elif training_video_id is not None:
logger.info('[%s] Creating evaluation object from %s with training video', env_id, training_dir)
else:
raise error.Error("[%s] You didn't have any recorded training data in {}. Once you've used 'env.monitor.start(training_dir)' to start recording, you need to actually run some rollouts. Please join the community chat on https://gym.openai.com if you have any issues.".format(env_id, training_dir))
evaluation = resource.Evaluation.create(
training_episode_batch=training_episode_batch_id,
training_video=training_video_id,
env=env_info['env_id'],
algorithm={
'id': algorithm_id,
},
writeup=writeup,
gym_version=env_info['gym_version'],
api_key=api_key,
# >>>>>>>>> START changes >>>>>>>>>>>>>>>>>>>>>>>>
env_info=env_info,
# <<<<<<<<< END changes <<<<<<<<<<<<<<<<<<<<<<<<<<
)
logger.info(
"""
****************************************************
You successfully uploaded your evaluation on %s to
OpenAI Gym! You can find it at:
%s
****************************************************
""".rstrip(), env_id, evaluation.web_url())
return evaluation | Upload the results of training (as automatically recorded by your
env's monitor) to OpenAI Gym.
Args:
training_dir (Optional[str]): A directory containing the results of a training run.
algorithm_id (Optional[str]): An algorithm id indicating the particular version of the algorithm (including choices of parameters) you are running (visit https://gym.openai.com/algorithms to create an id)
writeup (Optional[str]): A Gist URL (of the form https://gist.github.com/<user>/<id>) containing your writeup for this evaluation.
api_key (Optional[str]): Your OpenAI API key. Can also be provided as an environment variable (OPENAI_GYM_API_KEY). | entailment |
def load_user_envs(self):
""" Loads downloaded user envs from filesystem cache on `import gym` """
installed_packages = self._list_packages()
# Tagging core envs
gym_package = 'gym ({})'.format(installed_packages['gym']) if 'gym' in installed_packages else 'gym'
core_specs = registry.all()
for spec in core_specs:
spec.source = 'OpenAI Gym Core Package'
spec.package = gym_package
# Loading user envs
if not os.path.isfile(self.cache_path):
return
with open(self.cache_path) as cache:
for line in cache:
user_package, registered_envs = self._load_package(line.rstrip('\n'), installed_packages)
if logger.level <= logging.DEBUG:
logger.debug('Installed %d user environments from package "%s"', len(registered_envs), user_package['name'])
if self.cache_needs_update:
self._update_cache()
if len(self.env_ids) > 0:
logger.info('Found and registered %d user environments.', len(self.env_ids)) | Loads downloaded user envs from filesystem cache on `import gym` | entailment |
def pull(self, source=''):
"""
Downloads and registers a user environment from a git repository
Args:
source: the source where to download the envname (expected 'github.com/user/repo[@branch]')
Note: the user environment will be registered as (username/EnvName-vVersion)
"""
# Checking syntax
branch_parts = source.split('@')
if len(branch_parts) == 2:
branch = branch_parts[1]
source = branch_parts[0]
git_url = 'https://{}.git@{}'.format(source, branch)
else:
git_url = 'https://{}.git'.format(source)
# Validating params
source_parts = source.split('/')
if len(source_parts) != 3 or source_parts[0].lower() != 'github.com':
logger.warn(""" Invalid Syntax - source must be in the format 'github.com/username/repository[@branch]'
Syntax: gym.pull('github.com/username/repository')
where username is a GitHub username, repository is the name of a GitHub repository.""")
return
username = source_parts[1]
modified_packages = []
# Installing pip package
logger.info('Installing pip package from "%s"', git_url)
packages_before = self._list_packages()
return_code = self._run_cmd('{} install --upgrade git+{}'.format(pip_exec, git_url))
if return_code != 0: # Failed - pip will display the error message
return
# Detecting new and upgraded packages
packages_after = self._list_packages()
for package_name in packages_after:
package_version = packages_after[package_name]
if package_name not in packages_before:
logger.info('Installed new package: "%s (%s)"', package_name, package_version)
modified_packages.append(package_name)
elif LooseVersion(packages_before[package_name]) < LooseVersion(package_version):
logger.info('Upgraded package "%s" from "%s" to "%s"',
package_name, packages_before[package_name], package_version)
modified_packages.append(package_name)
elif LooseVersion(packages_before[package_name]) > LooseVersion(package_version):
logger.warn('Package "%s" downgraded from "%s" to "%s". Are you sure that is what you want?',
package_name, packages_before[package_name], package_version)
modified_packages.append(package_name)
# Package conflict - check if already installed from a different source
for package_name in modified_packages:
if package_name in self.user_packages and source != self.user_packages[package_name]['source']:
logger.warn('Package conflict - The package "%s" from "%s" was already installed from "%s". '
'Uninstalling both packages. Please reinstall the one you want.',
package_name, source, self.user_packages[package_name]['source'])
self._deregister_envs_from_source(source)
self._deregister_envs_from_source(self.user_packages[package_name]['source'])
self._run_cmd('{} uninstall -y {}'.format(pip_exec, package_name))
del self.user_packages[package_name]
self._update_cache()
return
# Detecting if already up-to-date
if len(modified_packages) == 0:
logger.warn('The user environments for "%s" are already up-to-date (no new version detected).', source)
return
# De-register envs with same source
self._deregister_envs_from_source(source)
# Loading new packages
new_envs = set([])
uninstall_packages = []
for package_name in modified_packages:
json_line = json.dumps({'name': package_name, 'version': packages_after[package_name], 'source': source})
user_package, registered_envs = self._load_package(json_line, packages_after)
for new_env in registered_envs:
if not new_env.lower().startswith('{}/'.format(username.lower())):
if len(uninstall_packages) == 0: # We don't need to repeat the message multiple times
logger.warn('This package does not respect the naming convention and will be uninstalled to avoid conflicts. '
'Expected user environment to start with "{}/", but got "{}" instead.'.format(username, new_env))
uninstall_packages.append(package_name)
new_envs = new_envs | registered_envs
# Removing packages and deregistering envs if they don't respect naming convention
if len(uninstall_packages) > 0:
self._deregister_envs_from_source(source)
for package_name in uninstall_packages:
self._run_cmd('{} uninstall -y {}'.format(pip_exec, package_name))
return
# Updating cache
self._update_cache()
# Displaying results
logger.info('--------------------------------------------------')
if len(new_envs) > 0:
for env in sorted(new_envs, key=lambda s: s.lower()):
logger.info('Successfully registered the environment: "%s"', env)
else:
logger.info('No environments have been registered. The following packages were modified: %s', ','.join(modified_packages))
return | Downloads and registers a user environment from a git repository
Args:
source: the source where to download the envname (expected 'github.com/user/repo[@branch]')
Note: the user environment will be registered as (username/EnvName-vVersion) | entailment |
def _load_package(self, json_line, installed_packages):
""" Returns the user_package (name, version, source), and the list of envs registered when the package was loaded """
if len(json_line) == 0:
return {}, set([])
valid_json = False
try:
user_package = json.loads(json_line)
valid_json = True
except ValueError:
user_package = {}
package_name = user_package['name'] if 'name' in user_package else None
module_name = package_name.replace('-', '_') if package_name is not None else ''
envs_before = set(registry.list())
if not valid_json or package_name is None:
self.cache_needs_update = True
logger.warn('Unable to load user environments. Try deleting your cache '
'file "%s" if this problem persists. \n\nLine: %s', self.cache_path, json_line)
return {}, set([])
elif package_name not in installed_packages:
self.cache_needs_update = True
logger.warn('The package "%s" does not seem to be installed anymore. User environments from this '
'package will not be registered, and the package will no longer be loaded on `import gym`', package_name)
elif module_name in sys.modules:
self.cache_needs_update = True
try:
reload_module(sys.modules[module_name])
except ImportError:
if 'gym' in package_name: # To avoid uninstalling failing dependencies
logger.warn('Unable to reload the module "%s" from package "%s" (%s). This is usually caused by a '
'invalid pip package. The package will be uninstalled and no longer be loaded on `import gym`.\n',
module_name, package_name, installed_packages[package_name])
traceback.print_exc(file=sys.stdout)
sys.stdout.write('\n')
self._run_cmd('{} uninstall -y {}'.format(pip_exec, package_name))
else:
try:
__import__(module_name)
except ImportError:
if 'gym' in package_name: # To avoid uninstalling failing dependencies
self.cache_needs_update = True
logger.warn('Unable to import the module "%s" from package "%s" (%s). This is usually caused by a '
'invalid pip package. The package will be uninstalled and no longer be loaded on `import gym`.\n',
module_name, package_name, installed_packages[package_name])
traceback.print_exc(file=sys.stdout)
sys.stdout.write('\n')
self._run_cmd('{} uninstall -y {}'.format(pip_exec, package_name))
envs_after = set(registry.list())
registered_envs = envs_after - envs_before
if len(registered_envs) > 0:
self.user_packages[package_name] = user_package
for new_env in registered_envs:
new_spec = registry.spec(new_env)
new_spec.source = user_package['source']
new_spec.package = '{} ({})'.format(user_package['name'], user_package['version'])
self.env_ids.add(new_env.lower())
return user_package, registered_envs | Returns the user_package (name, version, source), and the list of envs registered when the package was loaded | entailment |
def make(self):
"""Instantiates an instance of the environment with appropriate kwargs"""
if self._entry_point is None:
raise error.Error('Attempting to make deprecated env {}. (HINT: is there a newer registered version of this env?)'.format(self.id))
cls = load(self._entry_point)
env = cls(**self._kwargs)
# Make the enviroment aware of which spec it came from.
env.spec = self
env = env.build(extra_wrappers=self._wrappers)
return env | Instantiates an instance of the environment with appropriate kwargs | entailment |
def _parse_path(self, path):
"""Return (hosts, path) tuple"""
# Support specifying another host via hdfs://host:port/path syntax
# We ignore the scheme and piece together the query and fragment
# Note that HDFS URIs are not URL encoded, so a '?' or a '#' in the URI is part of the
# path
parts = urlsplit(path, allow_fragments=False)
if not parts.path.startswith('/'):
raise ValueError("Path must be absolute, was given {}".format(path))
if parts.scheme not in ('', 'hdfs', 'hftp', 'webhdfs'):
warnings.warn("Unexpected scheme {}".format(parts.scheme))
assert not parts.fragment
path = parts.path
if parts.query:
path += '?' + parts.query
if parts.netloc:
hosts = self._parse_hosts(parts.netloc)
else:
hosts = self.hosts
return hosts, path | Return (hosts, path) tuple | entailment |
def _record_last_active(self, host):
"""Put host first in our host list, so we try it first next time
The implementation of get_active_namenode relies on this reordering.
"""
if host in self.hosts: # this check is for when user passes a host at request time
# Keep this thread safe: set hosts atomically and update it before the timestamp
self.hosts = [host] + [h for h in self.hosts if h != host]
self._last_time_recorded_active = time.time() | Put host first in our host list, so we try it first next time
The implementation of get_active_namenode relies on this reordering. | entailment |
def _request(self, method, path, op, expected_status=httplib.OK, **kwargs):
"""Make a WebHDFS request against the NameNodes
This function handles NameNode failover and error checking.
All kwargs are passed as query params to the WebHDFS server.
"""
hosts, path = self._parse_path(path)
_transform_user_name_key(kwargs)
kwargs.setdefault('user.name', self.user_name)
formatted_args = ' '.join('{}={}'.format(*t) for t in kwargs.items())
_logger.info("%s %s %s %s", op, path, formatted_args, ','.join(hosts))
kwargs['op'] = op
for i in range(self.max_tries):
log_level = logging.DEBUG if i < self.max_tries - 1 else logging.WARNING
for host in hosts:
try:
response = self._requests_session.request(
method,
'http://{}{}{}'.format(host, WEBHDFS_PATH, url_quote(path.encode('utf-8'))),
params=kwargs, timeout=self.timeout, allow_redirects=False,
**self._requests_kwargs
)
except (requests.exceptions.ConnectionError, requests.exceptions.Timeout):
_logger.log(log_level, "Failed to reach to %s (attempt %d/%d)",
host, i + 1, self.max_tries, exc_info=True)
continue
try:
_check_response(response, expected_status)
except (HdfsRetriableException, HdfsStandbyException):
_logger.log(log_level, "%s is in startup or standby mode (attempt %d/%d)",
host, i + 1, self.max_tries, exc_info=True)
continue
# Note: standby NN can still return basic validation errors, so non-StandbyException
# does not necessarily mean we have the active NN.
self._record_last_active(host)
return response
if i != self.max_tries - 1:
time.sleep(self.retry_delay)
raise HdfsNoServerException("Could not use any of the given hosts") | Make a WebHDFS request against the NameNodes
This function handles NameNode failover and error checking.
All kwargs are passed as query params to the WebHDFS server. | entailment |
def create(self, path, data, **kwargs):
"""Create a file at the given path.
:param data: ``bytes`` or a ``file``-like object to upload
:param overwrite: If a file already exists, should it be overwritten?
:type overwrite: bool
:param blocksize: The block size of a file.
:type blocksize: long
:param replication: The number of replications of a file.
:type replication: short
:param permission: The permission of a file/directory. Any radix-8 integer (leading zeros
may be omitted.)
:type permission: octal
:param buffersize: The size of the buffer used in transferring data.
:type buffersize: int
"""
metadata_response = self._put(
path, 'CREATE', expected_status=httplib.TEMPORARY_REDIRECT, **kwargs)
assert not metadata_response.content
data_response = self._requests_session.put(
metadata_response.headers['location'], data=data, **self._requests_kwargs)
_check_response(data_response, expected_status=httplib.CREATED)
assert not data_response.content | Create a file at the given path.
:param data: ``bytes`` or a ``file``-like object to upload
:param overwrite: If a file already exists, should it be overwritten?
:type overwrite: bool
:param blocksize: The block size of a file.
:type blocksize: long
:param replication: The number of replications of a file.
:type replication: short
:param permission: The permission of a file/directory. Any radix-8 integer (leading zeros
may be omitted.)
:type permission: octal
:param buffersize: The size of the buffer used in transferring data.
:type buffersize: int | entailment |
def append(self, path, data, **kwargs):
"""Append to the given file.
:param data: ``bytes`` or a ``file``-like object
:param buffersize: The size of the buffer used in transferring data.
:type buffersize: int
"""
metadata_response = self._post(
path, 'APPEND', expected_status=httplib.TEMPORARY_REDIRECT, **kwargs)
data_response = self._requests_session.post(
metadata_response.headers['location'], data=data, **self._requests_kwargs)
_check_response(data_response)
assert not data_response.content | Append to the given file.
:param data: ``bytes`` or a ``file``-like object
:param buffersize: The size of the buffer used in transferring data.
:type buffersize: int | entailment |
def concat(self, target, sources, **kwargs):
"""Concat existing files together.
For preconditions, see
https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/filesystem/filesystem.html#void_concatPath_p_Path_sources
:param target: the path to the target destination.
:param sources: the paths to the sources to use for the concatenation.
:type sources: list
"""
if isinstance(sources, basestring):
raise ValueError("sources should be a list")
if any(',' in s for s in sources):
raise NotImplementedError("WebHDFS does not support commas in concat")
response = self._post(target, 'CONCAT', sources=','.join(sources), **kwargs)
assert not response.content | Concat existing files together.
For preconditions, see
https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/filesystem/filesystem.html#void_concatPath_p_Path_sources
:param target: the path to the target destination.
:param sources: the paths to the sources to use for the concatenation.
:type sources: list | entailment |
def open(self, path, **kwargs):
"""Return a file-like object for reading the given HDFS path.
:param offset: The starting byte position.
:type offset: long
:param length: The number of bytes to be processed.
:type length: long
:param buffersize: The size of the buffer used in transferring data.
:type buffersize: int
:rtype: file-like object
"""
metadata_response = self._get(
path, 'OPEN', expected_status=httplib.TEMPORARY_REDIRECT, **kwargs)
data_response = self._requests_session.get(
metadata_response.headers['location'], stream=True, **self._requests_kwargs)
_check_response(data_response)
return data_response.raw | Return a file-like object for reading the given HDFS path.
:param offset: The starting byte position.
:type offset: long
:param length: The number of bytes to be processed.
:type length: long
:param buffersize: The size of the buffer used in transferring data.
:type buffersize: int
:rtype: file-like object | entailment |
def create_symlink(self, link, destination, **kwargs):
"""Create a symbolic link at ``link`` pointing to ``destination``.
:param link: the path to be created that points to target
:param destination: the target of the symbolic link
:param createParent: If the parent directories do not exist, should they be created?
:type createParent: bool
:raises HdfsUnsupportedOperationException: This feature doesn't actually work, at least on
CDH 5.3.0.
"""
response = self._put(link, 'CREATESYMLINK', destination=destination, **kwargs)
assert not response.content | Create a symbolic link at ``link`` pointing to ``destination``.
:param link: the path to be created that points to target
:param destination: the target of the symbolic link
:param createParent: If the parent directories do not exist, should they be created?
:type createParent: bool
:raises HdfsUnsupportedOperationException: This feature doesn't actually work, at least on
CDH 5.3.0. | entailment |
def rename(self, path, destination, **kwargs):
"""Renames Path src to Path dst.
:returns: true if rename is successful
:rtype: bool
"""
return _json(self._put(path, 'RENAME', destination=destination, **kwargs))['boolean'] | Renames Path src to Path dst.
:returns: true if rename is successful
:rtype: bool | entailment |
def get_file_status(self, path, **kwargs):
"""Return a :py:class:`FileStatus` object that represents the path."""
return FileStatus(**_json(self._get(path, 'GETFILESTATUS', **kwargs))['FileStatus']) | Return a :py:class:`FileStatus` object that represents the path. | entailment |
def list_status(self, path, **kwargs):
"""List the statuses of the files/directories in the given path if the path is a directory.
:rtype: ``list`` of :py:class:`FileStatus` objects
"""
return [
FileStatus(**item) for item in
_json(self._get(path, 'LISTSTATUS', **kwargs))['FileStatuses']['FileStatus']
] | List the statuses of the files/directories in the given path if the path is a directory.
:rtype: ``list`` of :py:class:`FileStatus` objects | entailment |
def get_content_summary(self, path, **kwargs):
"""Return the :py:class:`ContentSummary` of a given Path."""
return ContentSummary(
**_json(self._get(path, 'GETCONTENTSUMMARY', **kwargs))['ContentSummary']) | Return the :py:class:`ContentSummary` of a given Path. | entailment |
def get_file_checksum(self, path, **kwargs):
"""Get the checksum of a file.
:rtype: :py:class:`FileChecksum`
"""
metadata_response = self._get(
path, 'GETFILECHECKSUM', expected_status=httplib.TEMPORARY_REDIRECT, **kwargs)
assert not metadata_response.content
data_response = self._requests_session.get(
metadata_response.headers['location'], **self._requests_kwargs)
_check_response(data_response)
return FileChecksum(**_json(data_response)['FileChecksum']) | Get the checksum of a file.
:rtype: :py:class:`FileChecksum` | entailment |
def set_permission(self, path, **kwargs):
"""Set permission of a path.
:param permission: The permission of a file/directory. Any radix-8 integer (leading zeros
may be omitted.)
:type permission: octal
"""
response = self._put(path, 'SETPERMISSION', **kwargs)
assert not response.content | Set permission of a path.
:param permission: The permission of a file/directory. Any radix-8 integer (leading zeros
may be omitted.)
:type permission: octal | entailment |
def set_owner(self, path, **kwargs):
"""Set owner of a path (i.e. a file or a directory).
The parameters owner and group cannot both be null.
:param owner: user
:param group: group
"""
response = self._put(path, 'SETOWNER', **kwargs)
assert not response.content | Set owner of a path (i.e. a file or a directory).
The parameters owner and group cannot both be null.
:param owner: user
:param group: group | entailment |
def set_times(self, path, **kwargs):
"""Set access time of a file.
:param modificationtime: Set the modification time of this file. The number of milliseconds
since Jan 1, 1970.
:type modificationtime: long
:param accesstime: Set the access time of this file. The number of milliseconds since Jan 1
1970.
:type accesstime: long
"""
response = self._put(path, 'SETTIMES', **kwargs)
assert not response.content | Set access time of a file.
:param modificationtime: Set the modification time of this file. The number of milliseconds
since Jan 1, 1970.
:type modificationtime: long
:param accesstime: Set the access time of this file. The number of milliseconds since Jan 1
1970.
:type accesstime: long | entailment |
def set_xattr(self, path, xattr_name, xattr_value, flag, **kwargs):
"""Set an xattr of a file or directory.
:param xattr_name: The name must be prefixed with the namespace followed by ``.``. For
example, ``user.attr``.
:param flag: ``CREATE`` or ``REPLACE``
"""
kwargs['xattr.name'] = xattr_name
kwargs['xattr.value'] = xattr_value
response = self._put(path, 'SETXATTR', flag=flag, **kwargs)
assert not response.content | Set an xattr of a file or directory.
:param xattr_name: The name must be prefixed with the namespace followed by ``.``. For
example, ``user.attr``.
:param flag: ``CREATE`` or ``REPLACE`` | entailment |
def remove_xattr(self, path, xattr_name, **kwargs):
"""Remove an xattr of a file or directory."""
kwargs['xattr.name'] = xattr_name
response = self._put(path, 'REMOVEXATTR', **kwargs)
assert not response.content | Remove an xattr of a file or directory. | entailment |
def get_xattrs(self, path, xattr_name=None, encoding='text', **kwargs):
"""Get one or more xattr values for a file or directory.
:param xattr_name: ``str`` to get one attribute, ``list`` to get multiple attributes,
``None`` to get all attributes.
:param encoding: ``text`` | ``hex`` | ``base64``, defaults to ``text``
:returns: Dictionary mapping xattr name to value. With text encoding, the value will be a
unicode string. With hex or base64 encoding, the value will be a byte array.
:rtype: dict
"""
kwargs['xattr.name'] = xattr_name
json = _json(self._get(path, 'GETXATTRS', encoding=encoding, **kwargs))['XAttrs']
# Decode the result
result = {}
for attr in json:
k = attr['name']
v = attr['value']
if v is None:
result[k] = None
elif encoding == 'text':
assert attr['value'].startswith('"') and attr['value'].endswith('"')
result[k] = v[1:-1]
elif encoding == 'hex':
assert attr['value'].startswith('0x')
# older python demands bytes, so we have to ascii encode
result[k] = binascii.unhexlify(v[2:].encode('ascii'))
elif encoding == 'base64':
assert attr['value'].startswith('0s')
# older python demands bytes, so we have to ascii encode
result[k] = base64.b64decode(v[2:].encode('ascii'))
else:
warnings.warn("Unexpected encoding {}".format(encoding))
result[k] = v
return result | Get one or more xattr values for a file or directory.
:param xattr_name: ``str`` to get one attribute, ``list`` to get multiple attributes,
``None`` to get all attributes.
:param encoding: ``text`` | ``hex`` | ``base64``, defaults to ``text``
:returns: Dictionary mapping xattr name to value. With text encoding, the value will be a
unicode string. With hex or base64 encoding, the value will be a byte array.
:rtype: dict | entailment |
def list_xattrs(self, path, **kwargs):
"""Get all of the xattr names for a file or directory.
:rtype: list
"""
return simplejson.loads(_json(self._get(path, 'LISTXATTRS', **kwargs))['XAttrNames']) | Get all of the xattr names for a file or directory.
:rtype: list | entailment |
def delete_snapshot(self, path, snapshotname, **kwargs):
"""Delete a snapshot of a directory"""
response = self._delete(path, 'DELETESNAPSHOT', snapshotname=snapshotname, **kwargs)
assert not response.content | Delete a snapshot of a directory | entailment |
def rename_snapshot(self, path, oldsnapshotname, snapshotname, **kwargs):
"""Rename a snapshot"""
response = self._put(path, 'RENAMESNAPSHOT',
oldsnapshotname=oldsnapshotname, snapshotname=snapshotname, **kwargs)
assert not response.content | Rename a snapshot | entailment |
def listdir(self, path, **kwargs):
"""Return a list containing names of files in the given path"""
statuses = self.list_status(path, **kwargs)
if len(statuses) == 1 and statuses[0].pathSuffix == '' and statuses[0].type == 'FILE':
raise NotADirectoryError('Not a directory: {!r}'.format(path))
return [f.pathSuffix for f in statuses] | Return a list containing names of files in the given path | entailment |
def exists(self, path, **kwargs):
"""Return true if the given path exists"""
try:
self.get_file_status(path, **kwargs)
return True
except HdfsFileNotFoundException:
return False | Return true if the given path exists | entailment |
def walk(self, top, topdown=True, onerror=None, **kwargs):
"""See ``os.walk`` for documentation"""
try:
listing = self.list_status(top, **kwargs)
except HdfsException as e:
if onerror is not None:
onerror(e)
return
dirnames, filenames = [], []
for f in listing:
if f.type == 'DIRECTORY':
dirnames.append(f.pathSuffix)
elif f.type == 'FILE':
filenames.append(f.pathSuffix)
else: # pragma: no cover
raise AssertionError("Unexpected type {}".format(f.type))
if topdown:
yield top, dirnames, filenames
for name in dirnames:
new_path = posixpath.join(top, name)
for x in self.walk(new_path, topdown, onerror, **kwargs):
yield x
if not topdown:
yield top, dirnames, filenames | See ``os.walk`` for documentation | entailment |
def copy_from_local(self, localsrc, dest, **kwargs):
"""Copy a single file from the local file system to ``dest``
Takes all arguments that :py:meth:`create` takes.
"""
with io.open(localsrc, 'rb') as f:
self.create(dest, f, **kwargs) | Copy a single file from the local file system to ``dest``
Takes all arguments that :py:meth:`create` takes. | entailment |
def copy_to_local(self, src, localdest, **kwargs):
"""Copy a single file from ``src`` to the local file system
Takes all arguments that :py:meth:`open` takes.
"""
with self.open(src, **kwargs) as fsrc:
with io.open(localdest, 'wb') as fdst:
shutil.copyfileobj(fsrc, fdst) | Copy a single file from ``src`` to the local file system
Takes all arguments that :py:meth:`open` takes. | entailment |
def get_active_namenode(self, max_staleness=None):
"""Return the address of the currently active NameNode.
:param max_staleness: This function caches the active NameNode. If this age of this cached
result is less than ``max_staleness`` seconds, return it. Otherwise, or if this
parameter is None, do a lookup.
:type max_staleness: float
:raises HdfsNoServerException: can't find an active NameNode
"""
if (max_staleness is None or
self._last_time_recorded_active is None or
self._last_time_recorded_active < time.time() - max_staleness):
# Make a cheap request and rely on the reordering in self._record_last_active
self.get_file_status('/')
return self.hosts[0] | Return the address of the currently active NameNode.
:param max_staleness: This function caches the active NameNode. If this age of this cached
result is less than ``max_staleness`` seconds, return it. Otherwise, or if this
parameter is None, do a lookup.
:type max_staleness: float
:raises HdfsNoServerException: can't find an active NameNode | entailment |
def next(self):
"""
Reads the next dataset row.
:return: the next row
:rtype: Instance
"""
if not self.__has_more():
raise StopIteration()
else:
return javabridge.get_env().get_string(self.__next()) | Reads the next dataset row.
:return: the next row
:rtype: Instance | entailment |
def main():
"""
Runs a associator from the command-line. Calls JVM start/stop automatically.
Use -h to see all options.
"""
parser = argparse.ArgumentParser(
description='Executes an associator from the command-line. Calls JVM start/stop automatically.')
parser.add_argument("-j", metavar="classpath", dest="classpath", help="additional classpath, jars/directories")
parser.add_argument("-X", metavar="heap", dest="heap", help="max heap size for jvm, e.g., 512m")
parser.add_argument("-t", metavar="train", dest="train", required=True, help="training set file")
parser.add_argument("associator", help="associator classname, e.g., weka.associations.Apriori")
parser.add_argument("option", nargs=argparse.REMAINDER, help="additional associator options")
parsed = parser.parse_args()
jars = []
if parsed.classpath is not None:
jars = parsed.classpath.split(os.pathsep)
jvm.start(jars, max_heap_size=parsed.heap, packages=True)
logger.debug("Commandline: " + join_options(sys.argv[1:]))
try:
associator = Associator(classname=parsed.associator)
if len(parsed.option) > 0:
associator.options = parsed.option
loader = converters.loader_for_file(parsed.train)
data = loader.load_file(parsed.train)
associator.build_associations(data)
print(str(associator))
except Exception, e:
print(e)
finally:
jvm.stop() | Runs a associator from the command-line. Calls JVM start/stop automatically.
Use -h to see all options. | entailment |
def next(self):
"""
Returns the next rule.
:return: the next rule object
:rtype: AssociationRule
"""
if self.index < self.length:
index = self.index
self.index += 1
return self.rules[index]
else:
raise StopIteration() | Returns the next rule.
:return: the next rule object
:rtype: AssociationRule | entailment |
def _build_tree(self, actor, content):
"""
Builds the tree for the given actor.
:param actor: the actor to process
:type actor: Actor
:param content: the rows of the tree collected so far
:type content: list
"""
depth = actor.depth
row = ""
for i in xrange(depth - 1):
row += "| "
if depth > 0:
row += "|-"
name = actor.name
if name != actor.__class__.__name__:
name = actor.__class__.__name__ + " '" + name + "'"
row += name
quickinfo = actor.quickinfo
if quickinfo is not None:
row += " [" + quickinfo + "]"
content.append(row)
if isinstance(actor, ActorHandler):
for sub in actor.actors:
self._build_tree(sub, content) | Builds the tree for the given actor.
:param actor: the actor to process
:type actor: Actor
:param content: the rows of the tree collected so far
:type content: list | entailment |
def check_actors(self):
"""
Checks the actors of the owner. Raises an exception if invalid.
"""
actors = []
for actor in self.owner.actors:
if actor.skip:
continue
actors.append(actor)
if len(actors) == 0:
return
if not self.allow_source and base.is_source(actors[0]):
raise Exception("Actor '" + actors[0].full_name + "' is a source, but no sources allowed!")
for i in xrange(1, len(actors)):
if not isinstance(actors[i], InputConsumer):
raise Exception("Actor does not accept any input: " + actors[i].full_name) | Checks the actors of the owner. Raises an exception if invalid. | entailment |
def save(cls, flow, fname):
"""
Saves the flow to a JSON file.
:param flow: the flow to save
:type flow: Flow
:param fname: the file to load
:type fname: str
:return: None if successful, otherwise error message
:rtype: str
"""
result = None
try:
f = open(fname, 'w')
f.write(flow.to_json())
f.close()
except Exception, e:
result = str(e)
return result | Saves the flow to a JSON file.
:param flow: the flow to save
:type flow: Flow
:param fname: the file to load
:type fname: str
:return: None if successful, otherwise error message
:rtype: str | entailment |
def setup(self):
"""
Performs some checks.
:return: None if successful, otherwise error message.
:rtype: str
"""
result = super(BranchDirector, self).setup()
if result is None:
try:
self.check_actors()
except Exception, e:
result = str(e)
return result | Performs some checks.
:return: None if successful, otherwise error message.
:rtype: str | entailment |
def ranked_attributes(self):
"""
Returns the matrix of ranked attributes from the last run.
:return: the Numpy matrix
:rtype: ndarray
"""
matrix = javabridge.call(self.jobject, "rankedAttributes", "()[[D")
if matrix is None:
return None
else:
return arrays.double_matrix_to_ndarray(matrix) | Returns the matrix of ranked attributes from the last run.
:return: the Numpy matrix
:rtype: ndarray | entailment |
def next(self):
"""
Returns the next element from the array.
:return: the next array element object, wrapped as JavaObject if not null
:rtype: JavaObject or None
"""
if self.index < self.length:
index = self.index
self.index += 1
return self.data[index]
else:
raise StopIteration() | Returns the next element from the array.
:return: the next array element object, wrapped as JavaObject if not null
:rtype: JavaObject or None | entailment |
def options(self):
"""
Obtains the currently set options as list.
:return: the list of options
:rtype: list
"""
if self.is_optionhandler:
return types.string_array_to_list(javabridge.call(self.jobject, "getOptions", "()[Ljava/lang/String;"))
else:
return [] | Obtains the currently set options as list.
:return: the list of options
:rtype: list | entailment |
def options(self, options):
"""
Sets the command-line options (as list).
:param options: the list of command-line options to set
:type options: list
"""
if self.is_optionhandler:
javabridge.call(self.jobject, "setOptions", "([Ljava/lang/String;)V", types.string_list_to_array(options)) | Sets the command-line options (as list).
:param options: the list of command-line options to set
:type options: list | entailment |
def plot_cluster_assignments(evl, data, atts=None, inst_no=False, size=10, title=None, outfile=None, wait=True):
"""
Plots the cluster assignments against the specified attributes.
TODO: click events http://matplotlib.org/examples/event_handling/data_browser.html
:param evl: the cluster evaluation to obtain the cluster assignments from
:type evl: ClusterEvaluation
:param data: the dataset the clusterer was evaluated against
:type data: Instances
:param atts: the list of attribute indices to plot, None for all
:type atts: list
:param inst_no: whether to include a fake attribute with the instance number
:type inst_no: bool
:param size: the size of the circles in point
:type size: int
:param title: an optional title
:type title: str
:param outfile: the (optional) file to save the generated plot to. The extension determines the file format.
:type outfile: str
:param wait: whether to wait for the user to close the plot
:type wait: bool
"""
if not plot.matplotlib_available:
logger.error("Matplotlib is not installed, plotting unavailable!")
return
fig = plt.figure()
if data.class_index == -1:
c = None
else:
c = []
for i in xrange(data.num_instances):
inst = data.get_instance(i)
c.append(inst.get_value(inst.class_index))
if atts is None:
atts = []
for i in xrange(data.num_attributes):
atts.append(i)
num_plots = len(atts)
if inst_no:
num_plots += 1
clusters = evl.cluster_assignments
for index, att in enumerate(atts):
x = data.values(att)
ax = fig.add_subplot(
1, num_plots, index + 1)
if c is None:
ax.scatter(clusters, x, s=size, alpha=0.5)
else:
ax.scatter(clusters, x, c=c, s=size, alpha=0.5)
ax.set_xlabel("Clusters")
ax.set_title(data.attribute(att).name)
ax.get_xaxis().set_ticks(list(set(clusters)))
ax.grid(True)
if inst_no:
x = []
for i in xrange(data.num_instances):
x.append(i+1)
ax = fig.add_subplot(
1, num_plots, num_plots)
if c is None:
ax.scatter(clusters, x, s=size, alpha=0.5)
else:
ax.scatter(clusters, x, c=c, s=size, alpha=0.5)
ax.set_xlabel("Clusters")
ax.set_title("Instance number")
ax.get_xaxis().set_ticks(list(set(clusters)))
ax.grid(True)
if title is None:
title = data.relationname
fig.canvas.set_window_title(title)
plt.draw()
if not outfile is None:
plt.savefig(outfile)
if wait:
plt.show() | Plots the cluster assignments against the specified attributes.
TODO: click events http://matplotlib.org/examples/event_handling/data_browser.html
:param evl: the cluster evaluation to obtain the cluster assignments from
:type evl: ClusterEvaluation
:param data: the dataset the clusterer was evaluated against
:type data: Instances
:param atts: the list of attribute indices to plot, None for all
:type atts: list
:param inst_no: whether to include a fake attribute with the instance number
:type inst_no: bool
:param size: the size of the circles in point
:type size: int
:param title: an optional title
:type title: str
:param outfile: the (optional) file to save the generated plot to. The extension determines the file format.
:type outfile: str
:param wait: whether to wait for the user to close the plot
:type wait: bool | entailment |
def write_all(filename, jobjects):
"""
Serializes the list of objects to disk. JavaObject instances get automatically unwrapped.
:param filename: the file to serialize the object to
:type filename: str
:param jobjects: the list of objects to serialize
:type jobjects: list
"""
array = javabridge.get_env().make_object_array(len(jobjects), javabridge.get_env().find_class("java/lang/Object"))
for i in xrange(len(jobjects)):
obj = jobjects[i]
if isinstance(obj, JavaObject):
obj = obj.jobject
javabridge.get_env().set_object_array_element(array, i, obj)
javabridge.static_call(
"Lweka/core/SerializationHelper;", "writeAll",
"(Ljava/lang/String;[Ljava/lang/Object;)V",
filename, array) | Serializes the list of objects to disk. JavaObject instances get automatically unwrapped.
:param filename: the file to serialize the object to
:type filename: str
:param jobjects: the list of objects to serialize
:type jobjects: list | entailment |
def main():
"""
Runs a filter from the command-line. Calls JVM start/stop automatically.
Use -h to see all options.
"""
parser = argparse.ArgumentParser(
description='Executes a filter from the command-line. Calls JVM start/stop automatically.')
parser.add_argument("-j", metavar="classpath", dest="classpath", help="additional classpath, jars/directories")
parser.add_argument("-X", metavar="heap", dest="heap", help="max heap size for jvm, e.g., 512m")
parser.add_argument("-i", metavar="input1", dest="input1", required=True, help="input file 1")
parser.add_argument("-o", metavar="output1", dest="output1", required=True, help="output file 1")
parser.add_argument("-r", metavar="input2", dest="input2", help="input file 2")
parser.add_argument("-s", metavar="output2", dest="output2", help="output file 2")
parser.add_argument("-c", metavar="classindex", default="-1", dest="classindex",
help="1-based class attribute index")
parser.add_argument("filter", help="filter classname, e.g., weka.filters.AllFilter")
parser.add_argument("option", nargs=argparse.REMAINDER, help="additional filter options")
parsed = parser.parse_args()
if parsed.input2 is None and parsed.output2 is not None:
raise Exception("No second input file provided ('-r ...')!")
jars = []
if parsed.classpath is not None:
jars = parsed.classpath.split(os.pathsep)
params = []
if parsed.input1 is not None:
params.extend(["-i", parsed.input1])
if parsed.output1 is not None:
params.extend(["-o", parsed.output1])
if parsed.input2 is not None:
params.extend(["-r", parsed.input2])
if parsed.output2 is not None:
params.extend(["-s", parsed.output2])
if parsed.classindex is not None:
params.extend(["-c", parsed.classindex])
jvm.start(jars, max_heap_size=parsed.heap, packages=True)
logger.debug("Commandline: " + join_options(sys.argv[1:]))
try:
flter = Filter(parsed.filter)
if len(parsed.option) > 0:
flter.options = parsed.option
loader = Loader(classname="weka.core.converters.ArffLoader")
in1 = loader.load_file(parsed.input1)
cls = parsed.classindex
if str(parsed.classindex) == "first":
cls = "0"
if str(parsed.classindex) == "last":
cls = str(in1.num_attributes - 1)
in1.class_index = int(cls)
flter.inputformat(in1)
out1 = flter.filter(in1)
saver = Saver(classname="weka.core.converters.ArffSaver")
saver.save_file(out1, parsed.output1)
if parsed.input2 is not None:
in2 = loader.load_file(parsed.input2)
in2.class_index = int(cls)
out2 = flter.filter(in2)
saver.save_file(out2, parsed.output2)
except Exception, e:
print(e)
finally:
jvm.stop() | Runs a filter from the command-line. Calls JVM start/stop automatically.
Use -h to see all options. | entailment |
def next(self):
"""
Reads the next dataset row.
:return: the next row
:rtype: Instance
"""
result = javabridge.call(
self.loader.jobject, "getNextInstance",
"(Lweka/core/Instances;)Lweka/core/Instance;", self.structure.jobject)
if result is None:
raise StopIteration()
else:
return Instance(result) | Reads the next dataset row.
:return: the next row
:rtype: Instance | entailment |
def plot_classifier_errors(predictions, absolute=True, max_relative_size=50, absolute_size=50, title=None,
outfile=None, wait=True):
"""
Plots the classifers for the given list of predictions.
TODO: click events http://matplotlib.org/examples/event_handling/data_browser.html
:param predictions: the predictions to plot
:type predictions: list
:param absolute: whether to use absolute errors as size or relative ones
:type absolute: bool
:param max_relative_size: the maximum size in point in case of relative mode
:type max_relative_size: int
:param absolute_size: the size in point in case of absolute mode
:type absolute_size: int
:param title: an optional title
:type title: str
:param outfile: the output file, ignored if None
:type outfile: str
:param wait: whether to wait for the user to close the plot
:type wait: bool
"""
if not plot.matplotlib_available:
logger.error("Matplotlib is not installed, plotting unavailable!")
return
actual = []
predicted = []
error = None
cls = None
for pred in predictions:
actual.append(pred.actual)
predicted.append(pred.predicted)
if isinstance(pred, NumericPrediction):
if error is None:
error = []
error.append(abs(pred.error))
elif isinstance(pred, NominalPrediction):
if cls is None:
cls = []
if pred.actual != pred.predicted:
cls.append(1)
else:
cls.append(0)
fig, ax = plt.subplots()
if error is None and cls is None:
ax.scatter(actual, predicted, s=absolute_size, alpha=0.5)
elif cls is not None:
ax.scatter(actual, predicted, c=cls, s=absolute_size, alpha=0.5)
elif error is not None:
if not absolute:
min_err = min(error)
max_err = max(error)
factor = (max_err - min_err) / max_relative_size
for i in xrange(len(error)):
error[i] = error[i] / factor * max_relative_size
ax.scatter(actual, predicted, s=error, alpha=0.5)
ax.set_xlabel("actual")
ax.set_ylabel("predicted")
if title is None:
title = "Classifier errors"
ax.set_title(title)
ax.plot(ax.get_xlim(), ax.get_ylim(), ls="--", c="0.3")
ax.grid(True)
fig.canvas.set_window_title(title)
plt.draw()
if outfile is not None:
plt.savefig(outfile)
if wait:
plt.show() | Plots the classifers for the given list of predictions.
TODO: click events http://matplotlib.org/examples/event_handling/data_browser.html
:param predictions: the predictions to plot
:type predictions: list
:param absolute: whether to use absolute errors as size or relative ones
:type absolute: bool
:param max_relative_size: the maximum size in point in case of relative mode
:type max_relative_size: int
:param absolute_size: the size in point in case of absolute mode
:type absolute_size: int
:param title: an optional title
:type title: str
:param outfile: the output file, ignored if None
:type outfile: str
:param wait: whether to wait for the user to close the plot
:type wait: bool | entailment |
def main():
"""
Runs a classifier from the command-line. Calls JVM start/stop automatically.
Use -h to see all options.
"""
parser = argparse.ArgumentParser(
description='Performs classification/regression from the command-line. Calls JVM start/stop automatically.')
parser.add_argument("-j", metavar="classpath", dest="classpath", help="additional classpath, jars/directories")
parser.add_argument("-X", metavar="heap", dest="heap", help="max heap size for jvm, e.g., 512m")
parser.add_argument("-t", metavar="train", dest="train", required=True, help="Training set file")
parser.add_argument("-T", metavar="test", dest="test", help="Test set file")
parser.add_argument("-c", metavar="class index", dest="classindex", help="1-based class attribute index")
parser.add_argument("-d", metavar="outmodel", dest="outmodel", help="model output file name")
parser.add_argument("-l", metavar="inmodel", dest="inmodel", help="model input file name")
parser.add_argument("-x", metavar="num folds", dest="numfolds", help="number of folds for cross-validation")
parser.add_argument("-s", metavar="seed", dest="seed", help="seed value for randomization")
parser.add_argument("-v", action="store_true", dest="notrainstats", help="no statistics for training")
parser.add_argument("-o", action="store_true", dest="onlystats", help="only statistics, don't output model")
parser.add_argument("-i", action="store_true", dest="irstats", help="output information retrieval statistics")
parser.add_argument("-k", action="store_true", dest="itstats", help="output information theoretic statistics")
parser.add_argument("-m", metavar="costmatrix", dest="costmatrix", help="cost matrix file")
parser.add_argument("-g", metavar="graph", dest="graph", help="output file for graph (if supported)")
parser.add_argument("classifier", help="classifier classname, e.g., weka.classifiers.trees.J48")
parser.add_argument("option", nargs=argparse.REMAINDER, help="additional classifier options")
parsed = parser.parse_args()
jars = []
if parsed.classpath is not None:
jars = parsed.classpath.split(os.pathsep)
params = []
if parsed.train is not None:
params.extend(["-t", parsed.train])
if parsed.test is not None:
params.extend(["-T", parsed.test])
if parsed.classindex is not None:
params.extend(["-c", parsed.classindex])
if parsed.outmodel is not None:
params.extend(["-d", parsed.outmodel])
if parsed.inmodel is not None:
params.extend(["-l", parsed.inmodel])
if parsed.numfolds is not None:
params.extend(["-x", parsed.numfolds])
if parsed.seed is not None:
params.extend(["-s", parsed.seed])
if parsed.notrainstats:
params.append("-v")
if parsed.onlystats:
params.append("-o")
if parsed.irstats:
params.append("-i")
if parsed.itstats:
params.append("-k")
if parsed.costmatrix is not None:
params.extend(["-m", parsed.costmatrix])
if parsed.graph is not None:
params.extend(["-g", parsed.graph])
jvm.start(jars, max_heap_size=parsed.heap, packages=True)
logger.debug("Commandline: " + join_options(sys.argv[1:]))
try:
classifier = Classifier(classname=parsed.classifier)
if len(parsed.option) > 0:
classifier.options = parsed.option
print(Evaluation.evaluate_model(classifier, params))
except Exception, e:
print(e)
finally:
jvm.stop() | Runs a classifier from the command-line. Calls JVM start/stop automatically.
Use -h to see all options. | entailment |
def distributions_for_instances(self, data):
"""
Peforms predictions, returning the class distributions.
:param data: the Instances to get the class distributions for
:type data: Instances
:return: the class distribution matrix, None if not a batch predictor
:rtype: ndarray
"""
if self.is_batchpredictor:
return arrays.double_matrix_to_ndarray(self.__distributions(data.jobject))
else:
return None | Peforms predictions, returning the class distributions.
:param data: the Instances to get the class distributions for
:type data: Instances
:return: the class distribution matrix, None if not a batch predictor
:rtype: ndarray | entailment |
def values(self, index):
"""
Returns the internal values of this attribute from all the instance objects.
:return: the values as numpy array
:rtype: list
"""
values = []
for i in xrange(self.num_instances):
inst = self.get_instance(i)
values.append(inst.get_value(index))
return numpy.array(values) | Returns the internal values of this attribute from all the instance objects.
:return: the values as numpy array
:rtype: list | entailment |
def append_instances(cls, inst1, inst2):
"""
Merges the two datasets (one-after-the-other). Throws an exception if the datasets aren't compatible.
:param inst1: the first dataset
:type inst1: Instances
:param inst2: the first dataset
:type inst2: Instances
:return: the combined dataset
:rtype: Instances
"""
msg = inst1.equal_headers(inst2)
if msg is not None:
raise Exception("Cannot appent instances: " + msg)
result = cls.copy_instances(inst1)
for i in xrange(inst2.num_instances):
result.add_instance(inst2.get_instance(i))
return result | Merges the two datasets (one-after-the-other). Throws an exception if the datasets aren't compatible.
:param inst1: the first dataset
:type inst1: Instances
:param inst2: the first dataset
:type inst2: Instances
:return: the combined dataset
:rtype: Instances | entailment |
def values(self):
"""
Returns the labels, strings or relation-values.
:return: all the values, None if not NOMINAL, STRING, or RELATION
:rtype: list
"""
enm = javabridge.call(self.jobject, "enumerateValues", "()Ljava/util/Enumeration;")
if enm is None:
return None
else:
return types.enumeration_to_list(enm) | Returns the labels, strings or relation-values.
:return: all the values, None if not NOMINAL, STRING, or RELATION
:rtype: list | entailment |
def next(self):
"""
Returns the next row from the Instances object.
:return: the next Instance object
:rtype: Instance
"""
if self.row < self.data.num_instances:
index = self.row
self.row += 1
return self.data.get_instance(index)
else:
raise StopIteration() | Returns the next row from the Instances object.
:return: the next Instance object
:rtype: Instance | entailment |
def next(self):
"""
Returns the next attribute from the Instances object.
:return: the next Attribute object
:rtype: Attribute
"""
if self.col < self.data.num_attributes:
index = self.col
self.col += 1
return self.data.attribute(index)
else:
raise StopIteration() | Returns the next attribute from the Instances object.
:return: the next Attribute object
:rtype: Attribute | entailment |
def check(self, diff):
"""Check that the new file introduced is a python source file"""
path = diff.b_path
assert any(
path.endswith(ext)
for ext in importlib.machinery.SOURCE_SUFFIXES
) | Check that the new file introduced is a python source file | entailment |
def check(self, diff):
"""Check that the new file is within the contrib subdirectory"""
path = diff.b_path
contrib_path = self.project.contrib_module_path
assert pathlib.Path(contrib_path) in pathlib.Path(path).parents | Check that the new file is within the contrib subdirectory | entailment |
def check(self, diff):
"""Check that the name of the subpackage within contrib is valid
The package name must match ``user_[a-zA-Z0-9_]+``.
"""
relative_path = relative_to_contrib(diff, self.project)
subpackage_name = relative_path.parts[0]
assert re_test(SUBPACKAGE_NAME_REGEX, subpackage_name) | Check that the name of the subpackage within contrib is valid
The package name must match ``user_[a-zA-Z0-9_]+``. | entailment |
def check(self, diff):
"""Check that the new file introduced is at the proper depth
The proper depth is 2 (contrib/user_example/new_file.py)
"""
relative_path = relative_to_contrib(diff, self.project)
assert len(relative_path.parts) == 2 | Check that the new file introduced is at the proper depth
The proper depth is 2 (contrib/user_example/new_file.py) | entailment |
def check(self, diff):
r"""Check that the new file introduced has a valid name
The module can either be an __init__.py file or must
match ``feature_[a-zA-Z0-9_]+\.\w+``.
"""
filename = pathlib.Path(diff.b_path).parts[-1]
is_valid_feature_module_name = re_test(
FEATURE_MODULE_NAME_REGEX, filename)
is_valid_init_module_name = filename == '__init__.py'
assert is_valid_feature_module_name or is_valid_init_module_name | r"""Check that the new file introduced has a valid name
The module can either be an __init__.py file or must
match ``feature_[a-zA-Z0-9_]+\.\w+``. | entailment |
def check(self, diff):
"""Check that if the new file is __init__.py, then it is empty"""
path = pathlib.Path(diff.b_path)
filename = path.parts[-1]
if filename == '__init__.py':
abspath = self.project.path.joinpath(path)
assert isemptyfile(abspath) | Check that if the new file is __init__.py, then it is empty | entailment |
def enable(logger=logger,
level=logging.INFO,
format=DETAIL_LOG_FORMAT,
echo=True):
"""Enable simple console logging for this module"""
global _handler
if _handler is None:
_handler = logging.StreamHandler()
formatter = logging.Formatter(format)
_handler.setFormatter(formatter)
level = logging._checkLevel(level)
levelName = logging._levelToName[level]
logger.setLevel(level)
_handler.setLevel(level)
if _handler not in logger.handlers:
logger.addHandler(_handler)
if echo:
logger.log(
level, 'Logging enabled at level {name}.'.format(name=levelName)) | Enable simple console logging for this module | entailment |
def default_base_dir():
"""Determine the default base directory path
If the OS_REFRESH_CONFIG_BASE_DIR environment variable is set,
use its value.
Otherwise, prefer the new default path, but still allow the old one for
backwards compatibility.
"""
base_dir = os.environ.get('OS_REFRESH_CONFIG_BASE_DIR')
if base_dir is None:
# NOTE(bnemec): Prefer the new location, but still allow the old one.
if os.path.isdir(OLD_BASE_DIR) and not os.path.isdir(DEFAULT_BASE_DIR):
logging.warning('Base directory %s is deprecated. The recommended '
'base directory is %s',
OLD_BASE_DIR, DEFAULT_BASE_DIR)
base_dir = OLD_BASE_DIR
else:
base_dir = DEFAULT_BASE_DIR
return base_dir | Determine the default base directory path
If the OS_REFRESH_CONFIG_BASE_DIR environment variable is set,
use its value.
Otherwise, prefer the new default path, but still allow the old one for
backwards compatibility. | entailment |
def blacken_code(code):
"""Format code content using Black
Args:
code (str): code as string
Returns:
str
"""
if black is None:
raise NotImplementedError
major, minor, _ = platform.python_version_tuple()
pyversion = 'py{major}{minor}'.format(major=major, minor=minor)
target_versions = [black.TargetVersion[pyversion.upper()]]
line_length = black.DEFAULT_LINE_LENGTH
string_normalization = True
mode = black.FileMode(
target_versions=target_versions,
line_length=line_length,
string_normalization=string_normalization,
)
return black.format_file_contents(code, fast=False, mode=mode) | Format code content using Black
Args:
code (str): code as string
Returns:
str | entailment |
def _writer(func):
"""
Decorator for a custom writer, but a default reader
"""
name = func.__name__
return property(fget=lambda self: getattr(self, '_%s' % name), fset=func) | Decorator for a custom writer, but a default reader | entailment |
def pem_armor_csr(certification_request):
"""
Encodes a CSR into PEM format
:param certification_request:
An asn1crypto.csr.CertificationRequest object of the CSR to armor.
Typically this is obtained from CSRBuilder.build().
:return:
A byte string of the PEM-encoded CSR
"""
if not isinstance(certification_request, csr.CertificationRequest):
raise TypeError(_pretty_message(
'''
certification_request must be an instance of
asn1crypto.csr.CertificationRequest, not %s
''',
_type_name(certification_request)
))
return pem.armor(
'CERTIFICATE REQUEST',
certification_request.dump()
) | Encodes a CSR into PEM format
:param certification_request:
An asn1crypto.csr.CertificationRequest object of the CSR to armor.
Typically this is obtained from CSRBuilder.build().
:return:
A byte string of the PEM-encoded CSR | entailment |
def _pretty_message(string, *params):
"""
Takes a multi-line string and does the following:
- dedents
- converts newlines with text before and after into a single line
- strips leading and trailing whitespace
:param string:
The string to format
:param *params:
Params to interpolate into the string
:return:
The formatted string
"""
output = textwrap.dedent(string)
# Unwrap lines, taking into account bulleted lists, ordered lists and
# underlines consisting of = signs
if output.find('\n') != -1:
output = re.sub('(?<=\\S)\n(?=[^ \n\t\\d\\*\\-=])', ' ', output)
if params:
output = output % params
output = output.strip()
return output | Takes a multi-line string and does the following:
- dedents
- converts newlines with text before and after into a single line
- strips leading and trailing whitespace
:param string:
The string to format
:param *params:
Params to interpolate into the string
:return:
The formatted string | entailment |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.