sentence1 stringlengths 52 3.87M | sentence2 stringlengths 1 47.2k | label stringclasses 1 value |
|---|---|---|
def _delay_for_ratelimits(cls, start):
    """If request was shorter than max request time, delay.

    Sleeps for the remainder of ``cls.REQUEST_TIME_MICROSECONDS`` so that
    consecutive requests are spaced out to honour the rate limit.

    :param start: ``datetime`` captured when the request began.
    """
    stop = datetime.now()
    # BUG FIX: timedelta.microseconds is only the fractional-second
    # component (0..999999) of the duration, so a request taking e.g.
    # 2.0s looked instantaneous and triggered a needless sleep.  Use the
    # total elapsed time instead.
    duration_microseconds = ((stop - start).total_seconds()
                             * MICROSECONDS_PER_SECOND)
    if duration_microseconds < cls.REQUEST_TIME_MICROSECONDS:
        time.sleep((cls.REQUEST_TIME_MICROSECONDS - duration_microseconds)
                   / MICROSECONDS_PER_SECOND)
def load_data(filename, format_file='cloudupdrs'):
    """
    This is a general load data method where the format of data to load can be passed as a parameter,
    :param str filename: The path to load data from
    :param str format_file: format of the file. Default is CloudUPDRS ('cloudupdrs'). Set to 'mpower' for mpower data.
    :return DataFrame dataframe: data_frame.x, data_frame.y, data_frame.z: x, y, z components of the acceleration data_frame.index is the datetime-like index
    """
    logging.debug("{} data --> Loaded".format(format_file))
    # NOTE(review): this delegates to a name `load_data` -- presumably a
    # module-level loader (e.g. from a utils module) that this method wraps.
    # If this definition itself is what resolves here, the call recurses
    # forever -- confirm which `load_data` is in scope.
    data_frame = load_data(filename, format_file)
    # Annotate the frame with its sampling rate derived from the index.
    data_frame.sampling_rate = get_sampling_rate_from_timestamp(data_frame)
    return data_frame | This is a general load data method where the format of data to load can be passed as a parameter,
:param str filename: The path to load data from
:param str format_file: format of the file. Default is CloudUPDRS ('cloudupdrs'). Set to 'mpower' for mpower data.
:return DataFrame dataframe: data_frame.x, data_frame.y, data_frame.z: x, y, z components of the acceleration data_frame.index is the datetime-like index | entailment |
def run(self):
    """
    Run the configured method and write the HTTP response status and text
    to the output stream.

    :return: the Deferred returned by the query submission; it fires after
        the response (or error report) has been written to ``self.output``.
    """
    # Build the AWS region from the configured credentials/endpoint and
    # create the query via the injectable factory.
    region = AWSServiceRegion(access_key=self.key, secret_key=self.secret,
                              uri=self.endpoint)
    query = self.query_factory(action=self.action, creds=region.creds,
                               endpoint=region.ec2_endpoint,
                               other_params=self.parameters)
    def write_response(response):
        # Success path: report URL, HTTP status and the raw response body.
        print >> self.output, "URL: %s" % query.client.url
        print >> self.output
        print >> self.output, "HTTP status code: %s" % query.client.status
        print >> self.output
        print >> self.output, response
    def write_error(failure):
        # Error path: prefer the original AWS error payload when present.
        if failure.check(AWSError):
            message = failure.value.original
        else:
            message = failure.getErrorMessage()
            if message.startswith("Error Message: "):
                message = message[len("Error Message: "):]
        print >> self.output, "URL: %s" % query.client.url
        print >> self.output
        # The client has no status if the request never completed.
        if getattr(query.client, "status", None) is not None:
            print >> self.output, "HTTP status code: %s" % (
                query.client.status,)
            print >> self.output
        print >> self.output, message
        if getattr(failure.value, "response", None) is not None:
            print >> self.output
            print >> self.output, failure.value.response
    deferred = query.submit()
    deferred.addCallback(write_response)
    deferred.addErrback(write_error)
    return deferred | Run the configured method and write the HTTP response status and text
to the output stream. | entailment |
def resample_signal(self, data_frame):
    """
    Convenience method for frequency conversion and resampling of data frame.
    Object must have a DatetimeIndex. After re-sampling, this methods interpolate the time magnitude sum
    acceleration values and the x,y,z values of the data frame acceleration
    :param data_frame: the data frame to resample
    :param str sampling_frequency: the sampling frequency. Default is 100Hz, as recommended by the author of the pilot study [1]
    """
    # Resampling period in seconds (e.g. 100 Hz -> '0.01S'), rounded to
    # microsecond precision for the pandas offset string.
    new_freq = np.round(1 / self.sampling_frequency, decimals=6)
    df_resampled = data_frame.resample(str(new_freq) + 'S').mean()
    # f = interpolate.interp1d(data_frame.td, data_frame.mag_sum_acc)
    # new_timestamp = np.arange(data_frame.td[0], data_frame.td[-1], 1.0 / self.sampling_frequency)
    # df_resampled.mag_sum_acc = f(new_timestamp)
    logging.debug("resample signal")
    # Fill the NaNs introduced by resampling to the new grid.
    df_resampled = df_resampled.interpolate(method='linear')
    # NOTE(review): the return value is discarded here, unlike other call
    # sites that assign it -- presumably the helper annotates the frame in
    # place; confirm against its definition.
    get_sampling_rate_from_timestamp(df_resampled)
    # df_resampled['td'] = df_resampled.index - df_resampled.index[0]
    return df_resampled | Convenience method for frequency conversion and resampling of data frame.
Object must have a DatetimeIndex. After re-sampling, this methods interpolate the time magnitude sum
acceleration values and the x,y,z values of the data frame acceleration
:param data_frame: the data frame to resample
:param str sampling_frequency: the sampling frequency. Default is 100Hz, as recommended by the author of the pilot study [1] | entailment |
def filter_data_frame(self, data_frame, centre=False, keep_cols=['anno']):
    """
    This method filters a data frame signal as suggested in [1]. First step is to high pass filter the data
    frame using a butter Butterworth digital and analog filter
    (https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.signal.butter.html). Then the method
    filter the data frame along one-dimension using a digital filter.
    (https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.lfilter.html)
    :param data_frame: the data frame
    :param str cutoff_frequency: The path to load data from
    :param str filter_order: format of the file. Default is CloudUPDRS. Set to mpower for mpower data.
    """
    # NOTE(review): the docstring says "high pass" but the helper invoked
    # below is butter_lowpass_filter -- confirm which is intended.
    # NOTE(review): keep_cols=['anno'] is a shared mutable default; it is
    # only read here so it is safe today, but fragile if ever mutated.
    b_f = lambda x: butter_lowpass_filter(x.values, self.sampling_frequency, cutoff=self.cutoff_frequency, order=self.filter_order)
    filtered_data_frame = data_frame.apply(b_f, 0)
    # we don't need to filter the time difference
    # filtered_data_frame.td = data_frame.td
    logging.debug("filtered whole dataframe!")
    # I need to fix this as I am losing some important information
    # one idea would be to look at where the sign changes (first and second peak)
    # and keep that information aswell.
    if centre:
        # de-mean
        filtered_data_frame -= filtered_data_frame.mean()
        for col in filtered_data_frame:
            # Zero out everything before the first positive sample.
            # NOTE(review): chained indexing (df[col][:i] = 0) can trip
            # pandas' SettingWithCopy behaviour -- confirm intent.
            first_zero_crossing = np.argwhere(filtered_data_frame[col] > 0)[0][0]
            filtered_data_frame[col][:first_zero_crossing] = 0
    # No python3 support :(
    # if {*keep_cols}.issubset(filtered_data_frame.columns):
    for c in keep_cols:
        if c not in filtered_data_frame.columns:
            # NOTE(review): silently returns None when a keep column is
            # missing -- callers must handle that; confirm it is intended.
            return
    # Copy the annotation columns over unfiltered from the input.
    filtered_data_frame[keep_cols] = data_frame[keep_cols]
    return filtered_data_frame | This method filters a data frame signal as suggested in [1]. First step is to high pass filter the data
frame using a butter Butterworth digital and analog filter
(https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.signal.butter.html). Then the method
filter the data frame along one-dimension using a digital filter.
(https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.lfilter.html)
:param data_frame: the data frame
:param str cutoff_frequency: The path to load data from
:param str filter_order: format of the file. Default is CloudUPDRS. Set to mpower for mpower data. | entailment |
def get_canonical_host(self):
    """
    Return the canonical host as for the Host HTTP header specification.

    The host is lower-cased; the port is appended as ``host:port`` only
    when one is set.
    """
    canonical = self.host.lower()
    if self.port is None:
        return canonical
    return "%s:%s" % (canonical, self.port)
def set_canonical_host(self, canonical_host):
    """
    Set host and port from a canonical host string as for the Host HTTP
    header specification.

    An empty or missing port component leaves ``self.port`` as ``None``.
    """
    pieces = canonical_host.lower().split(":")
    self.host = pieces[0]
    port_text = pieces[1] if len(pieces) > 1 else ""
    self.port = int(port_text) if port_text else None
header specification. | entailment |
def get_uri(self):
    """Get a URL representation of the service."""
    return "{0}://{1}{2}".format(self.scheme, self.get_canonical_host(),
                                 self.path)
def get_client(self, cls, purge_cache=False, *args, **kwds):
    """
    This is a general method for getting a client: if present, it is pulled
    from the cache; if not, a new one is instantiated and then put into the
    cache. This method should not be called directly, but rather by other
    client-specific methods (e.g., get_ec2_client).

    :param cls: the client class to instantiate.
    :param bool purge_cache: when True, discard any cached instance and
        build a fresh one.
    :param args: positional arguments forwarded to ``cls``.
    :param kwds: keyword arguments forwarded to ``cls``.
    :return: the cached or newly created client instance.
    """
    key = str(cls) + str(args) + str(kwds)
    instance = self._clients.get(key)
    # BUG FIX: test the cache miss with `is None`, not truthiness -- a
    # cached client that happens to be falsy (e.g. an empty container)
    # was being rebuilt on every call.
    if purge_cache or instance is None:
        instance = cls(*args, **kwds)
        self._clients[key] = instance
    return instance
from the cache; if not, a new one is instantiated and then put into the
cache. This method should not be called directly, but rather by other
client-specific methods (e.g., get_ec2_client). | entailment |
def optimized(fn):
    """Decorator that will call the optimized c++ version
    of a pycast function if available rather than theo
    original pycast function
    :param function fn: original pycast function
    :return: return the wrapped function
    :rtype: function
    """
    def _optimized(self, *args, **kwargs):
        """ This method calls the pycastC function if
        optimization is enabled and the pycastC function
        is available.
        :param: PyCastObject self: reference to the calling object.
            Needs to be passed to the pycastC function,
            so that all uts members are available.
        :param: list *args: list of arguments the function is called with.
        :param: dict **kwargs: dictionary of parameter names and values the function has been called with.
        :return result of the function call either from pycast or pycastC module.
        :rtype: function
        """
        if self.optimizationEnabled:
            # Map e.g. module "pycast.foo" -> "pycastC.foo" and look for a
            # same-named function under the class's submodule.
            class_name = self.__class__.__name__
            module = self.__module__.replace("pycast", "pycastC")
            try:
                imported = __import__("%s.%s" % (module, class_name), globals(), locals(), [fn.__name__])
                function = getattr(imported, fn.__name__)
                return function(self, *args, **kwargs)
            except ImportError:
                # Fall back to the pure-Python implementation when no
                # compiled counterpart exists.
                print "[WARNING] Could not enable optimization for %s, %s" % (fn.__name__, self)
                return fn(self, *args, **kwargs)
        else:
            return fn(self, *args, **kwargs)
    # Preserve the wrapped function's metadata by hand
    # (pre-functools.wraps style).
    setattr(_optimized, "__name__", fn.__name__)
    setattr(_optimized, "__repr__", fn.__repr__)
    setattr(_optimized, "__str__", fn.__str__)
    setattr(_optimized, "__doc__", fn.__doc__)
    return _optimized | Decorator that will call the optimized c++ version
of a pycast function if available rather than theo
original pycast function
:param function fn: original pycast function
:return: return the wrapped function
:rtype: function | entailment |
def optimize(self, timeSeries, forecastingMethods=None, startingPercentage=0.0, endPercentage=100.0):
    """Runs the optimization on the given TimeSeries.
    :param TimeSeries timeSeries: TimeSeries instance that requires an optimized forecast.
    :param list forecastingMethods: List of forecastingMethods that will be used for optimization.
    :param float startingPercentage: Defines the start of the interval. This has to be a value in [0.0, 100.0].
        It represents the value, where the error calculation should be started.
        25.0 for example means that the first 25% of all calculated errors will be ignored.
    :param float endPercentage: Defines the end of the interval. This has to be a value in [0.0, 100.0].
        It represents the value, after which all error values will be ignored. 90.0 for example means that
        the last 10% of all local errors will be ignored.
    :return: Returns the optimized forecasting method with the smallest error.
    :rtype: (BaseForecastingMethod, Dictionary)
    :raise: Raises a :py:exc:`ValueError` ValueError if no forecastingMethods is empty.
    """
    # NOTE(review): only the argument validation is visible in this excerpt;
    # the optimization body itself appears to be truncated here.
    # no forecasting methods provided
    if forecastingMethods is None or len(forecastingMethods) == 0:
        raise ValueError("forecastingMethods cannot be empty.") | Runs the optimization on the given TimeSeries.
:param TimeSeries timeSeries: TimeSeries instance that requires an optimized forecast.
:param list forecastingMethods: List of forecastingMethods that will be used for optimization.
:param float startingPercentage: Defines the start of the interval. This has to be a value in [0.0, 100.0].
It represents the value, where the error calculation should be started.
25.0 for example means that the first 25% of all calculated errors will be ignored.
:param float endPercentage: Defines the end of the interval. This has to be a value in [0.0, 100.0].
It represents the value, after which all error values will be ignored. 90.0 for example means that
the last 10% of all local errors will be ignored.
:return: Returns the optimized forecasting method with the smallest error.
:rtype: (BaseForecastingMethod, Dictionary)
:raise: Raises a :py:exc:`ValueError` ValueError if no forecastingMethods is empty. | entailment |
def include_path(self, path):
    """
    Should this path be included based on the include_paths or exclude_paths.
    Keeps track of paths seen to allow finding unused filters.
    :param path: str: remote path to be filtered
    :return: bool: True if we should include the path
    """
    # Record every queried path so unused filters can be reported later
    # via get_unused_paths.
    self.seen_paths.add(path)
    decision = self.filter.include(path)
    return decision
Keeps track of paths seen to allow finding unused filters.
:param path: str: remote path to be filtered
:return: bool: True if we should include the path | entailment |
def get_unused_paths(self):
    """
    Returns which include_paths or exclude_paths that were not used via include_path method.
    :return: [str] list of filtering paths that were not used.
    """
    seen = self.seen_paths
    return [filter_path for filter_path in self.filter.paths
            if filter_path not in seen]
:return: [str] list of filtering paths that were not used. | entailment |
def is_child(child_path, parent_path):
    """
    Is parent_path a parent(or grandparent) directory of child_path.
    :param child_path: str: remote file path
    :param parent_path: str: remote file path
    :return: bool: True when parent_path is child_path's parent
    """
    # Normalize both paths to a trailing-separator form so that a prefix
    # match is a whole-component match ("/a/bc" must not match "/a/b").
    normalized_parent = os.path.join(parent_path, '')
    normalized_child = os.path.join(child_path, '')
    return normalized_child.startswith(normalized_parent)
:param child_path: str: remote file path
:param parent_path: str: remote file path
:return: bool: True when parent_path is child_path's parent | entailment |
def parent_child_paths(path, some_path):
    """
    Is path a parent of some_path or some_path is a parent of path.
    :param path: str: remote file path
    :param some_path: str: remote file path
    :return: bool: True when they are parents
    """
    # Check containment in both directions; is_child is pure, so
    # evaluating both is safe.
    forward = PathFilterUtil.is_child(path, some_path)
    backward = PathFilterUtil.is_child(some_path, path)
    return forward or backward
:param path: str: remote file path
:param some_path: str: remote file path
:return: bool: True when they are parents | entailment |
def method(method_class):
    """Decorator to use to mark an API method.
    When invoking L{Registry.scan} the classes marked with this decorator
    will be added to the registry.
    @param method_class: The L{Method} class to register.
    """
    def callback(scanner, name, method_class):
        # Default the actions to the decorated name and the versions to
        # the catch-all None when the class does not declare them.
        if method_class.actions is not None:
            actions = method_class.actions
        else:
            actions = [name]
        if method_class.versions is not None:
            versions = method_class.versions
        else:
            versions = [None]
        # Register every (action, version) combination.
        for action in actions:
            for version in versions:
                scanner.registry.add(method_class,
                                     action=action,
                                     version=version)
    # Deferred registration: venusian calls `callback` only when the
    # registry scans the module defining the decorated class.
    from venusian import attach
    attach(method_class, callback, category="method")
    return method_class | Decorator to use to mark an API method.
When invoking L{Registry.scan} the classes marked with this decorator
will be added to the registry.
@param method_class: The L{Method} class to register. | entailment |
def hexlify(script, minify=False):
    """
    Takes the byte content of a Python script and returns a hex encoded
    version of it.
    Based on the hexlify script in the microbit-micropython repository.

    :param bytes script: the script's source bytes ('' is returned for
        empty input).
    :param bool minify: when True, minify the source with nudatus first.
    :return: Intel HEX records for the script as a string.
    :raises ValueError: when minification is requested but unavailable, or
        when the padded script exceeds _MAX_SIZE.
    """
    if not script:
        return ''
    # Convert line endings in case the file was created on Windows.
    script = script.replace(b'\r\n', b'\n')
    script = script.replace(b'\r', b'\n')
    if minify:
        if not can_minify:
            raise ValueError("No minifier is available")
        script = nudatus.mangle(script.decode('utf-8')).encode('utf-8')
    # Add header, pad to multiple of 16 bytes.
    data = b'MP' + struct.pack('<H', len(script)) + script
    # Padding with null bytes in a 2/3 compatible way
    data = data + (b'\x00' * (16 - len(data) % 16))
    if len(data) > _MAX_SIZE:
        # 'MP' = 2 bytes, script length is another 2 bytes.
        raise ValueError("Python script must be less than 8188 bytes.")
    # Convert to .hex format.
    output = [':020000040003F7']  # extended linear address, 0x0003.
    addr = _SCRIPT_ADDR
    # One 16-byte data record per chunk: length, address, record type 0,
    # payload, then a two's-complement checksum.
    for i in range(0, len(data), 16):
        chunk = data[i:min(i + 16, len(data))]
        chunk = struct.pack('>BHB', len(chunk), addr & 0xffff, 0) + chunk
        checksum = (-(sum(bytearray(chunk)))) & 0xff
        hexline = ':%s%02X' % (strfunc(binascii.hexlify(chunk)).upper(),
                               checksum)
        output.append(hexline)
        addr += 16
    return '\n'.join(output) | Takes the byte content of a Python script and returns a hex encoded
version of it.
Based on the hexlify script in the microbit-micropython repository. | entailment |
def unhexlify(blob):
    """
    Takes a hexlified script and turns it back into a string of Python code.

    Returns '' when the payload does not carry the "MP" script header or
    cannot be decoded as UTF-8.
    """
    # Skip the first record (the extended linear address line) and decode
    # the payload of every remaining record.
    chunks = []
    for record in blob.split('\n')[1:]:
        # Drop the ":LLAAAATT" prefix and the trailing checksum byte.
        chunks.append(binascii.unhexlify(record[9:-2]))
    # The payload must begin with the "MP<size>" marker.
    if chunks[0][0:2].decode('utf-8') != u'MP':
        return ''
    # Remove the 4-byte header and the null padding at the end.
    chunks[0] = chunks[0][4:]
    chunks[-1] = chunks[-1].strip(b'\x00')
    raw = b''.join(chunks)
    try:
        return raw.decode('utf-8')
    except UnicodeDecodeError:
        # In certain rare circumstances (where the source hex doesn't
        # include any embedded Python code) this function may be passed
        # "raw" bytes from MicroPython -- report no script.
        return ''
def embed_hex(runtime_hex, python_hex=None):
    """
    Given a string representing the MicroPython runtime hex, will embed a
    string representing a hex encoded Python script into it.
    Returns a string representation of the resulting combination.
    Will raise a ValueError if the runtime_hex is missing.
    If the python_hex is missing, it will return the unmodified runtime_hex.
    """
    if not runtime_hex:
        raise ValueError('MicroPython runtime hex required.')
    if not python_hex:
        return runtime_hex
    runtime_records = runtime_hex.split()
    script_records = python_hex.split()
    # Splice the script records in five lines before the end of the
    # runtime, i.e. just ahead of its closing records.
    combined = runtime_records[:-5] + script_records + runtime_records[-5:]
    return '\n'.join(combined) + '\n'
string representing a hex encoded Python script into it.
Returns a string representation of the resulting combination.
Will raise a ValueError if the runtime_hex is missing.
If the python_hex is missing, it will return the unmodified runtime_hex. | entailment |
def extract_script(embedded_hex):
    """
    Given a hex file containing the MicroPython runtime and an embedded Python
    script, will extract the original Python script.
    Returns a string containing the original embedded script.

    Returns '' when no script region is found in the hex.
    """
    hex_lines = embedded_hex.split('\n')
    # Upper and lower 16 bits of the script's flash address, rendered as
    # 4 uppercase hex digits for record matching.
    script_addr_high = hex((_SCRIPT_ADDR >> 16) & 0xffff)[2:].upper().zfill(4)
    script_addr_low = hex(_SCRIPT_ADDR & 0xffff)[2:].upper().zfill(4)
    start_script = None
    within_range = False
    # Look for the script start address
    for loc, val in enumerate(hex_lines):
        if val[0:9] == ':02000004':
            # Reached an extended address record, check if within script range
            within_range = val[9:13].upper() == script_addr_high
        elif within_range and val[0:3] == ':10' and \
                val[3:7].upper() == script_addr_low:
            start_script = loc
            break
    if start_script:
        # Find the end of the script
        end_script = None
        for loc, val in enumerate(hex_lines[start_script:]):
            # An all-0xFF payload marks erased flash past the script.
            if val[9:41] == 'FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF':
                end_script = loc + start_script
                break
        # Pass the extracted hex through unhexlify
        return unhexlify('\n'.join(
            hex_lines[start_script - 1:end_script if end_script else -6]))
    return '' | Given a hex file containing the MicroPython runtime and an embedded Python
script, will extract the original Python script.
Returns a string containing the original embedded script. | entailment |
def find_microbit():
    """
    Returns a path on the filesystem that represents the plugged in BBC
    micro:bit that is to be flashed. If no micro:bit is found, it returns
    None.
    Works on Linux, OSX and Windows. Will raise a NotImplementedError
    exception if run on any other operating system.
    """
    # Check what sort of operating system we're on.
    if os.name == 'posix':
        # 'posix' means we're on Linux or OSX (Mac).
        # Call the unix "mount" command to list the mounted volumes.
        mount_output = check_output('mount').splitlines()
        mounted_volumes = [x.split()[2] for x in mount_output]
        for volume in mounted_volumes:
            if volume.endswith(b'MICROBIT'):
                return volume.decode('utf-8')  # Return a string not bytes.
    elif os.name == 'nt':
        # 'nt' means we're on Windows.
        def get_volume_name(disk_name):
            """
            Each disk or external device connected to windows has an attribute
            called "volume name". This function returns the volume name for
            the given disk/device.
            Code from http://stackoverflow.com/a/12056414
            """
            vol_name_buf = ctypes.create_unicode_buffer(1024)
            ctypes.windll.kernel32.GetVolumeInformationW(
                ctypes.c_wchar_p(disk_name), vol_name_buf,
                ctypes.sizeof(vol_name_buf), None, None, None, None, 0)
            return vol_name_buf.value
        #
        # In certain circumstances, volumes are allocated to USB
        # storage devices which cause a Windows popup to raise if their
        # volume contains no media. Wrapping the check in SetErrorMode
        # with SEM_FAILCRITICALERRORS (1) prevents this popup.
        #
        old_mode = ctypes.windll.kernel32.SetErrorMode(1)
        try:
            for disk in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ':
                path = '{}:\\'.format(disk)
                #
                # Don't bother looking if the drive isn't removable
                #
                if ctypes.windll.kernel32.GetDriveTypeW(path) != 2:
                    continue
                if os.path.exists(path) and \
                        get_volume_name(path) == 'MICROBIT':
                    return path
        finally:
            # Always restore the original error mode.
            ctypes.windll.kernel32.SetErrorMode(old_mode)
    else:
        # No support for unknown operating systems.
        raise NotImplementedError('OS "{}" not supported.'.format(os.name)) | Returns a path on the filesystem that represents the plugged in BBC
micro:bit that is to be flashed. If no micro:bit is found, it returns
None.
Works on Linux, OSX and Windows. Will raise a NotImplementedError
exception if run on any other operating system. | entailment |
def save_hex(hex_file, path):
    """
    Given a string representation of a hex file, this function copies it to
    the specified path thus causing the device mounted at that point to be
    flashed.
    If the hex_file is empty it will raise a ValueError.
    If the filename at the end of the path does not end in '.hex' it will raise
    a ValueError.
    """
    # Validate before touching the filesystem.
    if not hex_file:
        raise ValueError('Cannot flash an empty .hex file.')
    if not path.endswith('.hex'):
        raise ValueError('The path to flash must be for a .hex file.')
    encoded = hex_file.encode('ascii')
    with open(path, 'wb') as target:
        target.write(encoded)
the specified path thus causing the device mounted at that point to be
flashed.
If the hex_file is empty it will raise a ValueError.
If the filename at the end of the path does not end in '.hex' it will raise
a ValueError. | entailment |
def flash(path_to_python=None, paths_to_microbits=None,
          path_to_runtime=None, python_script=None, minify=False):
    """
    Given a path to or source of a Python file will attempt to create a hex
    file and then flash it onto the referenced BBC micro:bit.
    If the path_to_python & python_script are unspecified it will simply flash
    the unmodified MicroPython runtime onto the device.
    If used, the python_script argument should be a bytes object representing
    a UTF-8 encoded string. For example::
        script = "from microbit import *\\ndisplay.scroll('Hello, World!')"
        uflash.flash(python_script=script.encode('utf-8'))
    If paths_to_microbits is unspecified it will attempt to find the device's
    path on the filesystem automatically.
    If the path_to_runtime is unspecified it will use the built in version of
    the MicroPython runtime. This feature is useful if a custom build of
    MicroPython is available.
    If the automatic discovery fails, then it will raise an IOError.
    """
    # Check for the correct version of Python.
    if not ((sys.version_info[0] == 3 and sys.version_info[1] >= 3) or
            (sys.version_info[0] == 2 and sys.version_info[1] >= 7)):
        raise RuntimeError('Will only run on Python 2.7, or 3.3 and later.')
    # Grab the Python script (if needed).
    python_hex = ''
    if path_to_python:
        if not path_to_python.endswith('.py'):
            raise ValueError('Python files must end in ".py".')
        # NOTE(review): the file handle rebinds the `python_script`
        # parameter here; harmless (the elif below is then skipped) but
        # confusing to read.
        with open(path_to_python, 'rb') as python_script:
            python_hex = hexlify(python_script.read(), minify)
    elif python_script:
        python_hex = hexlify(python_script, minify)
    runtime = _RUNTIME
    # Load the hex for the runtime.
    if path_to_runtime:
        with open(path_to_runtime) as runtime_file:
            runtime = runtime_file.read()
    # Generate the resulting hex file.
    micropython_hex = embed_hex(runtime, python_hex)
    # Find the micro:bit.
    if not paths_to_microbits:
        found_microbit = find_microbit()
        if found_microbit:
            paths_to_microbits = [found_microbit]
    # Attempt to write the hex file to the micro:bit.
    if paths_to_microbits:
        for path in paths_to_microbits:
            hex_path = os.path.join(path, 'micropython.hex')
            print('Flashing Python to: {}'.format(hex_path))
            save_hex(micropython_hex, hex_path)
    else:
        raise IOError('Unable to find micro:bit. Is it plugged in?') | Given a path to or source of a Python file will attempt to create a hex
file and then flash it onto the referenced BBC micro:bit.
If the path_to_python & python_script are unspecified it will simply flash
the unmodified MicroPython runtime onto the device.
If used, the python_script argument should be a bytes object representing
a UTF-8 encoded string. For example::
script = "from microbit import *\\ndisplay.scroll('Hello, World!')"
uflash.flash(python_script=script.encode('utf-8'))
If paths_to_microbits is unspecified it will attempt to find the device's
path on the filesystem automatically.
If the path_to_runtime is unspecified it will use the built in version of
the MicroPython runtime. This feature is useful if a custom build of
MicroPython is available.
If the automatic discovery fails, then it will raise an IOError. | entailment |
def extract(path_to_hex, output_path=None):
    """
    Given a path_to_hex file this function will attempt to extract the
    embedded script from it and save it either to output_path or stdout
    """
    with open(path_to_hex, 'r') as hex_file:
        recovered_script = extract_script(hex_file.read())
        # Write to the requested file, or fall back to stdout.
        if not output_path:
            print(recovered_script)
        else:
            with open(output_path, 'w') as output_file:
                output_file.write(recovered_script)
embedded script from it and save it either to output_path or stdout | entailment |
def watch_file(path, func, *args, **kwargs):
    """
    Watch a file for changes by polling its last modification time. Call the
    provided function with *args and **kwargs upon modification.

    Polls once per second and runs until interrupted with Ctrl-C.
    """
    if not path:
        raise ValueError('Please specify a file to watch')
    print('Watching "{}" for changes'.format(path))
    last_mtime = os.path.getmtime(path)
    try:
        while True:
            time.sleep(1)
            current_mtime = os.path.getmtime(path)
            if current_mtime != last_mtime:
                func(*args, **kwargs)
                last_mtime = current_mtime
    except KeyboardInterrupt:
        # A clean way for the user to stop watching.
        pass
provided function with *args and **kwargs upon modification. | entailment |
def main(argv=None):
    """
    Entry point for the command line tool 'uflash'.
    Will print help text if the optional first argument is "help". Otherwise
    it will ensure the optional first argument ends in ".py" (the source
    Python script).
    An optional second argument is used to reference the path to the micro:bit
    device. Any more arguments are ignored.
    Exceptions are caught and printed for the user.

    :param argv: argument list to parse; defaults to sys.argv[1:].
    """
    if not argv:
        argv = sys.argv[1:]
    # Build the CLI: positional source/target plus mode flags.
    parser = argparse.ArgumentParser(description=_HELP_TEXT)
    parser.add_argument('source', nargs='?', default=None)
    parser.add_argument('target', nargs='*', default=None)
    parser.add_argument('-r', '--runtime', default=None,
                        help="Use the referenced MicroPython runtime.")
    parser.add_argument('-e', '--extract',
                        action='store_true',
                        help=("Extract python source from a hex file"
                              " instead of creating the hex file."), )
    parser.add_argument('-w', '--watch',
                        action='store_true',
                        help='Watch the source file for changes.')
    parser.add_argument('-m', '--minify',
                        action='store_true',
                        help='Minify the source')
    parser.add_argument('--version', action='version',
                        version='%(prog)s ' + get_version())
    args = parser.parse_args(argv)
    # Dispatch to extract / watch / flash; each reports failures on stderr
    # and exits with status 1 rather than raising to the shell.
    if args.extract:
        try:
            extract(args.source, args.target)
        except Exception as ex:
            error_message = "Error extracting {source}: {error!s}"
            print(error_message.format(source=args.source, error=ex),
                  file=sys.stderr)
            sys.exit(1)
    elif args.watch:
        try:
            watch_file(args.source, flash,
                       path_to_python=args.source,
                       paths_to_microbits=args.target,
                       path_to_runtime=args.runtime)
        except Exception as ex:
            error_message = "Error watching {source}: {error!s}"
            print(error_message.format(source=args.source, error=ex),
                  file=sys.stderr)
            sys.exit(1)
    else:
        try:
            flash(path_to_python=args.source, paths_to_microbits=args.target,
                  path_to_runtime=args.runtime, minify=args.minify)
        except Exception as ex:
            error_message = (
                "Error flashing {source} to {target}{runtime}: {error!s}"
            )
            source = args.source
            target = args.target if args.target else "microbit"
            if args.runtime:
                runtime = "with runtime {runtime}".format(runtime=args.runtime)
            else:
                runtime = ""
            print(error_message.format(source=source, target=target,
                                       runtime=runtime, error=ex),
                  file=sys.stderr)
            sys.exit(1) | Entry point for the command line tool 'uflash'.
Will print help text if the optional first argument is "help". Otherwise
it will ensure the optional first argument ends in ".py" (the source
Python script).
An optional second argument is used to reference the path to the micro:bit
device. Any more arguments are ignored.
Exceptions are caught and printed for the user. | entailment |
def timing(func):
    """Measure the execution time of a function call and print the result."""
    @functools.wraps(func)
    def timed(*args, **kwargs):
        started = time()
        outcome = func(*args, **kwargs)
        elapsed = time() - started
        print('func:%r args:[%r, %r] took: %2.4f sec' %
              (func.__name__, args, kwargs, elapsed))
        return outcome
    return timed
def deprecated(func):
    """
    Mark functions as deprecated.
    It will result in a warning being emitted when the function is used.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # Python 2 exposes the code object as func_code, Python 3 as
        # __code__; both carry the filename/line for the warning location.
        if sys.version_info < (3, 0):
            code = func.func_code
        else:
            code = func.__code__
        warnings.warn_explicit(
            "Call to deprecated function {}.".format(func.__name__),
            category=DeprecationWarning,
            filename=code.co_filename,
            lineno=code.co_firstlineno + 1
        )
        return func(*args, **kwargs)
    return wrapper
It will result in a warning being emitted when the function is used. | entailment |
def get_currency(currency_str):
    """
    Convert an identifier for a currency into a currency object.
    Parameters
    ----------
    currency_str : str
    Returns
    -------
    currency : Currency
    """
    path = 'units/currencies.csv'  # always use slash in Python packages
    filepath = pkg_resources.resource_filename('mpu', path)
    with open(filepath, 'r') as fp:
        reader = csv.reader(fp, delimiter=',', quotechar='"')
        next(reader, None)  # skip the headers
        for row in reader:
            # Match against the entity, name or ISO code columns.
            if currency_str not in (row[0], row[1], row[2]):
                continue
            exponent = int(row[5]) if row[5] else None
            withdrawal_date = row[6] if row[6] else None
            return Currency(name=row[1],
                            code=row[2],
                            numeric_code=row[3],
                            symbol=row[4],
                            exponent=exponent,
                            entities=[row[0]],
                            withdrawal_date=withdrawal_date,
                            subunits=row[7])
    raise ValueError('Could not find currency \'{}\''.format(currency_str))
Parameters
----------
currency_str : str
Returns
-------
currency : Currency | entailment |
def from_json(cls, json):
    """
    Create a Currency object from a JSON dump.

    Parameters
    ----------
    json : dict
        Must contain all Currency constructor keys.

    Returns
    -------
    currency : Currency
    """
    fields = ['name', 'code', 'numeric_code', 'symbol',
              'exponent', 'entities', 'withdrawal_date', 'subunits']
    return cls(**{field: json[field] for field in fields})
def print_table(table):
    """
    Print as a right-aligned, space-separated table.

    An empty table prints nothing (the original raised IndexError).

    I recommend looking at [`tabulate`](https://pypi.org/project/tabulate/).

    Parameters
    ----------
    table : list
        List of rows; cells are converted with ``str``.

    Examples
    --------
    >>> print_table([[1, 2, 3], [41, 0, 1]])
     1 2 3
    41 0 1
    """
    if not table:
        return  # nothing to print for an empty table
    rows = [[str(cell) for cell in row] for row in table]
    column_widths = [0] * len(rows[0])
    for row in rows:
        for col, cell in enumerate(row):
            column_widths[col] = max(column_widths[col], len(cell))
    # Right-align each cell to its column's maximum width.
    formatter = ' '.join('{:>' + str(width) + '}' for width in column_widths)
    for row in rows:
        print(formatter.format(*row))
I recommend looking at [`tabulate`](https://pypi.org/project/tabulate/).
Parameters
----------
table : list
Examples
--------
>>> print_table([[1, 2, 3], [41, 0, 1]])
1 2 3
41 0 1 | entailment |
def is_email(potential_email_address):
    """
    Check if potential_email_address is a valid e-mail address.

    This check has no false negatives but many false positives: a False
    result is definitive, a True result only means the string *looks* like
    an address (the domain might not be registered, for example).

    Parameters
    ----------
    potential_email_address : str

    Returns
    -------
    is_email : bool

    Examples
    --------
    >>> is_email('')
    False
    >>> is_email('info@martin-thoma.de')
    True
    >>> is_email('info@math.martin-thoma.de')
    True
    >>> is_email('Martin Thoma <info@martin-thoma.de>')
    False
    >>> is_email('info@martin-thoma')
    False
    """
    realname, address = parseaddr(potential_email_address)
    # A display name ("Foo <a@b.c>") or an empty address disqualifies.
    if realname or not address:
        return False
    if '@' not in potential_email_address:
        return False
    domain = potential_email_address.split('@')[1]
    return '.' in domain
Please note that this function has no false-negatives but many
false-positives. So if it returns that the input is not a valid
e-mail adress, it certainly isn't. If it returns True, it might still be
invalid. For example, the domain could not be registered.
Parameters
----------
potential_email_address : str
Returns
-------
is_email : bool
Examples
--------
>>> is_email('')
False
>>> is_email('info@martin-thoma.de')
True
>>> is_email('info@math.martin-thoma.de')
True
>>> is_email('Martin Thoma <info@martin-thoma.de>')
False
>>> is_email('info@martin-thoma')
False | entailment |
def str2bool(string_, default='raise'):
    """
    Convert a string to a bool.

    Parameters
    ----------
    string_ : str
    default : {'raise', False}
        Default behaviour if none of the "true" strings is detected.

    Returns
    -------
    boolean : bool

    Examples
    --------
    >>> str2bool('True')
    True
    >>> str2bool('1')
    True
    >>> str2bool('0')
    False
    """
    truthy = ('true', 't', '1', 'y', 'yes', 'enabled', 'enable', 'on')
    falsy = ('false', 'f', '0', 'n', 'no', 'disabled', 'disable', 'off')
    lowered = string_.lower()
    if lowered in truthy:
        return True
    # A falsy default means: anything that is not "true" maps to False.
    if lowered in falsy or not default:
        return False
    raise ValueError('The value \'{}\' cannot be mapped to boolean.'
                     .format(string_))
Parameters
----------
string_ : str
default : {'raise', False}
Default behaviour if none of the "true" strings is detected.
Returns
-------
boolean : bool
Examples
--------
>>> str2bool('True')
True
>>> str2bool('1')
True
>>> str2bool('0')
False | entailment |
def str2bool_or_none(string_, default='raise'):
    """
    Convert a string to a bool, or to None.

    Parameters
    ----------
    string_ : str
    default : {'raise', False}
        Default behaviour if none of the "true" or "none" strings is
        detected.

    Returns
    -------
    bool_or_none : bool or None

    Examples
    --------
    >>> str2bool_or_none('True')
    True
    >>> str2bool_or_none('1')
    True
    >>> str2bool_or_none('0')
    False
    >>> str2bool_or_none('undefined')
    """
    # "None"-like strings win over the boolean mapping.
    if is_none(string_, default=False):
        return None
    return str2bool(string_, default)
Parameters
----------
string_ : str
default : {'raise', False}
Default behaviour if none of the "true" or "none" strings is detected.
Returns
-------
bool_or_none : bool or None
Examples
--------
>>> str2bool_or_none('True')
True
>>> str2bool_or_none('1')
True
>>> str2bool_or_none('0')
False
>>> str2bool_or_none('undefined') | entailment |
def is_none(string_, default='raise'):
    """
    Check if a string is equivalent to None.

    Parameters
    ----------
    string_ : str
    default : {'raise', False}
        Default behaviour if none of the "None" strings is detected.

    Returns
    -------
    is_none : bool

    Examples
    --------
    >>> is_none('2', default=False)
    False
    >>> is_none('undefined', default=False)
    True
    """
    none_like = ('none', 'undefined', 'unknown', 'null', '')
    if string_.lower() in none_like:
        return True
    if not default:
        return False
    raise ValueError('The value \'{}\' cannot be mapped to none.'
                     .format(string_))
Parameters
----------
string_ : str
default : {'raise', False}
Default behaviour if none of the "None" strings is detected.
Returns
-------
is_none : bool
Examples
--------
>>> is_none('2', default=False)
False
>>> is_none('undefined', default=False)
True | entailment |
def is_iban(potential_iban):
    """
    Check if a string is a valid IBAN number.

    IBAN is described in ISO 13616-1:2007 Part 1. Spaces are ignored.

    # CODE
    0 = always zero
    b = BIC or National Bank code
    c = Account number
    i = holder's kennitala (national identification number)
    k = IBAN check digits
    n = Branch number
    t = Account type
    x = National check digit or character

    Examples
    --------
    >>> is_iban('DE89 3704 0044 0532 0130 00')
    True
    >>> is_iban('DE89 3704 0044 0532 0130 01')
    False
    """
    # Always use a forward slash for paths inside Python packages.
    filepath = pkg_resources.resource_filename('mpu', 'data/iban.csv')
    spec_rows = mpu.io.read(filepath, delimiter=';', format='dicts')
    iban = potential_iban.replace(' ', '')  # spaces are purely cosmetic
    shortest = min(int(row['length']) for row in spec_rows)
    if len(iban) < shortest:
        return False
    # The first two characters identify the country.
    country = None
    for row in spec_rows:
        if row['iban_fields'][:2] == iban[:2]:
            country = row
            break
    if country is None:
        return False
    if len(iban) != int(country['length']):
        return False
    if country['country_en'] == 'Germany':
        # Only the German checksum is actually verified.
        check_digits = ''.join(char
                               for field, char in zip(country['iban_fields'],
                                                      iban)
                               if field == 'k')
        expected = _calculate_german_iban_checksum(iban,
                                                   country['iban_fields'])
        return check_digits == expected
    return True
IBAN is described in ISO 13616-1:2007 Part 1.
Spaces are ignored.
# CODE
0 = always zero
b = BIC or National Bank code
c = Account number
i = holder's kennitala (national identification number)
k = IBAN check digits
n = Branch number
t = Account type
x = National check digit or character
Examples
--------
>>> is_iban('DE89 3704 0044 0532 0130 00')
True
>>> is_iban('DE89 3704 0044 0532 0130 01')
False | entailment |
def _calculate_german_iban_checksum(iban,
iban_fields='DEkkbbbbbbbbcccccccccc'):
"""
Calculate the checksam of the German IBAN format.
Examples
--------
>>> iban = 'DE41500105170123456789'
>>> _calculate_german_iban_checksum(iban)
'41'
"""
number = [value
for field_type, value in zip(iban_fields, iban)
if field_type in ['b', 'c']]
translate = {'0': '0', '1': '1', '2': '2', '3': '3', '4': '4', '5': '5',
'6': '6', '7': '7', '8': '8', '9': '9'}
for i in range(ord('A'), ord('Z') + 1):
translate[chr(i)] = str(i - ord('A') + 10)
for val in 'DE00':
translated = translate[val]
for char in translated:
number.append(char)
number = sum(int(value) * 10**i for i, value in enumerate(number[::-1]))
checksum = 98 - (number % 97)
return str(checksum) | Calculate the checksam of the German IBAN format.
Examples
--------
>>> iban = 'DE41500105170123456789'
>>> _calculate_german_iban_checksum(iban)
'41' | entailment |
def human_readable_bytes(nb_bytes, suffix='B'):
    """
    Convert a byte number into a human readable format.

    Parameters
    ----------
    nb_bytes : number
    suffix : str, optional (default: "B")

    Returns
    -------
    size_str : str

    Examples
    --------
    >>> human_readable_bytes(123)
    '123.0 B'
    >>> human_readable_bytes(1025)
    '1.0 KiB'
    >>> human_readable_bytes(9671406556917033397649423)
    '8.0 YiB'
    """
    value = nb_bytes
    for prefix in ('', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi'):
        if abs(value) < 1024.0:
            return '%3.1f %s%s' % (value, prefix, suffix)
        value /= 1024.0
    # Anything left is in the yobibyte range.
    return '%.1f %s%s' % (value, 'Yi', suffix)
Parameters
----------
nb_bytes : number
suffix : str, optional (default: "B")
Returns
-------
size_str : str
Examples
--------
>>> human_readable_bytes(123)
'123.0 B'
>>> human_readable_bytes(1025)
'1.0 KiB'
>>> human_readable_bytes(9671406556917033397649423)
'8.0 YiB' | entailment |
def list_files(bucket, profile_name=None):
    """
    List up to 1000 files in a bucket.

    Parameters
    ----------
    bucket : str
    profile_name : str, optional
        AWS profile

    Returns
    -------
    s3_paths : List[str]
    """
    session = boto3.Session(profile_name=profile_name)
    conn = session.client('s3')
    ret = conn.list_objects(Bucket=bucket)
    if 'Contents' not in ret:
        return []
    # Make this a generator in future and use the marker:
    # https://boto3.readthedocs.io/en/latest/reference/services/
    # s3.html#S3.Client.list_objects
    # Fixed: removed a leftover debug print(ret) and a second, redundant
    # list_objects API call; the first response is reused instead.
    return ['s3://' + bucket + '/' + obj['Key'] for obj in ret['Contents']]
Parameters
----------
bucket : str
profile_name : str, optional
AWS profile
Returns
-------
s3_paths : List[str] | entailment |
def s3_read(source, profile_name=None):
    """
    Read a file from an S3 source.

    Parameters
    ----------
    source : str
        Path starting with s3://, e.g. 's3://bucket-name/key/foo.bar'
    profile_name : str, optional
        AWS profile

    Returns
    -------
    content : bytes

    Raises
    ------
    botocore.exceptions.NoCredentialsError
        Botocore is not able to find your credentials. Either specify
        profile_name or add the environment variables AWS_ACCESS_KEY_ID,
        AWS_SECRET_ACCESS_KEY and AWS_SESSION_TOKEN.
        See https://boto3.readthedocs.io/en/latest/guide/configuration.html
    """
    session = boto3.Session(profile_name=profile_name)
    client = session.client('s3')
    bucket_name, key = _s3_path_split(source)
    response = client.get_object(Bucket=bucket_name, Key=key)
    return response['Body'].read()
Parameters
----------
source : str
Path starting with s3://, e.g. 's3://bucket-name/key/foo.bar'
profile_name : str, optional
AWS profile
Returns
-------
content : bytes
Raises
------
botocore.exceptions.NoCredentialsError
Botocore is not able to find your credentials. Either specify
profile_name or add the environment variables AWS_ACCESS_KEY_ID,
AWS_SECRET_ACCESS_KEY and AWS_SESSION_TOKEN.
See https://boto3.readthedocs.io/en/latest/guide/configuration.html | entailment |
def s3_download(source, destination,
                exists_strategy=ExistsStrategy.RAISE,
                profile_name=None):
    """
    Copy a file from an S3 source to a local destination.

    Parameters
    ----------
    source : str
        Path starting with s3://, e.g. 's3://bucket-name/key/foo.bar'
    destination : str
    exists_strategy : {'raise', 'replace', 'abort'}
        What is done when the destination already exists?
        * `ExistsStrategy.RAISE` means a RuntimeError is raised,
        * `ExistsStrategy.REPLACE` means the local file is replaced,
        * `ExistsStrategy.ABORT` means the download is not done.
    profile_name : str, optional
        AWS profile

    Raises
    ------
    botocore.exceptions.NoCredentialsError
        Botocore is not able to find your credentials. Either specify
        profile_name or add the environment variables AWS_ACCESS_KEY_ID,
        AWS_SECRET_ACCESS_KEY and AWS_SESSION_TOKEN.
        See https://boto3.readthedocs.io/en/latest/guide/configuration.html
    """
    if not isinstance(exists_strategy, ExistsStrategy):
        raise ValueError('exists_strategy \'{}\' is not in {}'
                         .format(exists_strategy, ExistsStrategy))
    session = boto3.Session(profile_name=profile_name)
    s3_resource = session.resource('s3')
    bucket_name, key = _s3_path_split(source)
    if os.path.isfile(destination):
        if exists_strategy is ExistsStrategy.ABORT:
            return
        if exists_strategy is ExistsStrategy.RAISE:
            raise RuntimeError('File \'{}\' already exists.'
                               .format(destination))
        # ExistsStrategy.REPLACE falls through to the download below.
    s3_resource.Bucket(bucket_name).download_file(key, destination)
Parameters
----------
source : str
Path starting with s3://, e.g. 's3://bucket-name/key/foo.bar'
destination : str
exists_strategy : {'raise', 'replace', 'abort'}
What is done when the destination already exists?
* `ExistsStrategy.RAISE` means a RuntimeError is raised,
* `ExistsStrategy.REPLACE` means the local file is replaced,
* `ExistsStrategy.ABORT` means the download is not done.
profile_name : str, optional
AWS profile
Raises
------
botocore.exceptions.NoCredentialsError
Botocore is not able to find your credentials. Either specify
profile_name or add the environment variables AWS_ACCESS_KEY_ID,
AWS_SECRET_ACCESS_KEY and AWS_SESSION_TOKEN.
See https://boto3.readthedocs.io/en/latest/guide/configuration.html | entailment |
def s3_upload(source, destination, profile_name=None):
    """
    Copy a file from a local source to an S3 destination.

    Parameters
    ----------
    source : str
    destination : str
        Path starting with s3://, e.g. 's3://bucket-name/key/foo.bar'
    profile_name : str, optional
        AWS profile
    """
    session = boto3.Session(profile_name=profile_name)
    s3_resource = session.resource('s3')
    bucket_name, key = _s3_path_split(destination)
    with open(source, 'rb') as data:
        s3_resource.Bucket(bucket_name).put_object(Key=key, Body=data)
Parameters
----------
source : str
destination : str
Path starting with s3://, e.g. 's3://bucket-name/key/foo.bar'
profile_name : str, optional
AWS profile | entailment |
def _s3_path_split(s3_path):
"""
Split an S3 path into bucket and key.
Parameters
----------
s3_path : str
Returns
-------
splitted : (str, str)
(bucket, key)
Examples
--------
>>> _s3_path_split('s3://my-bucket/foo/bar.jpg')
S3Path(bucket_name='my-bucket', key='foo/bar.jpg')
"""
if not s3_path.startswith('s3://'):
raise ValueError('s3_path is expected to start with \'s3://\', '
'but was {}'.format(s3_path))
bucket_key = s3_path[len('s3://'):]
bucket_name, key = bucket_key.split('/', 1)
return S3Path(bucket_name, key) | Split an S3 path into bucket and key.
Parameters
----------
s3_path : str
Returns
-------
splitted : (str, str)
(bucket, key)
Examples
--------
>>> _s3_path_split('s3://my-bucket/foo/bar.jpg')
S3Path(bucket_name='my-bucket', key='foo/bar.jpg') | entailment |
def get_meta(filepath):
    """
    Get meta-information of an image.

    Parameters
    ----------
    filepath : str

    Returns
    -------
    meta : dict
    """
    meta = {}
    # Image dimensions are only available when PIL is installed.
    try:
        from PIL import Image
    except ImportError:
        pass
    else:
        with Image.open(filepath) as img:
            meta['width'], meta['height'] = img.size
            # RGB, RGBA - does this always work?
            meta['channels'] = len(img.mode)
    # File times: creation, last edit, last open.
    meta['file'] = mpu.io.get_file_meta(filepath)
    return meta
Parameters
----------
filepath : str
Returns
-------
meta : dict | entailment |
def flatten(iterable, string_flattening=False):
    """
    Flatten a given iterable of iterables into one list.

    Fixed: ``string_flattening`` is now propagated into the recursive calls
    (previously nested strings were never flattened), and the check uses
    ``collections.abc.Iterable`` — the bare ``collections.Iterable`` alias
    was removed in Python 3.10.

    Parameters
    ----------
    iterable : iterable
    string_flattening : bool
        If this is False, then strings are NOT flattened

    Returns
    -------
    flat_list : List

    Examples
    --------
    >>> flatten([1, [2, [3]]])
    [1, 2, 3]
    >>> flatten(((1, 2), (3, 4), (5, 6)))
    [1, 2, 3, 4, 5, 6]
    >>> flatten(EList([EList([1, 2]), (3, [4, [[5]]])]))
    [1, 2, 3, 4, 5]
    """
    from collections.abc import Iterable
    flat_list = []
    for item in iterable:
        is_str = isinstance(item, str)
        if isinstance(item, Iterable) and (string_flattening or not is_str):
            if is_str and len(item) <= 1:
                # A single character iterates to itself; recursing on it
                # would never terminate.
                flat_list.append(item)
            else:
                flat_list.extend(flatten(item,
                                         string_flattening=string_flattening))
        else:
            flat_list.append(item)
    return flat_list
Parameters
----------
iterable : iterable
string_flattening : bool
If this is False, then strings are NOT flattened
Returns
-------
flat_list : List
Examples
--------
>>> flatten([1, [2, [3]]])
[1, 2, 3]
>>> flatten(((1, 2), (3, 4), (5, 6)))
[1, 2, 3, 4, 5, 6]
>>> flatten(EList([EList([1, 2]), (3, [4, [[5]]])]))
[1, 2, 3, 4, 5] | entailment |
def dict_merge(dict_left, dict_right, merge_method='take_left_shallow'):
    """
    Merge two dictionaries.

    This method does NOT modify dict_left or dict_right!
    Apply this method multiple times if the dictionary is nested.

    Parameters
    ----------
    dict_left : dict
    dict_right: dict
    merge_method : {'take_left_shallow', 'take_left_deep', \
                    'take_right_shallow', 'take_right_deep', \
                    'sum'}
        * take_left_shallow: Use both dictinaries. If both have the same key,
          take the value of dict_left
        * take_left_deep : If both dictionaries have the same key and the value
          is a dict for both again, then merge those sub-dictionaries
        * take_right_shallow : See take_left_shallow
        * take_right_deep : See take_left_deep
        * sum : sum up both dictionaries. If one does not have a value for a
          key of the other, assume the missing value to be zero.

    Returns
    -------
    merged_dict : dict

    Examples
    --------
    >>> dict_merge({'a': 1, 'b': 2}, {'c': 3}) == {'a': 1, 'b': 2, 'c': 3}
    True

    >>> out = dict_merge({'a': {'A': 1}},
    ...                  {'a': {'A': 2, 'B': 3}}, 'take_left_deep')
    >>> expected = {'a': {'A': 1, 'B': 3}}
    >>> out == expected
    True

    >>> out = dict_merge({'a': {'A': 1}},
    ...                  {'a': {'A': 2, 'B': 3}}, 'take_left_shallow')
    >>> expected = {'a': {'A': 1}}
    >>> out == expected
    True

    >>> out = dict_merge({'a': 1, 'b': {'c': 2}},
    ...                  {'b': {'c': 3, 'd': 4}},
    ...                  'sum')
    >>> expected = {'a': 1, 'b': {'c': 5, 'd': 4}}
    >>> out == expected
    True
    """
    if merge_method in ('take_right_shallow', 'take_right_deep'):
        return _dict_merge_right(dict_left, dict_right, merge_method)
    # The "left" variants are the "right" variants with swapped arguments.
    if merge_method == 'take_left_shallow':
        return dict_merge(dict_right, dict_left, 'take_right_shallow')
    if merge_method == 'take_left_deep':
        return dict_merge(dict_right, dict_left, 'take_right_deep')
    if merge_method == 'sum':
        merged = deepcopy(dict_left)
        for key, right_value in dict_right.items():
            if key not in merged:
                merged[key] = right_value
            elif isinstance(right_value, dict):
                merged[key] = dict_merge(dict_left[key],
                                         dict_right[key],
                                         merge_method='sum')
            else:
                merged[key] = dict_left[key] + dict_right[key]
        return merged
    raise NotImplementedError('merge_method=\'{}\' is not known.'
                              .format(merge_method))
This method does NOT modify dict_left or dict_right!
Apply this method multiple times if the dictionary is nested.
Parameters
----------
dict_left : dict
dict_right: dict
merge_method : {'take_left_shallow', 'take_left_deep', \
'take_right_shallow', 'take_right_deep', \
'sum'}
* take_left_shallow: Use both dictinaries. If both have the same key,
take the value of dict_left
* take_left_deep : If both dictionaries have the same key and the value
is a dict for both again, then merge those sub-dictionaries
* take_right_shallow : See take_left_shallow
* take_right_deep : See take_left_deep
* sum : sum up both dictionaries. If one does not have a value for a
key of the other, assume the missing value to be zero.
Returns
-------
merged_dict : dict
Examples
--------
>>> dict_merge({'a': 1, 'b': 2}, {'c': 3}) == {'a': 1, 'b': 2, 'c': 3}
True
>>> out = dict_merge({'a': {'A': 1}},
... {'a': {'A': 2, 'B': 3}}, 'take_left_deep')
>>> expected = {'a': {'A': 1, 'B': 3}}
>>> out == expected
True
>>> out = dict_merge({'a': {'A': 1}},
... {'a': {'A': 2, 'B': 3}}, 'take_left_shallow')
>>> expected = {'a': {'A': 1}}
>>> out == expected
True
>>> out = dict_merge({'a': 1, 'b': {'c': 2}},
... {'b': {'c': 3, 'd': 4}},
... 'sum')
>>> expected = {'a': 1, 'b': {'c': 5, 'd': 4}}
>>> out == expected
True | entailment |
def _dict_merge_right(dict_left, dict_right, merge_method):
"""See documentation of mpu.datastructures.dict_merge."""
new_dict = deepcopy(dict_left)
for key, value in dict_right.items():
if key not in new_dict:
new_dict[key] = value
else:
recurse = (merge_method == 'take_right_deep' and
isinstance(dict_left[key], dict) and
isinstance(dict_right[key], dict))
if recurse:
new_dict[key] = dict_merge(dict_left[key],
dict_right[key],
merge_method='take_right_deep')
else:
new_dict[key] = value
return new_dict | See documentation of mpu.datastructures.dict_merge. | entailment |
def set_dict_value(dictionary, keys, value):
    """
    Set a value in a (nested) dictionary by defining a list of keys.

    .. note:: Side-effects
              This function does not make a copy of dictionary, but directly
              edits it.

    Parameters
    ----------
    dictionary : dict
    keys : List[Any]
    value : object

    Returns
    -------
    dictionary : dict

    Examples
    --------
    >>> d = {'a': {'b': 'c', 'd': 'e'}}
    >>> expected = {'a': {'b': 'foobar', 'd': 'e'}}
    >>> set_dict_value(d, ['a', 'b'], 'foobar') == expected
    True
    """
    node = dictionary
    # Walk down, creating intermediate dicts for missing keys.
    for key in keys[:-1]:
        node = node.setdefault(key, {})
    node[keys[-1]] = value
    return dictionary
.. note:: Side-effects
This function does not make a copy of dictionary, but directly
edits it.
Parameters
----------
dictionary : dict
keys : List[Any]
value : object
Returns
-------
dictionary : dict
Examples
--------
>>> d = {'a': {'b': 'c', 'd': 'e'}}
>>> expected = {'a': {'b': 'foobar', 'd': 'e'}}
>>> set_dict_value(d, ['a', 'b'], 'foobar') == expected
True | entailment |
def does_keychain_exist(dict_, list_):
    """
    Check if a sequence of keys exist in a nested dictionary.

    Fixed: when the walk reaches a non-dict value before the key chain is
    exhausted, the result is now False. Previously ``key not in value`` was
    evaluated on the leaf, which gave substring false positives for strings
    and a TypeError for non-container leaves.

    Parameters
    ----------
    dict_ : Dict[str/int/tuple, Any]
    list_ : List[str/int/tuple]

    Returns
    -------
    keychain_exists : bool

    Examples
    --------
    >>> d = {'a': {'b': {'c': 'd'}}}
    >>> l_exists = ['a', 'b']
    >>> does_keychain_exist(d, l_exists)
    True

    >>> l_no_existant = ['a', 'c']
    >>> does_keychain_exist(d, l_no_existant)
    False
    """
    current = dict_
    for key in list_:
        if not isinstance(current, dict) or key not in current:
            return False
        current = current[key]
    return True
Parameters
----------
dict_ : Dict[str/int/tuple, Any]
list_ : List[str/int/tuple]
Returns
-------
keychain_exists : bool
Examples
--------
>>> d = {'a': {'b': {'c': 'd'}}}
>>> l_exists = ['a', 'b']
>>> does_keychain_exist(d, l_exists)
True
>>> l_no_existant = ['a', 'c']
>>> does_keychain_exist(d, l_no_existant)
False | entailment |
def remove_indices(self, indices):
    """
    Remove rows which have the given indices.

    Parameters
    ----------
    indices : list

    Returns
    -------
    filtered_list : EList
    """
    # Set membership is O(1); testing against the raw list was
    # O(len(indices)) per element.
    exclude = set(indices)
    return EList([element
                  for index, element in enumerate(self)
                  if index not in exclude])
Parameters
----------
indices : list
Returns
-------
filtered_list : EList | entailment |
def get_all_files(root, followlinks=False):
    """
    Get all files within the given root directory.

    Note that this list is not ordered.

    Parameters
    ----------
    root : str
        Path to a directory
    followlinks : bool, optional (default: False)

    Returns
    -------
    filepaths : list
        List of absolute paths to files
    """
    found = []
    for dirpath, _, filenames in os.walk(root, followlinks=followlinks):
        found.extend(os.path.abspath(os.path.join(dirpath, filename))
                     for filename in filenames)
    return found
Note that this list is not ordered.
Parameters
----------
root : str
Path to a directory
followlinks : bool, optional (default: False)
Returns
-------
filepaths : list
List of absolute paths to files | entailment |
def get_from_package(package_name, path):
    """
    Get the absolute path to a file in a package.

    Parameters
    ----------
    package_name : str
        e.g. 'mpu'
    path : str
        Path within a package

    Returns
    -------
    filepath : str
    """
    resource = pkg_resources.resource_filename(package_name, path)
    return os.path.abspath(resource)
Parameters
----------
package_name : str
e.g. 'mpu'
path : str
Path within a package
Returns
-------
filepath : str | entailment |
def example_df():
    """Create an example dataframe."""
    df = pd.DataFrame({
        'country': ['Germany', 'France', 'Indonesia',
                    'Ireland', 'Spain', 'Vatican'],
        'population': [82521653, 66991000, 255461700,
                       4761865, 46549045, None],
        'population_time': [dt.datetime(2016, 12, 1),
                            dt.datetime(2017, 1, 1),
                            dt.datetime(2017, 1, 1),
                            None,  # Ireland
                            dt.datetime(2017, 6, 1),  # Spain
                            None],
        'EUR': [True, True, False, True, True, True],
    })
    # Fix the column order (dict construction does not guarantee it on
    # older Python/pandas versions).
    return df[['country', 'population', 'population_time', 'EUR']]
def describe(df, dtype=None):
    """
    Print a description of a Pandas dataframe.

    Parameters
    ----------
    df : Pandas.DataFrame
    dtype : dict
        Maps column names to types

    Returns
    -------
    column_types : dict
        Maps column names to the detected type name ('other' is reported
        as 'str').
    """
    if dtype is None:
        dtype = {}
    print('Number of datapoints: {datapoints}'.format(datapoints=len(df)))
    column_info, column_info_meta = _get_column_info(df, dtype)
    # Each describer is only invoked when columns of its kind exist.
    if column_info['int']:
        _describe_int(df, column_info)
    if column_info['float']:
        _describe_float(df, column_info)
    if column_info['category']:
        _describe_category(df, column_info, column_info_meta)
    if column_info['time']:
        _describe_time(df, column_info, column_info_meta)
    if column_info['other']:
        _describe_other(df, column_info, column_info_meta)
    return {column_name: ('str' if kind == 'other' else kind)
            for kind, columns in column_info.items()
            for column_name in columns}
Parameters
----------
df : Pandas.DataFrame
dtype : dict
Maps column names to types | entailment |
def indices2one_hot(indices, nb_classes):
    """
    Convert an iterable of indices to a one-hot encoded list.

    You might also be interested in sklearn.preprocessing.OneHotEncoder

    Parameters
    ----------
    indices : iterable
        iterable of indices
    nb_classes : int
        Number of classes

    Returns
    -------
    one_hot : list

    Examples
    --------
    >>> indices2one_hot([0, 1, 1], 3)
    [[1, 0, 0], [0, 1, 0], [0, 1, 0]]
    >>> indices2one_hot([0, 1, 1], 2)
    [[1, 0], [0, 1], [0, 1]]
    """
    if nb_classes < 1:
        raise ValueError('nb_classes={}, but positive number expected'
                         .format(nb_classes))
    encoded = []
    for index in indices:
        row = [0] * nb_classes
        row[index] = 1
        encoded.append(row)
    return encoded
You might also be interested in sklearn.preprocessing.OneHotEncoder
Parameters
----------
indices : iterable
iterable of indices
nb_classes : int
Number of classes
dtype : type
Returns
-------
one_hot : list
Examples
--------
>>> indices2one_hot([0, 1, 1], 3)
[[1, 0, 0], [0, 1, 0], [0, 1, 0]]
>>> indices2one_hot([0, 1, 1], 2)
[[1, 0], [0, 1], [0, 1]] | entailment |
def one_hot2indices(one_hots):
    """
    Convert an iterable of one-hot encoded targets to a list of indices.

    Parameters
    ----------
    one_hots : list

    Returns
    -------
    indices : list

    Examples
    --------
    >>> one_hot2indices([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
    [0, 1, 2]

    >>> one_hot2indices([[1, 0], [1, 0], [0, 1]])
    [0, 0, 1]
    """
    return [argmax(one_hot) for one_hot in one_hots]
Parameters
----------
one_hot : list
Returns
-------
indices : list
Examples
--------
>>> one_hot2indices([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
[0, 1, 2]
>>> one_hot2indices([[1, 0], [1, 0], [0, 1]])
[0, 0, 1] | entailment |
def generate_primes():
    """
    Generate an infinite sequence of prime numbers.

    The algorithm was originally written by David Eppstein, UC Irvine. See:
    http://code.activestate.com/recipes/117119/

    Examples
    --------
    >>> g = generate_primes()
    >>> next(g)
    2
    >>> next(g)
    3
    >>> next(g)
    5
    """
    # Maps a composite number to the list of primes witnessing it.
    composites = {}
    candidate = 2
    while True:
        witnesses = composites.pop(candidate, None)
        if witnesses is None:
            # No recorded divisor: candidate is prime. Its square is the
            # first multiple not already marked by smaller primes.
            yield candidate
            composites[candidate * candidate] = [candidate]
        else:
            # Move each witness forward to its next multiple.
            for prime in witnesses:
                composites.setdefault(prime + candidate, []).append(prime)
        candidate += 1
The algorithm was originally written by David Eppstein, UC Irvine. See:
http://code.activestate.com/recipes/117119/
Examples
--------
>>> g = generate_primes()
>>> next(g)
2
>>> next(g)
3
>>> next(g)
5 | entailment |
def factorize(number):
    """
    Get the prime factors of an integer except for 1.

    Implemented iteratively: the original recursed once per factor and per
    trial division, which exhausts the recursion limit for numbers with
    many prime factors (e.g. large powers of two).

    Parameters
    ----------
    number : int

    Returns
    -------
    primes : iterable

    Raises
    ------
    ValueError
        If ``number`` is not an int, or is 0.

    Examples
    --------
    >>> factorize(-17)
    [-1, 17]
    >>> factorize(8)
    [2, 2, 2]
    >>> factorize(3**25)
    [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3]
    >>> factorize(1)
    [1]
    """
    if not isinstance(number, int):
        raise ValueError('integer expected, but type(number)={}'
                         .format(type(number)))
    if number < 0:
        return [-1] + factorize(-number)
    if number == 0:
        raise ValueError('All primes are prime factors of 0.')
    factors = []
    remaining = number
    candidate = 2
    while candidate * candidate <= remaining:
        while remaining % candidate == 0:
            factors.append(candidate)
            remaining //= candidate
        candidate += 1
    # Whatever is left is prime; for number == 1 this keeps [1].
    if remaining > 1 or not factors:
        factors.append(remaining)
    return factors
Parameters
----------
number : int
Returns
-------
primes : iterable
Examples
--------
>>> factorize(-17)
[-1, 17]
>>> factorize(8)
[2, 2, 2]
>>> factorize(3**25)
[3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3]
>>> factorize(1)
[1] | entailment |
def argmax(iterable):
    """
    Find the first index of the biggest value in the iterable.

    Returns None for an empty iterable.

    Parameters
    ----------
    iterable : iterable

    Returns
    -------
    argmax : int

    Examples
    --------
    >>> argmax([0, 0, 0])
    0
    >>> argmax([1, 0, 0])
    0
    >>> argmax([0, 1, 0])
    1
    >>> argmax([])
    """
    best_index = None
    best_value = None
    for index, value in enumerate(iterable):
        # Strict comparison: ties keep the earliest index.
        if best_value is None or value > best_value:
            best_value = value
            best_index = index
    return best_index
Parameters
----------
iterable : iterable
Returns
-------
argmax : int
Examples
--------
>>> argmax([0, 0, 0])
0
>>> argmax([1, 0, 0])
0
>>> argmax([0, 1, 0])
1
>>> argmax([]) | entailment |
def round_down(x, decimal_places):
    """
    Round a float down to decimal_places.

    Parameters
    ----------
    x : float
    decimal_places : int

    Returns
    -------
    rounded_float : float

    Examples
    --------
    >>> round_down(1.23456, 3)
    1.234
    >>> round_down(1.23456, 2)
    1.23
    """
    from math import floor
    # 10 ** decimal_places, built via a string so negative inputs degrade
    # to a scale of 1 (same behaviour as the original implementation).
    scale = int('1' + '0' * decimal_places)
    return floor(x * scale) / scale
Parameters
----------
x : float
decimal_places : int
Returns
-------
rounded_float : float
Examples
--------
>>> round_down(1.23456, 3)
1.234
>>> round_down(1.23456, 2)
1.23 | entailment |
def add_time(datetime_obj, days=0, hours=0, minutes=0, seconds=0):
    """
    Add time to a timezone-aware datetime object.

    This keeps the timezone correct, even if it changes due to daylight
    saving time (DST).

    Parameters
    ----------
    datetime_obj : datetime.datetime
    days : int
    hours : int
    minutes : int
    seconds : int

    Returns
    -------
    datetime : datetime.datetime
    """
    total_seconds = seconds + 60 * minutes + 3600 * hours + 86400 * days
    shifted = datetime_obj + dt.timedelta(seconds=total_seconds)
    # Naive timedelta arithmetic keeps the old UTC offset; a round trip
    # through UTC re-normalizes the timezone (e.g. across a DST switch).
    return shifted.astimezone(pytz.utc).astimezone(shifted.tzinfo)
This keeps the timezone correct, even if it changes due to daylight
saving time (DST).
Parameters
----------
datetime_obj : datetime.datetime
days : int
hours : int
minutes : int
seconds : int
Returns
-------
datetime : datetime.datetime | entailment |
def generate(minimum, maximum, local_random=None):
    """
    Generate a random date.

    The generated dates are uniformly distributed.

    Parameters
    ----------
    minimum : datetime object
    maximum : datetime object
    local_random : random.Random, optional
        Source of randomness; a fresh ``random.Random()`` is created when
        omitted.  (Previously a single shared instance was created at import
        time — the mutable-default-argument pitfall.)

    Returns
    -------
    generated_date : datetime object

    Examples
    --------
    >>> import random; r = random.Random(); r.seed(0)
    >>> from datetime import datetime
    >>> generate(datetime(2018, 1, 1), datetime(2018, 1, 2), local_random=r)
    datetime.datetime(2018, 1, 1, 20, 15, 58, 47972)
    >>> generate(datetime(2018, 1, 1), datetime(2018, 1, 2), local_random=r)
    datetime.datetime(2018, 1, 1, 18, 11, 27, 260414)
    """
    if local_random is None:
        local_random = random.Random()
    if not (minimum < maximum):
        raise ValueError('{} is not smaller than {}'.format(minimum, maximum))
    # Python 3 allows direct multiplication of timedelta with a float, but
    # Python 2.7 does not. Hence this work-around.
    span_seconds = (maximum - minimum).total_seconds()
    offset = dt.timedelta(seconds=span_seconds * local_random.random())
    return minimum + offset
# | Generate a random date.
The generated dates are uniformly distributed.
Parameters
----------
minimum : datetime object
maximum : datetime object
local_random : random.Random
Returns
-------
generated_date : datetime object
Examples
--------
>>> import random; r = random.Random(); r.seed(0)
>>> from datetime import datetime
>>> generate(datetime(2018, 1, 1), datetime(2018, 1, 2), local_random=r)
datetime.datetime(2018, 1, 1, 20, 15, 58, 47972)
>>> generate(datetime(2018, 1, 1), datetime(2018, 1, 2), local_random=r)
datetime.datetime(2018, 1, 1, 18, 11, 27, 260414) | entailment |
def run_init(args):
    """
    Run project initialization.

    This will ask the user for input.

    Parameters
    ----------
    args : argparse named arguments
    """
    # Default to the current working directory when no root was given.
    root = args.root
    if root is None:
        root = '.'
    root = os.path.abspath(root)
    # _get_package_data is defined elsewhere in this module; presumably it
    # prompts for project metadata (name, license, author, email) -- confirm.
    project_data = _get_package_data()
    project_name = project_data['project_name']
    # Standard skeleton: bin/, docs/, tests/ and the package directory itself.
    directories = [os.path.join(root, 'bin'),
                   os.path.join(root, 'docs'),
                   os.path.join(root, 'tests'),
                   os.path.join(root, project_name),
                   ]
    for dir_path in directories:
        if not os.path.exists(dir_path):
            os.makedirs(dir_path)
    # Create empty marker files.
    # NOTE(review): os.mknod is not available on macOS/Windows -- confirm
    # that only Linux is targeted here.
    script_paths = [os.path.join(root, 'README.md'),
                    os.path.join(root, 'tests/__init__.py'),
                    ]
    for script_path in script_paths:
        if not os.path.exists(script_path):
            os.mknod(script_path)
    # Template files shipped inside the mpu package are copied into the
    # new project ...
    copy_samples = [(resource_filename('mpu', 'package/templates/tox.ini.txt'),
                     os.path.join(root, 'tox.ini')),
                    (resource_filename('mpu',
                                       'package/templates/setup.cfg.txt'),
                     os.path.join(root, 'setup.cfg')),
                    (resource_filename('mpu',
                                       'package/templates/setup.py.txt'),
                     os.path.join(root, 'setup.py')),
                    (resource_filename('mpu',
                                       'package/templates/_version.py.txt'),
                     os.path.join(root, project_name + '/_version.py')),
                    (resource_filename('mpu',
                                       'package/templates/coveragerc.txt'),
                     os.path.join(root, '.coveragerc')),
                    (resource_filename('mpu', 'package/templates/init.py.txt'),
                     os.path.join(root, project_name + '/__init__.py')),
                    ]
    # ... with these placeholders replaced by the collected metadata.
    translate = {'[[project_name]]': project_data['project_name'],
                 '[[license]]': project_data['license'],
                 '[[author]]': project_data['author'],
                 '[[email]]': project_data['email'],
                 }
    for source, destination in copy_samples:
        if not os.path.exists(destination):
            copyfile(source, destination)
            _adjust_template(destination, translate)
# | Run project initialization.
This will ask the user for input.
Parameters
----------
args : argparse named arguments | entailment |
def _multiple_replace(text, search_replace_dict):
"""
Replace multiple things at once in a text.
Parameters
----------
text : str
search_replace_dict : dict
Returns
-------
replaced_text : str
Examples
--------
>>> d = {'a': 'b', 'b': 'c', 'c': 'd', 'd': 'e'}
>>> _multiple_replace('abcdefghijklm', d)
'bcdeefghijklm'
"""
# Create a regular expression from all of the dictionary keys
regex = re.compile("|".join(map(re.escape, search_replace_dict.keys())))
# For each match, look up the corresponding value in the dictionary
return regex.sub(lambda match: search_replace_dict[match.group(0)], text) | Replace multiple things at once in a text.
Parameters
----------
text : str
search_replace_dict : dict
Returns
-------
replaced_text : str
Examples
--------
>>> d = {'a': 'b', 'b': 'c', 'c': 'd', 'd': 'e'}
>>> _multiple_replace('abcdefghijklm', d)
'bcdeefghijklm' | entailment |
def _adjust_template(filepath, translate):
    """
    Search and replace contents of a filepath.

    Parameters
    ----------
    filepath : str
    translate : dict
    """
    # Read, substitute all placeholders in one pass, write back in place.
    with open(filepath, 'r') as handle:
        content = handle.read()
    content = _multiple_replace(content, translate)
    with open(filepath, 'w') as handle:
        handle.write(content)
# | Search and replace contents of a filepath.
Parameters
----------
filepath : str
translate : dict | entailment |
def get_parser(parser=None):
    """Get parser for mpu."""
    from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
    if parser is None:
        parser = ArgumentParser(
            description=__doc__,
            formatter_class=ArgumentDefaultsHelpFormatter)
    subparsers = parser.add_subparsers()
    # 'init' scaffolds a new project; handled by run_init.
    init_parser = subparsers.add_parser('init')
    init_parser.add_argument(
        "root",
        nargs='?',
        help="project root - should be empty")
    init_parser.set_defaults(func=run_init)
    return parser
# | Get parser for mpu.
def parallel_for(loop_function, parameters, nb_threads=100):
    """
    Execute the loop body in parallel.

    .. note:: Race-Conditions
          Executing code in parallel can cause an error class called
          "race-condition".

    Parameters
    ----------
    loop_function : Python function which takes a tuple as input
    parameters : List of tuples
        Each element here should be executed in parallel.
    nb_threads : int
        Size of the thread pool.

    Returns
    -------
    return_values : list of return values
    """
    import multiprocessing.pool
    from contextlib import closing
    pool = multiprocessing.pool.ThreadPool(nb_threads)
    with closing(pool):
        results = pool.map(loop_function, parameters)
    return results
# | Execute the loop body in parallel.
.. note:: Race-Conditions
Executing code in parallel can cause an error class called
"race-condition".
Parameters
----------
loop_function : Python function which takes a tuple as input
parameters : List of tuples
Each element here should be executed in parallel.
Returns
-------
return_values : list of return values | entailment |
def clip(number, lowest=None, highest=None):
    """
    Clip a number to a given lowest / highest value.

    Parameters
    ----------
    number : number
    lowest : number, optional
    highest : number, optional

    Returns
    -------
    clipped_number : number

    Examples
    --------
    >>> clip(42, lowest=0, highest=10)
    10
    """
    clipped = number
    if lowest is not None and clipped < lowest:
        clipped = lowest
    if highest is not None and clipped > highest:
        clipped = highest
    return clipped
# | Clip a number to a given lowest / highest value.
Parameters
----------
number : number
lowest : number, optional
highest : number, optional
Returns
-------
clipped_number : number
Examples
--------
>>> clip(42, lowest=0, highest=10)
10 | entailment |
def consistent_shuffle(*lists):
    """
    Shuffle lists consistently.

    Parameters
    ----------
    *lists
        Variable length number of lists

    Returns
    -------
    shuffled_lists : tuple of lists
        All of the lists are shuffled consistently

    Examples
    --------
    >>> import mpu, random; random.seed(8)
    >>> mpu.consistent_shuffle([1,2,3], ['a', 'b', 'c'], ['A', 'B', 'C'])
    ([3, 2, 1], ['c', 'b', 'a'], ['C', 'B', 'A'])
    """
    # One permutation of the index range, applied to every list alike.
    order = list(range(len(lists[0])))
    random.shuffle(order)
    shuffled = []
    for sublist in lists:
        shuffled.append([sublist[position] for position in order])
    return tuple(shuffled)
# | Shuffle lists consistently.
Parameters
----------
*lists
Variable length number of lists
Returns
-------
shuffled_lists : tuple of lists
All of the lists are shuffled consistently
Examples
--------
>>> import mpu, random; random.seed(8)
>>> mpu.consistent_shuffle([1,2,3], ['a', 'b', 'c'], ['A', 'B', 'C'])
([3, 2, 1], ['c', 'b', 'a'], ['C', 'B', 'A']) | entailment |
def haversine_distance(origin, destination):
    """
    Calculate the Haversine distance.

    Parameters
    ----------
    origin : tuple of float
        (lat, long)
    destination : tuple of float
        (lat, long)

    Returns
    -------
    distance_in_km : float

    Examples
    --------
    >>> munich = (48.1372, 11.5756)
    >>> berlin = (52.5186, 13.4083)
    >>> round(haversine_distance(munich, berlin), 1)
    504.2

    >>> new_york_city = (40.712777777778, -74.005833333333)  # NYC
    >>> round(haversine_distance(berlin, new_york_city), 1)
    6385.3
    """
    lat1, lon1 = origin
    lat2, lon2 = destination

    if not (-90.0 <= lat1 <= 90):
        raise ValueError('lat1={:2.2f}, but must be in [-90,+90]'.format(lat1))
    if not (-90.0 <= lat2 <= 90):
        raise ValueError('lat2={:2.2f}, but must be in [-90,+90]'.format(lat2))
    # Bug fix: both longitude messages used to interpolate lat1 and the
    # second one was labeled 'lon1'; they now report the offending value.
    if not (-180.0 <= lon1 <= 180):
        raise ValueError('lon1={:2.2f}, but must be in [-180,+180]'
                         .format(lon1))
    if not (-180.0 <= lon2 <= 180):
        raise ValueError('lon2={:2.2f}, but must be in [-180,+180]'
                         .format(lon2))
    radius = 6371  # mean Earth radius in km

    dlat = math_stl.radians(lat2 - lat1)
    dlon = math_stl.radians(lon2 - lon1)
    a = (math_stl.sin(dlat / 2) * math_stl.sin(dlat / 2) +
         math_stl.cos(math_stl.radians(lat1)) *
         math_stl.cos(math_stl.radians(lat2)) *
         math_stl.sin(dlon / 2) * math_stl.sin(dlon / 2))
    c = 2 * math_stl.atan2(math_stl.sqrt(a), math_stl.sqrt(1 - a))
    return radius * c
# | Calculate the Haversine distance.
Parameters
----------
origin : tuple of float
(lat, long)
destination : tuple of float
(lat, long)
Returns
-------
distance_in_km : float
Examples
--------
>>> munich = (48.1372, 11.5756)
>>> berlin = (52.5186, 13.4083)
>>> round(haversine_distance(munich, berlin), 1)
504.2
>>> new_york_city = (40.712777777778, -74.005833333333) # NYC
>>> round(haversine_distance(berlin, new_york_city), 1)
6385.3 | entailment |
def is_in_intervall(value, min_value, max_value, name='variable'):
    """
    Raise an exception if value is not in an interval.

    Parameters
    ----------
    value : orderable
    min_value : orderable
    max_value : orderable
    name : str
        Name of the variable to print in exception.
    """
    # Guard clause: a value inside the interval is fine, nothing to do.
    if min_value <= value <= max_value:
        return
    raise ValueError('{}={} is not in [{}, {}]'
                     .format(name, value, min_value, max_value))
# | Raise an exception if value is not in an interval.
Parameters
----------
value : orderable
min_value : orderable
max_value : orderable
name : str
Name of the variable to print in exception. | entailment |
def exception_logging(exctype, value, tb):
    """
    Log exception by using the root logger.

    Use it as `sys.excepthook = exception_logging`.

    Parameters
    ----------
    exctype : type
    value : NameError
    tb : traceback
    """
    # Flatten the exception type and (up to 10 frames of) traceback into a
    # dict, then log its string form at ERROR level via the root logger.
    record = {'exception_type': str(exctype),
              'message': str(traceback.format_tb(tb, 10))}
    logging.exception(str(record))
# | Log exception by using the root logger.
Use it as `sys.excepthook = exception_logging`.
Parameters
----------
exctype : type
value : NameError
tb : traceback | entailment |
def latitude(self, latitude):
    """Setter for latitude."""
    # Guard-clause style: accept valid values, reject the rest.
    if -90 <= latitude <= 90:
        self._latitude = latitude
    else:
        raise ValueError('latitude was {}, but has to be in [-90, 90]'
                         .format(latitude))
# | Setter for latiutde.
def longitude(self, longitude):
    """Setter for longitude."""
    # Guard-clause style: accept valid values, reject the rest.
    if -180 <= longitude <= 180:
        self._longitude = longitude
    else:
        raise ValueError('longitude was {}, but has to be in [-180, 180]'
                         .format(longitude))
# | Setter for longitude.
def distance(self, there):
    """
    Calculate the distance from this location to there.

    Parameters
    ----------
    there : Location

    Returns
    -------
    distance_in_m : float
    """
    here_coords = (self.latitude, self.longitude)
    there_coords = (there.latitude, there.longitude)
    return haversine_distance(here_coords, there_coords)
# | Calculate the distance from this location to there.
Parameters
----------
there : Location
Returns
-------
distance_in_m : float | entailment |
def main():
    """Command line interface of mpu."""
    parser = get_parser()
    args = parser.parse_args()
    # Subcommands register their handler via set_defaults(func=...);
    # without a subcommand there is no handler, so show the help text.
    func = getattr(args, 'func', None)
    if func:
        func(args)
    else:
        parser.print_help()
# | Command line interface of mpu.
def get_parser():
    """Get parser for mpu."""
    from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
    parser = ArgumentParser(description=__doc__,
                            formatter_class=ArgumentDefaultsHelpFormatter)
    # --version prints the installed mpu version and exits.
    parser.add_argument('--version',
                        action='version',
                        version='mpu {}'.format(mpu.__version__))
    subparsers = parser.add_subparsers(help='Python package commands')
    # Delegate the 'package' subcommand's arguments to mpu.package.cli.
    package_parser = subparsers.add_parser('package')
    mpu.package.cli.get_parser(package_parser)
    return parser
# | Get parser for mpu.
def read(filepath, **kwargs):
    """
    Read a file.

    Supported formats:

    * CSV
    * JSON, JSONL
    * pickle

    Parameters
    ----------
    filepath : str
        Path to the file that should be read. This methods action depends
        mainly on the file extension.
    kwargs : dict
        Any keywords for the specific file format. For CSV, this is
        'delimiter', 'quotechar', 'skiprows', 'format'

    Returns
    -------
    data : str or bytes
    """
    # Dispatch on the (case-insensitive) file extension.
    lower = filepath.lower()
    if lower.endswith('.csv'):
        return _read_csv(filepath, kwargs)
    if lower.endswith('.json'):
        with open(filepath) as data_file:
            return json.load(data_file, **kwargs)
    if lower.endswith('.jsonl'):
        return _read_jsonl(filepath, kwargs)
    if lower.endswith('.pickle'):
        with open(filepath, 'rb') as handle:
            return pickle.load(handle)
    if lower.endswith(('.yml', '.yaml')):
        raise NotImplementedError('YAML is not supported, because you need '
                                  'PyYAML in Python3. '
                                  'See '
                                  'https://stackoverflow.com/a/42054860/562769'
                                  ' as a guide how to use it.')
    if lower.endswith(('.h5', '.hdf5')):
        raise NotImplementedError('HDF5 is not supported. See '
                                  'https://stackoverflow.com/a/41586571/562769'
                                  ' as a guide how to use it.')
    raise NotImplementedError('File \'{}\' is not known.'.format(filepath))
# | Read a file.
Supported formats:
* CSV
* JSON, JSONL
* pickle
Parameters
----------
filepath : str
Path to the file that should be read. This methods action depends
mainly on the file extension.
kwargs : dict
Any keywords for the specific file format. For CSV, this is
'delimiter', 'quotechar', 'skiprows', 'format'
Returns
-------
data : str or bytes | entailment |
def _read_csv(filepath, kwargs):
    """See documentation of mpu.io.read."""
    # Fill in CSV defaults without overwriting caller-supplied values.
    if 'delimiter' not in kwargs:
        kwargs['delimiter'] = ','
    if 'quotechar' not in kwargs:
        kwargs['quotechar'] = '"'
    if 'skiprows' not in kwargs:
        kwargs['skiprows'] = []
    # An integer n means "skip the first n rows".
    if isinstance(kwargs['skiprows'], int):
        kwargs['skiprows'] = [i for i in range(kwargs['skiprows'])]
    # 'format' selects the row representation: 'default' -> list of lists,
    # 'dicts' -> list of dicts keyed by the header row.
    if 'format' in kwargs:
        format_ = kwargs['format']
        kwargs.pop('format', None)
    else:
        format_ = 'default'
    # skiprows/format are consumed here; the remaining kwargs go to csv.
    skiprows = kwargs['skiprows']
    kwargs.pop('skiprows', None)
    # Python 3 needs newline='' so the csv module handles line endings;
    # Python 2 needs binary mode instead.
    kwargs_open = {'newline': ''}
    mode = 'r'
    if sys.version_info < (3, 0):
        kwargs_open.pop('newline', None)
        mode = 'rb'
    with open(filepath, mode, **kwargs_open) as fp:
        if format_ == 'default':
            reader = csv.reader(fp, **kwargs)
            # EList is a project list type providing remove_indices.
            data = EList([row for row in reader])
            data = data.remove_indices(skiprows)
        elif format_ == 'dicts':
            reader_list = csv.DictReader(fp, **kwargs)
            data = [row for row in reader_list]
        else:
            raise NotImplementedError('Format \'{}\' unknown'
                                      .format(format_))
    return data
# | See documentation of mpu.io.read.
def _read_jsonl(filepath, kwargs):
"""See documentation of mpu.io.read."""
with open(filepath) as data_file:
data = [json.loads(line, **kwargs)
for line in data_file
if len(line) > 0]
return data | See documentation of mpu.io.read. | entailment |
def write(filepath, data, **kwargs):
    """
    Write a file.

    Supported formats:

    * CSV
    * JSON, JSONL
    * pickle

    Parameters
    ----------
    filepath : str
        Path to the file that should be written. This methods action depends
        mainly on the file extension.
    data : dict or list
        Content that should be written
    kwargs : dict
        Any keywords for the specific file format.

    Returns
    -------
    data : str or bytes
    """
    lower = filepath.lower()
    if lower.endswith('.csv'):
        return _write_csv(filepath, data, kwargs)
    elif lower.endswith('.json'):
        return _write_json(filepath, data, kwargs)
    elif lower.endswith('.jsonl'):
        return _write_jsonl(filepath, data, kwargs)
    elif lower.endswith('.pickle'):
        return _write_pickle(filepath, data, kwargs)
    elif lower.endswith(('.yml', '.yaml')):
        raise NotImplementedError('YAML is not supported, because you need '
                                  'PyYAML in Python3. '
                                  'See '
                                  'https://stackoverflow.com/a/42054860/562769'
                                  ' as a guide how to use it.')
    elif lower.endswith(('.h5', '.hdf5')):
        # Bug fix: this message used to say 'YAML is not supported' -- a
        # copy/paste error from the branch above.
        raise NotImplementedError('HDF5 is not supported. See '
                                  'https://stackoverflow.com/a/41586571/562769'
                                  ' as a guide how to use it.')
    else:
        raise NotImplementedError('File \'{}\' is not known.'.format(filepath))
# | Write a file.
Supported formats:
* CSV
* JSON, JSONL
* pickle
Parameters
----------
filepath : str
Path to the file that should be read. This methods action depends
mainly on the file extension.
data : dict or list
Content that should be written
kwargs : dict
Any keywords for the specific file format.
Returns
-------
data : str or bytes | entailment |
def _write_csv(filepath, data, kwargs):
"""See documentation of mpu.io.write."""
kwargs_open = {'newline': ''}
mode = 'w'
if sys.version_info < (3, 0):
kwargs_open.pop('newline', None)
mode = 'wb'
with open(filepath, mode, **kwargs_open) as fp:
if 'delimiter' not in kwargs:
kwargs['delimiter'] = ','
if 'quotechar' not in kwargs:
kwargs['quotechar'] = '"'
with open(filepath, 'w') as fp:
writer = csv.writer(fp, **kwargs)
writer.writerows(data)
return data | See documentation of mpu.io.write. | entailment |
def _write_json(filepath, data, kwargs):
    """See documentation of mpu.io.write."""
    # Pretty-printed, key-sorted, UTF-8 output unless the caller overrides.
    kwargs.setdefault('indent', 4)
    kwargs.setdefault('sort_keys', True)
    kwargs.setdefault('separators', (',', ': '))
    kwargs.setdefault('ensure_ascii', False)
    serialized = json.dumps(data, **kwargs)
    with io_stl.open(filepath, 'w', encoding='utf8') as outfile:
        outfile.write(to_unicode(serialized))
    return data
# | See documentation of mpu.io.write.
def _write_jsonl(filepath, data, kwargs):
    """See documentation of mpu.io.write."""
    kwargs['indent'] = None  # JSON has to be on one line!
    kwargs.setdefault('sort_keys', True)
    kwargs.setdefault('separators', (',', ': '))
    kwargs.setdefault('ensure_ascii', False)
    with io_stl.open(filepath, 'w', encoding='utf8') as outfile:
        # One JSON document per line, newline-terminated.
        for item in data:
            outfile.write(to_unicode(json.dumps(item, **kwargs)))
            outfile.write(u'\n')
    return data
# | See documentation of mpu.io.write.
def _write_pickle(filepath, data, kwargs):
"""See documentation of mpu.io.write."""
if 'protocol' not in kwargs:
kwargs['protocol'] = pickle.HIGHEST_PROTOCOL
with open(filepath, 'wb') as handle:
pickle.dump(data, handle, **kwargs)
return data | See documentation of mpu.io.write. | entailment |
def urlread(url, encoding='utf8'):
    """
    Read the content of an URL.

    Parameters
    ----------
    url : str
    encoding : str
        Used to decode the response body.

    Returns
    -------
    content : str
    """
    try:
        from urllib.request import urlopen  # Python 3
    except ImportError:
        from urllib2 import urlopen  # Python 2
    response = urlopen(url)
    return response.read().decode(encoding)
# | Read the content of an URL.
Parameters
----------
url : str
Returns
-------
content : str | entailment |
def download(source, sink=None):
    """
    Download a file.

    Parameters
    ----------
    source : str
        Where the file comes from. Some URL.
    sink : str or None (default: same filename in current directory)
        Where the file gets stored. Some filepath in the local file system.

    Returns
    -------
    sink : str
        Path where the downloaded file was stored.
    """
    try:
        from urllib.request import urlretrieve  # Python 3
    except ImportError:
        from urllib import urlretrieve  # Python 2
    if sink is None:
        # Use the URL's basename in the current working directory.
        sink = os.path.abspath(os.path.split(source)[1])
    urlretrieve(source, sink)
    return sink
# | Download a file.
Parameters
----------
source : str
Where the file comes from. Some URL.
sink : str or None (default: same filename in current directory)
Where the file gets stored. Some filepath in the local file system. | entailment |
def hash(filepath, method='sha1', buffer_size=65536):
    """
    Calculate a hash of a local file.

    Parameters
    ----------
    filepath : str
    method : str, optional (default: 'sha1')
        Any algorithm name known to hashlib, e.g. 'sha1', 'md5', 'sha256'.
    buffer_size : int, optional (default: 65536 byte = 64 KiB)
        in byte

    Returns
    -------
    hash : str

    Raises
    ------
    NotImplementedError
        If ``method`` is not an algorithm known to hashlib.
    """
    # Generalized from a hard-coded {sha1, md5} pair to any algorithm that
    # hashlib knows; unknown names still raise NotImplementedError.
    try:
        hash_function = hashlib.new(method)
    except ValueError:
        raise NotImplementedError('Only hashes known to hashlib are '
                                  'supported, but \'{}\' was specified.'
                                  .format(method))
    with open(filepath, 'rb') as fp:
        # Stream the file in fixed-size chunks to bound memory use.
        while True:
            data = fp.read(buffer_size)
            if not data:
                break
            hash_function.update(data)
    return hash_function.hexdigest()
# | Calculate a hash of a local file.
Parameters
----------
filepath : str
method : {'sha1', 'md5'}
buffer_size : int, optional (default: 65536 byte = 64 KiB)
in byte
Returns
-------
hash : str | entailment |
def get_creation_datetime(filepath):
    """
    Get the date that a file was created.

    Parameters
    ----------
    filepath : str

    Returns
    -------
    creation_datetime : datetime.datetime or None
        None when the platform does not expose a creation time (e.g. Linux).
    """
    if platform.system() == 'Windows':
        # On Windows, st_ctime holds the creation time.
        return datetime.fromtimestamp(os.path.getctime(filepath))
    else:
        stat = os.stat(filepath)
        try:
            # macOS / BSD expose the birth time as st_birthtime.
            return datetime.fromtimestamp(stat.st_birthtime)
        except AttributeError:
            # We're probably on Linux. No easy way to get creation dates here,
            # so we'll settle for when its content was last modified.
            # NOTE(review): the comment above says "last modified" but the
            # code returns None -- confirm which behavior is intended.
            return None
# | Get the date that a file was created.
Parameters
----------
filepath : str
Returns
-------
creation_datetime : datetime.datetime or None | entailment |
def get_modification_datetime(filepath):
    """
    Get the datetime that a file was last modified.

    Parameters
    ----------
    filepath : str

    Returns
    -------
    modification_datetime : datetime.datetime
        Timezone-aware, in the local timezone.
    """
    import tzlocal
    timezone = tzlocal.get_localzone()
    # Bug fix: datetime.replace(tzinfo=...) attaches a pytz zone without
    # localizing it, which yields wrong (LMT) UTC offsets.  Passing tz= to
    # fromtimestamp converts correctly.
    return datetime.fromtimestamp(os.path.getmtime(filepath), tz=timezone)
# | Get the datetime that a file was last modified.
Parameters
----------
filepath : str
Returns
-------
modification_datetime : datetime.datetime | entailment |
def get_access_datetime(filepath):
    """
    Get the last time filepath was accessed.

    Parameters
    ----------
    filepath : str

    Returns
    -------
    access_datetime : datetime.datetime
        Timezone-aware, in the local timezone.
    """
    import tzlocal
    tz = tzlocal.get_localzone()
    # Bug fix: datetime.replace(tzinfo=...) attaches a pytz zone without
    # localizing it, which yields wrong (LMT) UTC offsets.  Passing tz= to
    # fromtimestamp converts correctly.
    return datetime.fromtimestamp(os.path.getatime(filepath), tz=tz)
# | Get the last time filepath was accessed.
Parameters
----------
filepath : str
Returns
-------
access_datetime : datetime.datetime | entailment |
def get_file_meta(filepath):
    """
    Get meta-information about a file.

    Parameters
    ----------
    filepath : str

    Returns
    -------
    meta : dict
        Keys: 'filepath', 'creation_datetime', 'last_access_datetime',
        'modification_datetime' and, when python-magic is installed,
        'mime' and 'magic-type'.
    """
    meta = {}
    meta['filepath'] = os.path.abspath(filepath)
    meta['creation_datetime'] = get_creation_datetime(filepath)
    meta['last_access_datetime'] = get_access_datetime(filepath)
    meta['modification_datetime'] = get_modification_datetime(filepath)
    try:
        # python-magic is optional; without it the MIME fields are omitted.
        import magic
        f_mime = magic.Magic(mime=True, uncompress=True)
        f_other = magic.Magic(mime=False, uncompress=True)
        meta['mime'] = f_mime.from_file(meta['filepath'])
        meta['magic-type'] = f_other.from_file(meta['filepath'])
    except ImportError:
        pass
    return meta
# | Get meta-information about a file.
Parameters
----------
filepath : str
Returns
-------
meta : dict | entailment |
def gzip_file(source, sink):
    """
    Create a GZIP file from a source file.

    Parameters
    ----------
    source : str
        Filepath
    sink : str
        Filepath
    """
    import gzip
    # Stream the source straight into the gzip writer, line-buffered.
    with open(source, 'rb') as f_in:
        with gzip.open(sink, 'wb') as f_out:
            f_out.writelines(f_in)
# | Create a GZIP file from a source file.
Parameters
----------
source : str
Filepath
sink : str
Filepath | entailment |
def start(self):
    """
    Start the patch
    """
    # Patch the dotted path held in self.target with a mock client.
    self._patcher = mock.patch(target=self.target)
    MockClient = self._patcher.start()
    # Route .model(...) calls on the mocked client instance to self.model
    # -- presumably a stub/recording implementation; confirm on the class.
    instance = MockClient.return_value
    instance.model.side_effect = mock.Mock(
        side_effect=self.model
    )
# | Start the patch
def setup(cls, client_id, client_secret):
    """Configure client in session

    Parameters
    ----------
    client_id : str
    client_secret : str
    """
    # Stored on the class so every instance shares the credentials.
    cls.client_id = client_id
    cls.client_secret = client_secret
# | Configure client in session
def read(filelines, mapping=None, wok=False):
    """Parse a ris lines and return a list of entries.

    Entries are codified as dictionaries whose keys are the
    different tags. For single line and singly occurring tags,
    the content is codified as a string. In the case of multiline
    or multiple key occurrences, the content is returned as a list
    of strings.

    Keyword arguments:
    bibliography_file -- ris filehandle
    mapping -- custom RIS tags mapping
    wok -- flag, Web of Knowledge format is used if True, otherwise
           Refman's RIS specifications are used.
    """
    # Pick the parser class and its default tag mapping in one place.
    if wok:
        parser_class, default_mapping = Wok, WOK_TAG_KEY_MAPPING
    else:
        parser_class, default_mapping = Ris, TAG_KEY_MAPPING
    if not mapping:
        mapping = default_mapping
    return parser_class(filelines, mapping).parse()
# | Parse a ris lines and return a list of entries.
Entries are codified as dictionaries whose keys are the
different tags. For single line and singly occurring tags,
the content is codified as a string. In the case of multiline
or multiple key occurrences, the content is returned as a list
of strings.
Keyword arguments:
bibliography_file -- ris filehandle
mapping -- custom RIS tags mapping
wok -- flag, Web of Knowledge format is used if True, otherwise
Refman's RIS specifications are used. | entailment |
def refresh_context(self):
    """
    Get the default context of the user and save it

    Returns
    -------
    dict
        The user's preference context; also stored on ``self.context``.
    """
    User = self.model('res.user')
    # get_preferences(True) returns the context portion of the user's
    # preferences -- presumably Tryton semantics; confirm against the server.
    self.context = User.get_preferences(True)
    return self.context
# | Get the default context of the user and save it
def login(self, login, password, set_auth=False):
    """
    Attempts a login to the remote server
    and on success returns user id and session
    or None

    Warning: Do not depend on this. This will be deprecated
    with SSO.

    param login: user login name
    param password: user password
    param set_auth: sets the authentication on the client
    """
    # JSON-RPC style POST of common.db.login to the configured host.
    rv = self.session.post(
        self.host,
        dumps({
            "method": "common.db.login",
            "params": [login, password]
        }),
    )
    rv = loads(rv.content)['result']
    if set_auth:
        # Cache credentials so subsequent requests authenticate implicitly.
        self.set_auth(
            SessionAuth(login, *rv)
        )
    return rv
# | Attempts a login to the remote server
and on success returns user id and session
or None
Warning: Do not depend on this. This will be deprecated
with SSO.
param set_auth: sets the authentication on the client | entailment |
def is_auth_alive(self):
    """Return true if the auth is not expired, else false"""
    model = self.model('ir.model')
    try:
        # Any cheap authenticated call works as a liveness probe.
        model.search([], None, 1, None)
    except ClientError as err:
        # 403 means the session/auth expired; anything else is unexpected
        # and is re-raised to the caller.
        if err and err.message['code'] == 403:
            return False
        raise
    except Exception:
        raise
    else:
        return True
# | Return true if the auth is not expired, else false
def update(self, data=None, **kwargs):
    """
    Update the record right away.

    :param data: dictionary of changes (not modified by this call)
    :param kwargs: possibly a list of keyword args to change
    :return: result of ``self.model.write``
    """
    # Copy before merging so the caller's `data` dict is not mutated
    # (the old code called data.update(kwargs) in place).
    values = dict(data) if data else {}
    values.update(kwargs)
    return self.model.write([self.id], values)
# | Update the record right away.
:param data: dictionary of changes
:param kwargs: possibly a list of keyword args to change | entailment |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.