sentence1 stringlengths 52 3.87M | sentence2 stringlengths 1 47.2k | label stringclasses 1 value |
|---|---|---|
def unshare(self, plotters, keys=None, auto_update=False, draw=None):
    """
    Close the sharing connection of this plotter with others

    This method undoes the sharing connections made by the :meth:`share`
    method and releases the given `plotters` again, such that the
    formatoptions in this plotter may be updated again to values different
    from this one.

    Parameters
    ----------
    plotters: list of :class:`Plotter` instances or a :class:`Plotter`
        The plotters to release
    keys: string or iterable of strings
        The formatoptions to unshare, or group names of formatoptions to
        unshare all formatoptions of that group (see the
        :attr:`fmt_groups` property). If None, all formatoptions of this
        plotter are unshared.
    %(InteractiveBase.start_update.parameters.draw)s
    %(InteractiveBase.update.parameters.auto_update)s

    See Also
    --------
    share, unshare_me"""
    # Honour an explicit auto_update=True, otherwise fall back to the
    # plotter-wide setting (no_auto_update is the inverse flag).
    auto_update = auto_update or not self.no_auto_update
    # Accept a single Plotter as well as an iterable of Plotters.
    if isinstance(plotters, Plotter):
        plotters = [plotters]
    keys = self._set_sharing_keys(keys)
    for plotter in plotters:
        # update_other=False: we trigger a single update of this plotter
        # below instead of one update per released plotter.
        plotter.unshare_me(keys, auto_update=auto_update, draw=draw,
                           update_other=False)
    self.update(force=keys, auto_update=auto_update, draw=draw) | Close the sharing connection of this plotter with others
This method undoes the sharing connections made by the :meth:`share`
method and releases the given `plotters` again, such that the
formatoptions in this plotter may be updated again to values different
from this one.
Parameters
----------
plotters: list of :class:`Plotter` instances or a :class:`Plotter`
The plotters to release
keys: string or iterable of strings
The formatoptions to unshare, or group names of formatoptions to
unshare all formatoptions of that group (see the
:attr:`fmt_groups` property). If None, all formatoptions of this
plotter are unshared.
%(InteractiveBase.start_update.parameters.draw)s
%(InteractiveBase.update.parameters.auto_update)s
See Also
--------
share, unshare_me | entailment |
def unshare_me(self, keys=None, auto_update=False, draw=None,
               update_other=True):
    """
    Close the sharing connection of this plotter with others

    This method undoes the sharing connections made by the :meth:`share`
    method and release this plotter again.

    Parameters
    ----------
    keys: string or iterable of strings
        The formatoptions to unshare, or group names of formatoptions to
        unshare all formatoptions of that group (see the
        :attr:`fmt_groups` property). If None, all formatoptions of this
        plotter are unshared.
    %(InteractiveBase.start_update.parameters.draw)s
    %(InteractiveBase.update.parameters.auto_update)s

    See Also
    --------
    share, unshare"""
    auto_update = auto_update or not self.no_auto_update
    keys = self._set_sharing_keys(keys)
    to_update = []
    for key in keys:
        fmto = getattr(self, key)
        try:
            # _shared maps our formatoption key -> the controlling
            # formatoption of the other plotter; a missing key simply
            # means this formatoption was never shared.
            other_fmto = self._shared.pop(key)
        except KeyError:
            pass
        else:
            other_fmto.shared.remove(fmto)
            if update_other:
                # Queue (not run) an update on the other plotter; it is
                # started below only when auto_update is active.
                other_fmto.plotter._register_update(
                    force=[other_fmto.key])
                to_update.append(other_fmto.plotter)
    self.update(force=keys, draw=draw, auto_update=auto_update)
    if update_other and auto_update:
        for plotter in to_update:
            plotter.start_update(draw=draw) | Close the sharing connection of this plotter with others
This method undoes the sharing connections made by the :meth:`share`
method and release this plotter again.
Parameters
----------
keys: string or iterable of strings
The formatoptions to unshare, or group names of formatoptions to
unshare all formatoptions of that group (see the
:attr:`fmt_groups` property). If None, all formatoptions of this
plotter are unshared.
%(InteractiveBase.start_update.parameters.draw)s
%(InteractiveBase.update.parameters.auto_update)s
See Also
--------
share, unshare | entailment |
def has_changed(self, key, include_last=True):
    """
    Determine whether a formatoption changed in the last update

    Parameters
    ----------
    key: str
        A formatoption key contained in this plotter
    include_last: bool
        if True and the formatoption has been included in the last update,
        the return value will not be None. Otherwise the return value will
        only be not None if it changed during the last update

    Returns
    -------
    None or list
        - None, if the value has not been changed during the last update or
          `key` is not a valid formatoption key
        - a list of length two with the old value in the first place and
          the given `value` at the second"""
    # During initialization nothing has "changed" yet; unknown keys are
    # silently treated as unchanged (implicit None return).
    if self._initializing or key not in self:
        return
    fmto = getattr(self, key)
    if self._old_fmt and key in self._old_fmt[-1]:
        # most recent snapshot of the formatoption values
        old_val = self._old_fmt[-1][key]
    else:
        old_val = fmto.default
    if (fmto.diff(old_val) or (include_last and
                               fmto.key in self._last_update)):
        return [old_val, fmto.value]
Parameters
----------
key: str
A formatoption key contained in this plotter
include_last: bool
if True and the formatoption has been included in the last update,
the return value will not be None. Otherwise the return value will
only be not None if it changed during the last update
Returns
-------
None or list
- None, if the value has not been changed during the last update or
`key` is not a valid formatoption key
- a list of length two with the old value in the first place and
the given `value` at the second | entailment |
def execute(cmd):
    """ execute command, return rc and output string.

    The cmd argument can be a string or a list composed of
    the command name and each of its argument.
    eg, ['/usr/bin/cp', '-r', 'src', 'dst'] """
    # Parse cmd string to a list (shlex honours shell-style quoting).
    if not isinstance(cmd, list):
        cmd = shlex.split(cmd)
    # Execute command
    rc = 0
    output = ""
    try:
        output = subprocess.check_output(cmd, close_fds=True,
                                         stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as err:
        # A non-zero exit is not fatal: report rc and captured output.
        rc = err.returncode
        output = err.output
    except Exception as err:
        # Anything else (e.g. command not found) is an internal error.
        err_msg = ('Command "%s" Error: %s' % (' '.join(cmd), str(err)))
        raise exception.SDKInternalError(msg=err_msg)
    # check_output returns bytes; normalize to str for callers.
    output = bytes.decode(output)
    return (rc, output) | execute command, return rc and output string.
The cmd argument can be a string or a list composed of
the command name and each of its argument.
eg, ['/usr/bin/cp', '-r', 'src', 'dst'] | entailment |
def looping_call(f, sleep=5, inc_sleep=0, max_sleep=60, timeout=600,
                 exceptions=(), *args, **kwargs):
    """Helper function that to run looping call with fixed/dynamical interval.

    :param f: the looping call function or method.
    :param sleep: initial interval of the looping calls.
    :param inc_sleep: sleep time increment, default as 0.
    :param max_sleep: max sleep time.
    :param timeout: looping call timeout in seconds, 0 means no timeout.
    :param exceptions: exceptions that trigger re-try.
    """
    time_start = time.time()
    expiration = time_start + timeout
    retry = True
    while retry:
        # timeout == 0 keeps `expired` falsy forever (no deadline).
        expired = timeout and (time.time() > expiration)
        LOG.debug(
            "timeout is %(timeout)s, expiration is %(expiration)s, \
            time_start is %(time_start)s" %
            {"timeout": timeout, "expiration": expiration,
             "time_start": time_start})
        try:
            f(*args, **kwargs)
        except exceptions:
            retry = not expired
            if retry:
                LOG.debug("Will re-try %(fname)s in %(itv)d seconds" %
                          {'fname': f.__name__, 'itv': sleep})
                time.sleep(sleep)
                # linear back-off, capped at max_sleep
                sleep = min(sleep + inc_sleep, max_sleep)
            else:
                LOG.debug("Looping call %s timeout" % f.__name__)
            # NOTE(review): the dump lost indentation; this `continue` is
            # reconstructed at except-body level so that `retry = False`
            # runs only after f() succeeds — TODO confirm against upstream.
            continue
        retry = False | Helper function that to run looping call with fixed/dynamical interval.
:param f: the looping call function or method.
:param sleep: initial interval of the looping calls.
:param inc_sleep: sleep time increment, default as 0.
:param max_sleep: max sleep time.
:param timeout: looping call timeout in seconds, 0 means no timeout.
:param exceptions: exceptions that trigger re-try. | entailment |
def convert_to_mb(s):
    """Convert memory size from GB to MB."""
    s = s.upper()
    try:
        if s.endswith('G'):
            return float(s[:-1].strip()) * 1024
        elif s.endswith('T'):
            return float(s[:-1].strip()) * 1024 * 1024
        else:
            # NOTE(review): the last character is always stripped here, so
            # this branch presumably expects an 'M' suffix (e.g. '512M');
            # a bare number like '512' would lose its last digit — confirm.
            return float(s[:-1].strip())
    except (IndexError, ValueError, KeyError, TypeError):
        errmsg = ("Invalid memory format: %s") % s
        raise exception.SDKInternalError(msg=errmsg) | Convert memory size from GB to MB. | entailment |
def valid_mac_addr(addr):
    ''' Validates a mac address'''
    # Only string input can be a MAC address; anything else is invalid.
    if not isinstance(addr, six.string_types):
        return False
    # Colon-separated form only (aa:bb:cc:dd:ee:ff), case-insensitive.
    valid = re.compile(r'''
                      (^([0-9A-F]{2}[:]){5}([0-9A-F]{2})$)
                      ''',
                      re.VERBOSE | re.IGNORECASE)
    return valid.match(addr) is not None | Validates a mac address | entailment |
def check_input_types(*types, **validkeys):
    """This is a function decorator to check all input parameters given to
    decorated function are in expected types.

    The checks can be skipped by specify skip_input_checks=True in decorated
    function.

    :param tuple types: expected types of input parameters to the decorated
                        function
    :param validkeys: valid keywords(str) in a list.
                      e.g. validkeys=['key1', 'key2']
    """
    def decorator(function):
        @functools.wraps(function)
        def wrap_func(*args, **kwargs):
            # args[0] is the bound instance; it carries the opt-out flag.
            if args[0]._skip_input_check:
                # skip input check
                return function(*args, **kwargs)
            # drop class object self
            inputs = args[1:]
            if (len(inputs) > len(types)):
                msg = ("Too many parameters provided: %(specified)d specified,"
                       "%(expected)d expected." %
                       {'specified': len(inputs), 'expected': len(types)})
                LOG.info(msg)
                raise exception.SDKInvalidInputNumber(function.__name__,
                                                      len(types), len(inputs))
            argtypes = tuple(map(type, inputs))
            # Fewer args than declared types is fine (trailing optionals).
            match_types = types[0:len(argtypes)]
            invalid_type = False
            invalid_userid_idx = -1
            for idx in range(len(argtypes)):
                _mtypes = match_types[idx]
                # Normalize a single expected type to a 1-tuple.
                if not isinstance(_mtypes, tuple):
                    _mtypes = (_mtypes,)
                argtype = argtypes[idx]
                if constants._TUSERID in _mtypes:
                    # _TUSERID is a pseudo-type: the value must pass the
                    # stricter valid_userid() check unless it already
                    # matches one of the other accepted real types.
                    userid_type = True
                    for _tmtype in _mtypes:
                        if ((argtype == _tmtype) and
                            (_tmtype != constants._TUSERID)):
                            userid_type = False
                    if (userid_type and
                        (not valid_userid(inputs[idx]))):
                        invalid_userid_idx = idx
                        break
                elif argtype not in _mtypes:
                    invalid_type = True
                    break
            if invalid_userid_idx != -1:
                msg = ("Invalid string value found at the #%d parameter, "
                       "length should be less or equal to 8 and should not be "
                       "null or contain spaces." % (invalid_userid_idx + 1))
                LOG.info(msg)
                raise exception.SDKInvalidInputFormat(msg=msg)
            if invalid_type:
                msg = ("Invalid input types: %(argtypes)s; "
                       "Expected types: %(types)s" %
                       {'argtypes': str(argtypes), 'types': str(types)})
                LOG.info(msg)
                raise exception.SDKInvalidInputTypes(function.__name__,
                                                     str(types), str(argtypes))
            # Optionally restrict which keyword arguments are accepted.
            valid_keys = validkeys.get('valid_keys')
            if valid_keys:
                for k in kwargs.keys():
                    if k not in valid_keys:
                        msg = ("Invalid keyword: %(key)s; "
                               "Expected keywords are: %(keys)s" %
                               {'key': k, 'keys': str(valid_keys)})
                        LOG.info(msg)
                        raise exception.SDKInvalidInputFormat(msg=msg)
            return function(*args, **kwargs)
        return wrap_func
    return decorator | This is a function decorator to check all input parameters given to
decorated function are in expected types.
The checks can be skipped by specify skip_input_checks=True in decorated
function.
:param tuple types: expected types of input parameters to the decorated
function
:param validkeys: valid keywords(str) in a list.
e.g. validkeys=['key1', 'key2'] | entailment |
def expect_invalid_resp_data(data=''):
    """Catch exceptions when using zvm client response data."""
    # Context-manager body (presumably wrapped with @contextmanager above
    # this view): converts common data-access errors into SDKInternalError.
    try:
        yield
    except (ValueError, TypeError, IndexError, AttributeError,
            KeyError) as err:
        msg = ('Invalid smt response data: %s. Error: %s' %
               (data, six.text_type(err)))
        LOG.error(msg)
        raise exception.SDKInternalError(msg=msg) | Catch exceptions when using zvm client response data. | entailment |
def wrap_invalid_resp_data_error(function):
    """Catch exceptions when using zvm client response data."""
    # Decorator form of expect_invalid_resp_data: same error translation,
    # applied to a whole function instead of a with-block.
    @functools.wraps(function)
    def decorated_function(*arg, **kwargs):
        try:
            return function(*arg, **kwargs)
        except (ValueError, TypeError, IndexError, AttributeError,
                KeyError) as err:
            msg = ('Invalid smt response data. Error: %s' %
                   six.text_type(err))
            LOG.error(msg)
            raise exception.SDKInternalError(msg=msg)
    return decorated_function | Catch exceptions when using zvm client response data. | entailment |
def expect_and_reraise_internal_error(modID='SDK'):
    """Catch all kinds of zvm client request failure and reraise.

    modID: the moduleID that the internal error happens in.
    """
    # Context-manager body: re-raise SDKInternalError re-tagged with the
    # caller's module id so the origin module shows up in the error.
    try:
        yield
    except exception.SDKInternalError as err:
        msg = err.format_message()
        raise exception.SDKInternalError(msg, modID=modID) | Catch all kinds of zvm client request failure and reraise.
modID: the moduleID that the internal error happens in. | entailment |
def log_and_reraise_smt_request_failed(action=None):
    """Catch SDK base exception and print error log before reraise exception.

    msg: the error message to be logged.
    """
    try:
        yield
    except exception.SDKSMTRequestFailed as err:
        msg = ''
        # Prefix the failed action (if given) for more useful logs.
        if action is not None:
            msg = "Failed to %s. " % action
        msg += "SMT error: %s" % err.format_message()
        LOG.error(msg)
        # Re-raise with the original results and the enriched message.
        raise exception.SDKSMTRequestFailed(err.results, msg) | Catch SDK base exception and print error log before reraise exception.
msg: the error message to be logged. | entailment |
def get_smt_userid():
    """Get the userid of smt server"""
    # 'vmcp query userid' prints "<USERID> AT <node>"; the first token is
    # the userid we want.
    cmd = ["sudo", "/sbin/vmcp", "query userid"]
    try:
        userid = subprocess.check_output(cmd,
                                         close_fds=True,
                                         stderr=subprocess.STDOUT)
        userid = bytes.decode(userid)
        userid = userid.split()[0]
        return userid
    except Exception as err:
        msg = ("Could not find the userid of the smt server: %s") % err
        raise exception.SDKInternalError(msg=msg) | Get the userid of smt server | entailment |
def get_namelist():
    """Generate namelist.

    Either through set CONF.zvm.namelist, or by generate based on smt userid.
    """
    if CONF.zvm.namelist is not None:
        # namelist length limit should be 64, but there's bug limit to 8
        # will change the limit to 8 once the bug fixed
        if len(CONF.zvm.namelist) <= 8:
            return CONF.zvm.namelist
    # return ''.join(('NL', get_smt_userid().rjust(6, '0')[-6:]))
    # py3 compatible changes
    # Fallback: derive 'NL' + last 6 chars of the zero-padded smt userid.
    userid = get_smt_userid()
    return 'NL' + userid.rjust(6, '0')[-6:] | Generate namelist.
Either through set CONF.zvm.namelist, or by generate based on smt userid. | entailment |
def generate_iucv_authfile(fn, client):
    """Generate the iucv_authorized_userid file"""
    # Writes a small shell script to `fn` that, when run on the guest,
    # records `client` as the authorized IUCV userid.
    lines = ['#!/bin/bash\n',
             'echo -n %s > /etc/iucv_authorized_userid\n' % client]
    with open(fn, 'w') as f:
        f.writelines(lines) | Generate the iucv_authorized_userid file | entailment |
def translate_response_to_dict(rawdata, dirt):
    """Translate SMT response to a python dictionary.

    SMT response example:
    keyword1: value1\n
    keyword2: value2\n
    ...
    keywordn: valuen\n

    Will return a python dictionary:
    {keyword1: value1,
     keyword2: value2,
     ...
     keywordn: valuen,}
    """
    data_list = rawdata.split("\n")
    data = {}
    for ls in data_list:
        for k in list(dirt.keys()):
            # dirt maps output-dict key -> marker substring in the raw line;
            # first marker found in the line wins for that line.
            if ls.__contains__(dirt[k]):
                # Take everything after the marker as the value.
                data[k] = ls[(ls.find(dirt[k]) + len(dirt[k])):].strip()
                break
    # No marker matched at all: the response shape is unexpected.
    if data == {}:
        msg = ("Invalid smt response data. Error: No value matched with "
               "keywords. Raw Data: %(raw)s; Keywords: %(kws)s" %
               {'raw': rawdata, 'kws': str(dirt)})
        raise exception.SDKInternalError(msg=msg)
    return data | Translate SMT response to a python dictionary.
SMT response example:
keyword1: value1\n
keyword2: value2\n
...
keywordn: valuen\n
Will return a python dictionary:
{keyword1: value1,
keyword2: value2,
...
keywordn: valuen,} | entailment |
def delete_guest(userid):
""" Destroy a virtual machine.
Input parameters:
:userid: USERID of the guest, last 8 if length > 8
"""
# Check if the guest exists.
guest_list_info = client.send_request('guest_list')
# the string 'userid' need to be coded as 'u'userid' in case of py2 interpreter.
userid_1 = (unicode(userid, "utf-8") if sys.version[0] == '2' else userid)
if userid_1 not in guest_list_info['output']:
RuntimeError("Userid %s does not exist!" % userid)
# Delete the guest.
guest_delete_info = client.send_request('guest_delete', userid)
if guest_delete_info['overallRC']:
print("\nFailed to delete guest %s!" % userid)
else:
print("\nSucceeded to delete guest %s!" % userid) | Destroy a virtual machine.
Input parameters:
:userid: USERID of the guest, last 8 if length > 8 | entailment |
def describe_guest(userid):
    """ Get the basic information of virtual machine.

    Input parameters:
    :userid:   USERID of the guest, last 8 if length > 8
    """
    # Check if the guest exists.
    guest_list_info = client.send_request('guest_list')
    # py2 needs a unicode userid for the membership test against the
    # (unicode) guest list output.
    userid_1 = (unicode(userid, "utf-8") if sys.version[0] == '2' else userid)
    if userid_1 not in guest_list_info['output']:
        raise RuntimeError("Guest %s does not exist!" % userid)
    guest_describe_info = client.send_request('guest_get_definition_info', userid)
    print("\nThe created guest %s's info are: \n%s\n" % (userid, guest_describe_info)) | Get the basic information of virtual machine.
Input parameters:
:userid: USERID of the guest, last 8 if length > 8 | entailment |
def import_image(image_path, os_version):
    """ Import the specific image.

    Input parameters:
    :image_path:   Image file name
    :os_version:   Operation System version. e.g. rhel7.4
    """
    image_name = os.path.basename(image_path)
    print("\nChecking if image %s exists ..." % image_name)
    # Non-zero overallRC from image_query means the image is not there yet.
    image_query_info = client.send_request('image_query', imagename = image_name)
    if image_query_info['overallRC']:
        print("Importing image %s ..." % image_name)
        # The import API expects a file:// URL, not a bare path.
        url = "file://" + image_path
        image_import_info = client.send_request('image_import', image_name, url,
                                                {'os_version': os_version})
        if image_import_info['overallRC']:
            raise RuntimeError("Failed to import image %s!\n%s" %
                               (image_name, image_import_info))
        else:
            print("Succeeded to import image %s!" % image_name)
    else:
        print("Image %s already exists!" % image_name) | Import the specific image.
Input parameters:
:image_path: Image file name
:os_version: Operation System version. e.g. rhel7.4 | entailment |
def create_guest(userid, cpu, memory, disks_list, profile):
    """ Create the userid.

    Input parameters:
    :userid:       USERID of the guest, last 8 if length > 8
    :cpu:          the number of vcpus
    :memory:       memory
    :disks_list:   list of disks to add
    :profile:      profile of the userid
    """
    # Check if the userid already exists.
    guest_list_info = client.send_request('guest_list')
    # py2 needs a unicode userid for the membership test.
    userid_1 = (unicode(userid, "utf-8") if sys.version[0] == '2' else userid)
    if userid_1 in guest_list_info['output']:
        raise RuntimeError("Guest %s already exists!" % userid)
    # Create the guest.
    print("\nCreating guest: %s ..." % userid)
    guest_create_info = client.send_request('guest_create', userid, cpu, memory,
                                            disk_list = disks_list,
                                            user_profile = profile)
    if guest_create_info['overallRC']:
        raise RuntimeError("Failed to create guest %s!\n%s" %
                           (userid, guest_create_info))
    else:
        print("Succeeded to create guest %s!" % userid) | Create the userid.
Input parameters:
:userid: USERID of the guest, last 8 if length > 8
:cpu: the number of vcpus
:memory: memory
:disks_list: list of disks to add
:profile: profile of the userid | entailment |
def deploy_guest(userid, image_name):
    """ Deploy image to root disk.

    Input parameters:
    :userid:       USERID of the guest, last 8 if length > 8
    :image_path:   Image file name
    """
    print("\nDeploying %s to %s ..." % (image_name, userid))
    guest_deploy_info = client.send_request('guest_deploy', userid, image_name)
    # if failed to deploy, then delete the guest.
    if guest_deploy_info['overallRC']:
        print("\nFailed to deploy guest %s!\n%s" % (userid, guest_deploy_info))
        print("\nDeleting the guest %s that failed to deploy..." % userid)
        # call terminage_guest() to delete the guest that failed to deploy.
        delete_guest(userid)
        # NOTE(review): os._exit(0) reports a *success* exit status even
        # though the deploy failed — presumably intentional for this sample
        # script, but a non-zero code would be more conventional; confirm.
        os._exit(0)
    else:
        print("Succeeded to deploy %s!" % userid) | Deploy image to root disk.
Input parameters:
:userid: USERID of the guest, last 8 if length > 8
:image_path: Image file name | entailment |
def create_network(userid, os_version, network_info):
    """ Create network device and configure network interface.

    Input parameters:
    :userid:         USERID of the guest, last 8 if length > 8
    :os_version:     os version of the image file
    :network_info:   dict of network info
    """
    print("\nConfiguring network interface for %s ..." % userid)
    network_create_info = client.send_request('guest_create_network_interface',
                                              userid, os_version, network_info)
    # overallRC != 0 means the API call failed.
    if network_create_info['overallRC']:
        raise RuntimeError("Failed to create network for guest %s!\n%s" %
                           (userid, network_create_info))
    else:
        print("Succeeded to create network for guest %s!" % userid) | Create network device and configure network interface.
Input parameters:
:userid: USERID of the guest, last 8 if length > 8
:os_version: os version of the image file
:network_info: dict of network info | entailment |
def coupleTo_vswitch(userid, vswitch_name):
    """ Couple to vswitch.

    Input parameters:
    :userid:         USERID of the guest, last 8 if length > 8
    :vswitch_name:   name of the vswitch to couple the NIC to
    """
    print("\nCoupleing to vswitch for %s ..." % userid)
    # '1000' is the virtual device number of the NIC being coupled.
    vswitch_info = client.send_request('guest_nic_couple_to_vswitch',
                                       userid, '1000', vswitch_name)
    if vswitch_info['overallRC']:
        raise RuntimeError("Failed to couple to vswitch for guest %s!\n%s" %
                           (userid, vswitch_info))
    else:
        print("Succeeded to couple to vswitch for guest %s!" % userid) | Couple to vswitch.
Input parameters:
:userid: USERID of the guest, last 8 if length > 8
:network_info: dict of network info | entailment |
def grant_user(userid, vswitch_name):
    """ Grant user.

    Input parameters:
    :userid:         USERID of the guest, last 8 if length > 8
    :vswitch_name:   name of the vswitch to grant the user access to
    """
    print("\nGranting user %s ..." % userid)
    # Note the argument order: vswitch first, then userid.
    user_grant_info = client.send_request('vswitch_grant_user', vswitch_name, userid)
    if user_grant_info['overallRC']:
        raise RuntimeError("Failed to grant user %s!" %userid)
    else:
        print("Succeeded to grant user %s!" % userid) | Grant user.
Input parameters:
:userid: USERID of the guest, last 8 if length > 8
:network_info: dict of network info | entailment |
def start_guest(userid):
    """ Power on the vm.

    Input parameters:
    :userid:   USERID of the guest, last 8 if length > 8
    """
    # Check the power state before starting guest.
    power_state_info = client.send_request('guest_get_power_state', userid)
    print("\nPower state is: %s." % power_state_info['output'])
    # start guest.
    guest_start_info = client.send_request('guest_start', userid)
    if guest_start_info['overallRC']:
        raise RuntimeError('Failed to start guest %s!\n%s' %
                           (userid, guest_start_info))
    else:
        print("Succeeded to start guest %s!" % userid)
    # Check the power state after starting guest.
    power_state_info = client.send_request('guest_get_power_state', userid)
    print("Power state is: %s." % power_state_info['output'])
    # NOTE(review): dead code — overallRC != 0 already raised above, so
    # this branch can never execute; kept for byte-compatibility.
    if guest_start_info['overallRC']:
        print("Guest_start error: %s" % guest_start_info) | Power on the vm.
Input parameters:
:userid: USERID of the guest, last 8 if length > 8 | entailment |
def _run_guest(userid, image_path, os_version, profile,
               cpu, memory, network_info, vswitch_name, disks_list):
    """ Deploy and provide a virtual machine.

    Input parameters:
    :userid:         USERID of the guest, no more than 8
    :image_path:     image file path
    :os_version:     os version of the image file
    :profile:        profile of the userid
    :cpu:            the number of vcpus
    :memory:         memory
    :network_info:   dict of network info. Members are:
                     :ip_addr:   ip address of vm
                     :gateway:   gateway of network
                     :cidr:      CIDR
    :vswitch_name:   vswitch name
    :disks_list:     list of disks to add. For example:
                     disks_list = [{'size': '3g',
                                    'is_boot_disk': True,
                                    'disk_pool': 'ECDK: xcateckd'}]
    """
    # Orchestrates the full provisioning flow; each helper raises (or
    # exits, for deploy) on failure, so a completed run implies success.
    print("Start deploying a virtual machine:")
    # Import image if not exists.
    import_image(image_path, os_version)
    # Start time.
    spawn_start = time.time()
    # Create guest.
    create_guest(userid, cpu, memory, disks_list, profile)
    # Deploy image to root disk.
    image_name = os.path.basename(image_path)
    deploy_guest(userid, image_name)
    # Create network device and configure network interface.
    create_network(userid, os_version, network_info)
    # Couple to vswitch.
    coupleTo_vswitch(userid, vswitch_name)
    # Grant user.
    grant_user(userid, vswitch_name)
    # Power on the vm.
    start_guest(userid)
    # End the time.
    spawn_time = time.time() - spawn_start
    print("Instance-%s spawned succeeded in %s seconds!" %
          (userid, spawn_time))
    # Describe guest.
    describe_guest(userid) | Deploy and provide a virtual machine.
Input parameters:
:userid: USERID of the guest, no more than 8
:image_path: image file path
:os_version: os version of the image file
:profile: profile of the userid
:cpu: the number of vcpus
:memory: memory
:network_info: dict of network info. Members are:
:ip_addr: ip address of vm
:gateway: gateway of network
:cidr: CIDR
:vswitch_name: vswitch name
:disks_list: list of disks to add. For example:
disks_list = [{'size': '3g',
'is_boot_disk': True,
'disk_pool': 'ECDK: xcateckd'}] | entailment |
def _user_input_properties():
""" User input the properties of guest, image, and network. """
global GUEST_USERID
global GUEST_PROFILE
global GUEST_VCPUS
global GUEST_MEMORY
global GUEST_ROOT_DISK_SIZE
global DISK_POOL
global IMAGE_PATH
global IMAGE_OS_VERSION
global GUEST_IP_ADDR
global GATEWAY
global CIDR
global VSWITCH_NAME
global NETWORK_INFO
global DISKS_LIST
pythonVersion = sys.version[0]
print("Your python interpreter's version is %s." % pythonVersion)
if pythonVersion == '2':
print("Input properties with string type in ''.")
else:
print("Input properties without ''.")
print("Please input guest properties:")
GUEST_USERID = input("guest_userid = ")
GUEST_PROFILE = input("guest_profile = ")
GUEST_VCPUS = int(input("guest_vcpus = "))
GUEST_MEMORY = int(input("guest_memory (in Megabytes) = "))
GUEST_ROOT_DISK_SIZE = int(input("guest_root_disk_size (in Gigabytes) = "))
GUEST_POOL = input("disk_pool = ")
print("\n")
IMAGE_PATH = input("image_path = ")
IMAGE_OS_VERSION = input("image_os_version = ")
print("\n")
GUEST_IP_ADDR = input("guest_ip_addr = ")
GATEWAY = input("gateway = ")
CIDR = input("cidr = ")
VSWITCH_NAME = input("vswitch_name = ")
NETWORK_INFO = [{'ip_addr': GUEST_IP_ADDR, 'gateway_addr': GATEWAY, 'cidr': CIDR}]
DISKS_LIST = [{'size': '%dg' % GUEST_ROOT_DISK_SIZE,
'is_boot_disk': True, 'disk_pool': GUEST_POOL}] | User input the properties of guest, image, and network. | entailment |
def run_guest():
    """
    A sample for quickly deploy and start a virtual guest.
    """
    # user input the properties of guest, image and network.
    _user_input_properties()
    # run a guest with the module-global settings collected above.
    _run_guest(GUEST_USERID, IMAGE_PATH, IMAGE_OS_VERSION, GUEST_PROFILE,
               GUEST_VCPUS, GUEST_MEMORY, NETWORK_INFO, VSWITCH_NAME, DISKS_LIST) | A sample for quickly deploy and start a virtual guest. | entailment |
def package_version(filename, varname):
    """Return package version string by reading `filename` and retrieving its
    module-global variable `varname`."""
    # exec the file with a private locals dict so nothing leaks into this
    # module's namespace; the version variable ends up in _locals.
    _locals = {}
    with open(filename) as fp:
        exec(fp.read(), None, _locals)
    return _locals[varname] | Return package version string by reading `filename` and retrieving its
module-global variable `varnam`. | entailment |
def request(self, requestData, **kwArgs):
    """
    Process a request.

    Input:
       Request as either a string or a list.
       captureLogs=<True|False>
                  Enables or disables log capture per request.
                  This overrides the value from SMT.
       requestId=<id> to pass a value for the request Id instead of
                  using one generated by SMT.

    Output:
       Dictionary containing the results.  See ReqHandle.buildReturnDict()
          for information on the contents of the dictionary.
    """
    self.reqCnt = self.reqCnt + 1
    # Determine whether the request will be capturing logs
    if 'captureLogs' in kwArgs.keys():
        logFlag = kwArgs['captureLogs']
    else:
        logFlag = self.captureLogs
    # Pass along or generate a request Id
    if 'requestId' in kwArgs.keys():
        requestId = kwArgs['requestId']
    else:
        requestId = str(self.reqIdPrefix) + str(self.reqCnt)
    rh = ReqHandle(
        requestId=requestId,
        captureLogs=logFlag,
        smt=self)
    rh.parseCmdline(requestData)
    # Only drive the function when command-line parsing succeeded.
    if rh.results['overallRC'] == 0:
        rh.printSysLog("Processing: " + rh.requestString)
        rh.driveFunction()
    return rh.results | Process a request.
Input:
Request as either a string or a list.
captureLogs=<True|False>
Enables or disables log capture per request.
This overrides the value from SMT.
requestId=<id> to pass a value for the request Id instead of
using one generated by SMT.
Output:
Dictionary containing the results. See ReqHandle.buildReturnDict()
for information on the contents of the dictionary. | entailment |
def invokeSmapiApi(rh):
    """
    Invoke a SMAPI API.

    Input:
       Request Handle with the following properties:
          function            - 'SMAPI'
          subfunction         - 'API'
          userid              - 'HYPERVISOR'
          parms['apiName']    - Name of API as defined by SMCLI
          parms['operands']   - List (array) of operands to send or
                                an empty list.

    Output:
       Request Handle updated with the results.
       Return code - 0: ok, non-zero: error
    """
    rh.printSysLog("Enter smapi.invokeSmapiApi")
    # 'HYPERVISOR' is a placeholder target; SMCLI still requires a -T
    # operand, so substitute a dummy userid in that case.
    if rh.userid != 'HYPERVISOR':
        userid = rh.userid
    else:
        userid = 'dummy'
    parms = ["-T", userid]
    if 'operands' in rh.parms:
        parms.extend(rh.parms['operands'])
    results = invokeSMCLI(rh, rh.parms['apiName'], parms)
    if results['overallRC'] == 0:
        rh.printLn("N", results['response'])
    else:
        # SMAPI API failed.
        rh.printLn("ES", results['response'])
        rh.updateResults(results)    # Use results from invokeSMCLI
    rh.printSysLog("Exit smapi.invokeCmd, rc: " + str(rh.results['overallRC']))
    return rh.results['overallRC'] | Invoke a SMAPI API.
Input:
Request Handle with the following properties:
function - 'SMAPI'
subfunction - 'API'
userid - 'HYPERVISOR'
parms['apiName'] - Name of API as defined by SMCLI
parms['operands'] - List (array) of operands to send or
an empty list.
Output:
Request Handle updated with the results.
Return code - 0: ok, non-zero: error | entailment |
def _parse(self, init_info):
    """Initialize a FCP device object from several lines of string
    describing properties of the FCP device.

    Here is a sample:
        opnstk1: FCP device number: B83D
        opnstk1:   Status: Free
        opnstk1:   NPIV world wide port number: NONE
        opnstk1:   Channel path ID: 59
        opnstk1:   Physical world wide port number: 20076D8500005181
    The format comes from the response of xCAT, do not support
    arbitrary format.
    """
    # Exactly 5 lines expected, in the fixed order shown above; anything
    # else is silently ignored and leaves the attributes unset.
    if isinstance(init_info, list) and (len(init_info) == 5):
        self._dev_no = self._get_dev_number_from_line(init_info[0])
        # init_info[1] (Status) is intentionally not stored.
        self._npiv_port = self._get_wwpn_from_line(init_info[2])
        self._chpid = self._get_chpid_from_line(init_info[3])
        self._physical_port = self._get_wwpn_from_line(init_info[4]) | Initialize a FCP device object from several lines of string
describing properties of the FCP device.
Here is a sample:
opnstk1: FCP device number: B83D
opnstk1: Status: Free
opnstk1: NPIV world wide port number: NONE
opnstk1: Channel path ID: 59
opnstk1: Physical world wide port number: 20076D8500005181
The format comes from the response of xCAT, do not support
arbitrary format. | entailment |
def init_fcp(self, assigner_id):
    """init_fcp to init the FCP managed by this host"""
    # TODO master_fcp_list (zvm_zhcp_fcp_list) really need?
    fcp_list = CONF.volume.fcp_list
    # An empty fcp_list disables volume support entirely (not an error).
    if fcp_list == '':
        errmsg = ("because CONF.volume.fcp_list is empty, "
                  "no volume functions available")
        LOG.info(errmsg)
        return
    self._fcp_info = self._init_fcp_pool(fcp_list, assigner_id)
    # Reconcile the freshly built pool with the persisted FCP records.
    self._sync_db_fcp_list() | init_fcp to init the FCP managed by this host | entailment |
def _init_fcp_pool(self, fcp_list, assigner_id):
"""The FCP infomation got from smt(zthin) looks like :
host: FCP device number: xxxx
host: Status: Active
host: NPIV world wide port number: xxxxxxxx
host: Channel path ID: xx
host: Physical world wide port number: xxxxxxxx
......
host: FCP device number: xxxx
host: Status: Active
host: NPIV world wide port number: xxxxxxxx
host: Channel path ID: xx
host: Physical world wide port number: xxxxxxxx
"""
complete_fcp_set = self._expand_fcp_list(fcp_list)
fcp_info = self._get_all_fcp_info(assigner_id)
lines_per_item = 5
num_fcps = len(fcp_info) // lines_per_item
for n in range(0, num_fcps):
fcp_init_info = fcp_info[(5 * n):(5 * (n + 1))]
fcp = FCP(fcp_init_info)
dev_no = fcp.get_dev_no()
if dev_no in complete_fcp_set:
if fcp.is_valid():
self._fcp_pool[dev_no] = fcp
else:
errmsg = ("Find an invalid FCP device with properties {"
"dev_no: %(dev_no)s, "
"NPIV_port: %(NPIV_port)s, "
"CHPID: %(CHPID)s, "
"physical_port: %(physical_port)s} !") % {
'dev_no': fcp.get_dev_no(),
'NPIV_port': fcp.get_npiv_port(),
'CHPID': fcp.get_chpid(),
'physical_port': fcp.get_physical_port()}
LOG.warning(errmsg)
else:
# normal, FCP not used by cloud connector at all
msg = "Found a fcp %s not in fcp_list" % dev_no
LOG.debug(msg) | The FCP infomation got from smt(zthin) looks like :
host: FCP device number: xxxx
host: Status: Active
host: NPIV world wide port number: xxxxxxxx
host: Channel path ID: xx
host: Physical world wide port number: xxxxxxxx
......
host: FCP device number: xxxx
host: Status: Active
host: NPIV world wide port number: xxxxxxxx
host: Channel path ID: xx
host: Physical world wide port number: xxxxxxxx | entailment |
def _expand_fcp_list(fcp_list):
    """Expand an FCP list string into a set of 4-digit hex addresses.

    An FCP list string is composed of FCP device addresses, the range
    indicator '-' and the separator ';'.  For example
    "0011-0013;0015;0017-0018" expands to
    {'0011', '0012', '0013', '0015', '0017', '0018'}.
    """
    LOG.debug("Expand FCP list %s" % fcp_list)
    if not fcp_list:
        return set()

    single = '[0-9a-fA-F]{1,4}(-[0-9a-fA-F]{1,4})?'
    pattern = "^(%(range)s)(;%(range)s)*$" % {'range': single}
    if not re.match(pattern, fcp_list):
        errmsg = ("Invalid FCP address %s") % fcp_list
        raise exception.SDKInternalError(msg=errmsg)

    devices = set()
    for chunk in fcp_list.split(';'):
        # a chunk is either "addr" or "min-max"; for a single address
        # the lower and upper bound coincide
        bounds = chunk.split('-')
        lo = int(bounds[0], 16)
        hi = int(bounds[-1], 16)
        devices.update("%04x" % addr for addr in range(lo, hi + 1))
    # a set cannot hold duplicates, so overlapping ranges collapse
    return devices
each fcp devices in the list string. A fcp list is composed of fcp
device addresses, range indicator '-', and split indicator ';'.
For example, if fcp_list is
"0011-0013;0015;0017-0018", expand_fcp_list(fcp_list) will return
[0011, 0012, 0013, 0015, 0017, 0018]. | entailment |
def _add_fcp(self, fcp):
    """Insert an FCP that appears in the configured fcp_list into the DB.

    Failures are logged but never propagated: a broken DB insert must not
    abort the surrounding sync.

    :param fcp: the FCP device number to add
    """
    try:
        # use lazy %-style logging args consistently (the except branch
        # below already did; the eager `%` formatting was inconsistent)
        LOG.info("fcp %s found in CONF.volume.fcp_list, add it to db",
                 fcp)
        self.db.new(fcp)
    except Exception:
        LOG.info("failed to add fcp %s into db", fcp)
def _sync_db_fcp_list(self):
    """Synchronize the FCP database with the configured FCP pool.

    Two directions are checked:

    * an FCP present in the DB but no longer in ``self._fcp_pool`` is
      reported as an orphan (it may still be in use),
    * an FCP in ``self._fcp_pool`` but missing from the DB is added.
    """
    fcp_db_list = self.db.get_all()
    for fcp_rec in fcp_db_list:
        # the DB stores the device number in the first column
        if fcp_rec[0].lower() not in self._fcp_pool:
            self._report_orphan_fcp(fcp_rec[0])
    # only the keys (device numbers) of the pool are needed here
    for fcp_conf_rec in self._fcp_pool:
        res = self.db.get_from_fcp(fcp_conf_rec)
        # if not found this record, a [] will be returned
        if len(res) == 0:
            self._add_fcp(fcp_conf_rec)
warn if some FCP already removed while it's still in use,
or info about the new FCP added | entailment |
def find_and_reserve_fcp(self, assigner_id):
    """Reserve an FCP device for the given assigner.

    If the assigner already owns an FCP record, that record is
    re-reserved and its device number returned.  Otherwise a free FCP is
    picked and reserved.  Returns the FCP device number, or None when no
    FCP is left.
    """
    fcp_list = self.db.get_from_assigner(assigner_id)
    if fcp_list:
        # we got it from db, let's reuse it
        old_fcp = fcp_list[0][0]
        self.db.reserve(old_fcp)
        return old_fcp
    new_fcp = self.db.find_and_reserve()
    if new_fcp is None:
        LOG.info("no more fcp to be allocated")
        return None
    LOG.debug("allocated %s fcp for %s assigner" %
              (new_fcp, assigner_id))
    return new_fcp
The function to reserve a fcp for user
1. Check whether assigner_id has a fcp already
if yes, make the reserve of that record to 1
2. No fcp, then find a fcp and reserve it
fcp will be returned, or None indicate no fcp | entailment |
def increase_fcp_usage(self, fcp, assigner_id=None):
    """Increase the usage count of the given FCP.

    Returns True when this is a new assignment (the assigner had no
    existing connections), False when an existing record was incremented.
    """
    # TODO: check assigner_id to make sure on the correct fcp record
    connections = self.db.get_connections_from_assigner(assigner_id)
    if connections != 0:
        self.db.increase_usage(fcp)
        return False
    self.db.assign(fcp, assigner_id)
    return True
Returns True if it's a new fcp, otherwise return False | entailment |
def get_available_fcp(self):
    """Return the device numbers of all free, unreserved FCPs.

    :returns: list of FCP device number strings
    """
    # each row's first column is the FCP device number; build the list
    # with a comprehension instead of the former append loop
    free_unreserved = self.db.get_all_free_unreserved()
    return [item[0] for item in free_unreserved]
def _attach(self, fcp, assigner_id, target_wwpn, target_lun,
            multipath, os_version, mount_point):
    """Attach a volume to a guest.

    Ensures the FCP pool is initialized, bumps the FCP usage counter,
    dedicates the FCP to the guest when it is the first use, then adds
    the disk inside the guest.  On failure the usage counter is decreased
    again and, if no connections remain, the FCP is undedicated
    (best-effort) before the error is re-raised.
    """
    LOG.info('Start to attach device to %s' % assigner_id)
    self.fcp_mgr.init_fcp(assigner_id)
    # new is True when this is the first connection on this FCP
    new = self.fcp_mgr.increase_fcp_usage(fcp, assigner_id)
    try:
        if new:
            self._dedicate_fcp(fcp, assigner_id)
        self._add_disk(fcp, assigner_id, target_wwpn, target_lun,
                       multipath, os_version, mount_point)
    except exception.SDKBaseException as err:
        errmsg = 'rollback attach because error:' + err.format_message()
        LOG.error(errmsg)
        connections = self.fcp_mgr.decrease_fcp_usage(fcp, assigner_id)
        # if connections less than 1, undedicate the device
        if not connections:
            with zvmutils.ignore_errors():
                self._undedicate_fcp(fcp, assigner_id)
        raise exception.SDKBaseException(msg=errmsg)
    # TODO: other exceptions?
    LOG.info('Attaching device to %s is done.' % assigner_id)
First, we need translate fcp into local wwpn, then
dedicate fcp to the user if it's needed, after that
call smt layer to call linux command | entailment |
def _detach(self, fcp, assigner_id, target_wwpn, target_lun,
            multipath, os_version, mount_point):
    """Detach a volume from a guest.

    Decreases the FCP usage counter first, removes the disk from the
    guest and, when no connections remain, undedicates the FCP.  On
    failure the usage counter is restored and a best-effort re-add of the
    disk is attempted before the error is re-raised.
    """
    LOG.info('Start to detach device from %s' % assigner_id)
    connections = self.fcp_mgr.decrease_fcp_usage(fcp, assigner_id)
    try:
        self._remove_disk(fcp, assigner_id, target_wwpn, target_lun,
                          multipath, os_version, mount_point)
        if not connections:
            self._undedicate_fcp(fcp, assigner_id)
    except (exception.SDKBaseException,
            exception.SDKSMTRequestFailed) as err:
        errmsg = 'rollback detach because error:' + err.format_message()
        LOG.error(errmsg)
        # undo the counter decrease, then try to restore the disk
        self.fcp_mgr.increase_fcp_usage(fcp, assigner_id)
        with zvmutils.ignore_errors():
            self._add_disk(fcp, assigner_id, target_wwpn, target_lun,
                           multipath, os_version, mount_point)
        raise exception.SDKBaseException(msg=errmsg)
    LOG.info('Detaching device to %s is done.' % assigner_id)
def detach(self, connection_info):
    """Detach the volume described by `connection_info` from its guest.

    Raises SDKObjectNotExistError when the guest userid does not exist.
    """
    fcp = connection_info['zvm_fcp'].lower()
    target_wwpn = connection_info['target_wwpn']
    target_lun = connection_info['target_lun']
    assigner_id = connection_info['assigner_id'].upper()
    multipath = connection_info['multipath']
    os_version = connection_info['os_version']
    mount_point = connection_info['mount_point']
    # guard clause: refuse to operate on a non-existing guest
    if not zvmutils.check_userid_exist(assigner_id):
        LOG.error("Guest '%s' does not exist" % assigner_id)
        raise exception.SDKObjectNotExistError(
            obj_desc=("Guest '%s'" % assigner_id), modID='volume')
    self._detach(fcp, assigner_id, target_wwpn, target_lun,
                 multipath, os_version, mount_point)
def get_volume_connector(self, assigner_id):
    """Build the connector dictionary used for attaching volumes.

    The returned mapping looks like::

        {'zvm_fcp': [fcp, ...], 'wwpns': [wwpn, ...], 'host': host}

    An "empty" connector (empty lists and host) is returned whenever no
    FCP device, no WWPN or no z/VM host name is available; each case is
    logged as a warning.
    """
    empty_connector = {'zvm_fcp': [], 'wwpns': [], 'host': ''}
    # init fcp pool
    self.fcp_mgr.init_fcp(assigner_id)
    fcp_list = self.fcp_mgr.get_available_fcp()
    if not fcp_list:
        LOG.warning("No available FCP device found.")
        return empty_connector
    # collect the WWPN of every usable FCP, warning about those without
    wwpns = []
    for fcp_no in fcp_list:
        wwpn = self.fcp_mgr.get_wwpn(fcp_no)
        if wwpn:
            wwpns.append(wwpn)
        else:
            LOG.warning("FCP device %s has no available WWPN." % fcp_no)
    if not wwpns:
        LOG.warning("No available WWPN found.")
        return empty_connector
    zvm_host = self._smtclient.get_host_info()['zvm_host']
    if zvm_host == '':
        LOG.warning("zvm host not specified.")
        return empty_connector
    connector = {'zvm_fcp': fcp_list,
                 'wwpns': wwpns,
                 'host': zvm_host}
    LOG.debug('get_volume_connector returns %s for %s' %
              (connector, assigner_id))
    return connector
Connector information is a dictionary representing the ip of the
machine that will be making the connection, the name of the iscsi
initiator and the hostname of the machine as follows::
{
'zvm_fcp': fcp
'wwpns': [wwpn]
'host': host
} | entailment |
def salsa20_8(B, x, src, s_start, dest, d_start):
    """Salsa20/8 core (http://en.wikipedia.org/wiki/Salsa20).

    Performs B ^= src[s_start:s_start+16] followed by one Salsa20/8 block
    transform, storing the result in both B and dest[d_start:d_start+16].
    `x` is a caller-provided 16-word scratch list.  Words are 32-bit
    values held in Python ints; the rotations below are written without a
    32-bit mask — the stray high bits cannot reach the low 32 bits
    because every addition and the final store apply ``& 0xffffffff``.
    """
    # Merged blockxor for speed
    for i in xrange(16):
        x[i] = B[i] = B[i] ^ src[s_start + i]
    # This is the actual Salsa 20/8: four identical double rounds,
    # fully unrolled (a/b are the rotated quarter-round inputs)
    for i in xrange(4):
        # first half of the double round
        a = (x[0]+x[12]) & 0xffffffff
        b = (x[5]+x[1]) & 0xffffffff
        x[4] ^= (a << 7) | (a >> 25)
        x[9] ^= (b << 7) | (b >> 25)
        a = (x[10]+x[6]) & 0xffffffff
        b = (x[15]+x[11]) & 0xffffffff
        x[14] ^= (a << 7) | (a >> 25)
        x[3] ^= (b << 7) | (b >> 25)
        a = (x[4]+x[0]) & 0xffffffff
        b = (x[9]+x[5]) & 0xffffffff
        x[8] ^= (a << 9) | (a >> 23)
        x[13] ^= (b << 9) | (b >> 23)
        a = (x[14]+x[10]) & 0xffffffff
        b = (x[3]+x[15]) & 0xffffffff
        x[2] ^= (a << 9) | (a >> 23)
        x[7] ^= (b << 9) | (b >> 23)
        a = (x[8]+x[4]) & 0xffffffff
        b = (x[13]+x[9]) & 0xffffffff
        x[12] ^= (a << 13) | (a >> 19)
        x[1] ^= (b << 13) | (b >> 19)
        a = (x[2]+x[14]) & 0xffffffff
        b = (x[7]+x[3]) & 0xffffffff
        x[6] ^= (a << 13) | (a >> 19)
        x[11] ^= (b << 13) | (b >> 19)
        a = (x[12]+x[8]) & 0xffffffff
        b = (x[1]+x[13]) & 0xffffffff
        x[0] ^= (a << 18) | (a >> 14)
        x[5] ^= (b << 18) | (b >> 14)
        a = (x[6]+x[2]) & 0xffffffff
        b = (x[11]+x[7]) & 0xffffffff
        x[10] ^= (a << 18) | (a >> 14)
        x[15] ^= (b << 18) | (b >> 14)
        # second half of the double round
        a = (x[0]+x[3]) & 0xffffffff
        b = (x[5]+x[4]) & 0xffffffff
        x[1] ^= (a << 7) | (a >> 25)
        x[6] ^= (b << 7) | (b >> 25)
        a = (x[10]+x[9]) & 0xffffffff
        b = (x[15]+x[14]) & 0xffffffff
        x[11] ^= (a << 7) | (a >> 25)
        x[12] ^= (b << 7) | (b >> 25)
        a = (x[1]+x[0]) & 0xffffffff
        b = (x[6]+x[5]) & 0xffffffff
        x[2] ^= (a << 9) | (a >> 23)
        x[7] ^= (b << 9) | (b >> 23)
        a = (x[11]+x[10]) & 0xffffffff
        b = (x[12]+x[15]) & 0xffffffff
        x[8] ^= (a << 9) | (a >> 23)
        x[13] ^= (b << 9) | (b >> 23)
        a = (x[2]+x[1]) & 0xffffffff
        b = (x[7]+x[6]) & 0xffffffff
        x[3] ^= (a << 13) | (a >> 19)
        x[4] ^= (b << 13) | (b >> 19)
        a = (x[8]+x[11]) & 0xffffffff
        b = (x[13]+x[12]) & 0xffffffff
        x[9] ^= (a << 13) | (a >> 19)
        x[14] ^= (b << 13) | (b >> 19)
        a = (x[3]+x[2]) & 0xffffffff
        b = (x[4]+x[7]) & 0xffffffff
        x[0] ^= (a << 18) | (a >> 14)
        x[5] ^= (b << 18) | (b >> 14)
        a = (x[9]+x[8]) & 0xffffffff
        b = (x[14]+x[13]) & 0xffffffff
        x[10] ^= (a << 18) | (a >> 14)
        x[15] ^= (b << 18) | (b >> 14)
    # While we are handling the data, write it to the correct dest.
    # The latter half is still part of salsa20
    for i in xrange(16):
        dest[d_start + i] = B[i] = (x[i] + B[i]) & 0xffffffff
def blockmix_salsa8(BY, Yi, r):
    """scrypt BlockMix built on Salsa20/8; used by SMix.

    BY holds the 2r input blocks of 16 words starting at offset 0 and the
    2r intermediate output blocks starting at offset Yi.  X is seeded
    with the last input block; `tmp` is scratch for salsa20_8.  The final
    loop shuffles the even/odd intermediate blocks back into BY[0:32r].
    """
    start = (2 * r - 1) * 16
    X = BY[start:start+16]                                # BlockMix - 1
    tmp = [0]*16
    for i in xrange(2 * r):                               # BlockMix - 2
        #blockxor(BY, i * 16, X, 0, 16)                   # BlockMix - 3(inner)
        salsa20_8(X, tmp, BY, i * 16, BY, Yi + i*16)      # BlockMix - 3(outer)
        #array_overwrite(X, 0, BY, Yi + (i * 16), 16)     # BlockMix - 4
    for i in xrange(r):                                   # BlockMix - 6
        BY[i * 16:(i * 16)+(16)] = BY[Yi + (i * 2) * 16:(Yi + (i * 2) * 16)+(16)]
        BY[(i + r) * 16:((i + r) * 16)+(16)] = BY[Yi + (i*2 + 1) * 16:(Yi + (i*2 + 1) * 16)+(16)]
def smix(B, Bi, r, N, V, X):
    """SMix: scrypt's ROMix instantiated with BlockMix/Salsa20-8.

    Mixes the 32*r-word block of B starting at offset Bi through N
    sequential iterations, using V (N blocks of 32*r words) as the large
    memory array and X (64*r words) as the working block.  N must be a
    power of two for the ``& (N - 1)`` index reduction to be valid.
    """
    X[0:(0)+(32 * r)] = B[Bi:(Bi)+(32 * r)]               # ROMix - 1
    for i in xrange(N):                                   # ROMix - 2
        # remember each intermediate state, then mix once more
        V[i * (32 * r):(i * (32 * r))+(32 * r)] = X[0:(0)+(32 * r)]
        blockmix_salsa8(X, 32 * r, r)                     # ROMix - 4
    for i in xrange(N):                                   # ROMix - 6
        j = integerify(X, r) & (N - 1)                    # ROMix - 7
        blockxor(V, j * (32 * r), X, 0, 32 * r)           # ROMix - 8(inner)
        blockmix_salsa8(X, 32 * r, r)                     # ROMix - 9(outer)
    B[Bi:(Bi)+(32 * r)] = X[0:(0)+(32 * r)]               # ROMix - 10
def scrypt(password, salt, N=SCRYPT_N, r=SCRYPT_r, p=SCRYPT_p, olen=64):
    """Returns a key derived using the scrypt key-derivation function

    N must be a power of two larger than 1 but no larger than 2 ** 63
    (insane); r and p must be positive numbers such that r * p < 2 ** 30

    The default values are:
    N -- 2**14 (~16k)
    r -- 8
    p -- 1

    Memory usage is proportional to N*r. Defaults require about 16 MiB.
    Time taken is proportional to N*p. Defaults take <100ms of a recent
    x86.  The last one differs from libscrypt defaults, but matches the
    'interactive' work factor from the original paper. For long term
    storage where runtime of key derivation is not a problem, you could
    use 16 as in libscrypt or better yet increase N if memory is
    plentiful.
    """
    check_args(password, salt, N, r, p, olen)
    # Everything is lists of 32-bit uints for all but pbkdf2
    try:
        B = _pbkdf2('sha256', password, salt, 1, p * 128 * r)
        B = list(struct.unpack('<%dI' % (len(B) // 4), B))
        XY = [0] * (64 * r)
        V = [0] * (32 * r * N)
    except (MemoryError, OverflowError):
        raise ValueError("scrypt parameters don't fit in memory")
    # mix each 128*r-byte chunk of B independently (parallelism param p)
    for i in xrange(p):
        smix(B, i * 32 * r, r, N, V, XY)
    # repack the word list into bytes for the final PBKDF2 pass
    B = struct.pack('<%dI' % len(B), *B)
    return _pbkdf2('sha256', password, B, 1, olen)
N must be a power of two larger than 1 but no larger than 2 ** 63 (insane)
r and p must be positive numbers such that r * p < 2 ** 30
The default values are:
N -- 2**14 (~16k)
r -- 8
p -- 1
Memory usage is proportional to N*r. Defaults require about 16 MiB.
Time taken is proportional to N*p. Defaults take <100ms of a recent x86.
The last one differs from libscrypt defaults, but matches the 'interactive'
work factor from the original paper. For long term storage where runtime of
key derivation is not a problem, you could use 16 as in libscrypt or better
yet increase N if memory is plentiful. | entailment |
def scrypt_mcf(password, salt=None, N=SCRYPT_N, r=SCRYPT_r, p=SCRYPT_p,
               prefix=SCRYPT_MCF_PREFIX_DEFAULT):
    """Derive a Modular Crypt Format hash using the scrypt KDF.

    The parameter space is narrower than for scrypt(): N must be a power
    of two larger than 1 but no larger than 2 ** 31; r and p must be
    positive numbers between 1 and 255; salt, if given, must be a byte
    string 1-16 bytes long.  When salt is None, a random salt of 128+
    bits is used (recommended).
    """
    # delegate to the mcf module, handing it our scrypt implementation
    return mcf_mod.scrypt_mcf(scrypt, password, salt, N, r, p, prefix)
Parameter space is smaller than for scrypt():
N must be a power of two larger than 1 but no larger than 2 ** 31
r and p must be positive numbers between 1 and 255
Salt must be a byte string 1-16 bytes long.
If no salt is given, a random salt of 128+ bits is used. (Recommended.) | entailment |
def _update_versions():
    """Update :attr:`_versions` with version info of registered plotters."""
    for pm_name in plot._plot_methods:
        pm = getattr(plot, pm_name)
        plugin = pm._plugin
        # skip plot methods without a plugin, already-recorded plugins,
        # and plugins whose module has not been imported yet
        if plugin is None or plugin in _versions:
            continue
        if pm.module not in sys.modules:
            continue
        _versions.update(get_versions(key=lambda s: s == plugin))
def multiple_subplots(rows=1, cols=1, maxplots=None, n=1, delete=True,
                      for_maps=False, *args, **kwargs):
    """
    Function to create subplots.

    This function creates as many subplots on as many figures as needed
    until the specified number `n` is reached.

    Parameters
    ----------
    rows: int
        The number of subplots per rows
    cols: int
        The number of subplots per column
    maxplots: int
        The number of subplots per figure (if None, it will be row*cols)
    n: int
        number of subplots to create
    delete: bool
        If True, the additional subplots per figure are deleted
    for_maps: bool
        If True this is a simple shortcut for setting
        ``subplot_kw=dict(projection=cartopy.crs.PlateCarree())`` and is
        useful if you want to use the :attr:`~ProjectPlotter.mapplot`,
        :attr:`~ProjectPlotter.mapvector` or
        :attr:`~ProjectPlotter.mapcombined` plotting methods
    ``*args`` and ``**kwargs``
        anything that is passed to the :func:`matplotlib.pyplot.subplots`
        function

    Returns
    -------
    list
        list of maplotlib.axes.SubplotBase instances"""
    import matplotlib.pyplot as plt
    axes = np.array([])
    maxplots = maxplots or rows * cols
    # default figure size grows with the grid, capped at 16x12 inches
    kwargs.setdefault('figsize', [
        min(8.*cols, 16), min(6.5*rows, 12)])
    if for_maps:
        import cartopy.crs as ccrs
        subplot_kw = kwargs.setdefault('subplot_kw', {})
        subplot_kw['projection'] = ccrs.PlateCarree()
    # create one figure per `maxplots` subplots until `n` axes exist
    for i in range(0, n, maxplots):
        fig, ax = plt.subplots(rows, cols, *args, **kwargs)
        try:
            axes = np.append(axes, ax.ravel()[:maxplots])
            if delete:
                # drop the unused axes of the rows*cols grid
                for iax in range(maxplots, rows * cols):
                    fig.delaxes(ax.ravel()[iax])
        except AttributeError:  # got a single subplot
            axes = np.append(axes, [ax])
        # on the last figure, remove the axes beyond the requested `n`
        if i + maxplots > n and delete:
            for ax2 in axes[n:]:
                fig.delaxes(ax2)
            axes = axes[:n]
    return axes
This function creates so many subplots on so many figures until the
specified number `n` is reached.
Parameters
----------
rows: int
The number of subplots per rows
cols: int
The number of subplots per column
maxplots: int
The number of subplots per figure (if None, it will be row*cols)
n: int
number of subplots to create
delete: bool
If True, the additional subplots per figure are deleted
for_maps: bool
If True this is a simple shortcut for setting
``subplot_kw=dict(projection=cartopy.crs.PlateCarree())`` and is
useful if you want to use the :attr:`~ProjectPlotter.mapplot`,
:attr:`~ProjectPlotter.mapvector` or
:attr:`~ProjectPlotter.mapcombined` plotting methods
``*args`` and ``**kwargs``
anything that is passed to the :func:`matplotlib.pyplot.subplots`
function
Returns
-------
list
list of maplotlib.axes.SubplotBase instances | entailment |
def _only_main(func):
"""Call the given `func` only from the main project"""
@wraps(func)
def wrapper(self, *args, **kwargs):
if not self.is_main:
return getattr(self.main, func.__name__)(*args, **kwargs)
return func(self, *args, **kwargs)
return wrapper | Call the given `func` only from the main project | entailment |
def gcp(main=False):
    """Return the current project.

    Parameters
    ----------
    main: bool
        If True, the current main project is returned, otherwise the
        current subproject is returned.

    See Also
    --------
    scp: Sets the current project
    project: Creates a new project"""
    if main:
        # fall back to a freshly created project when none is current
        if _current_project is None:
            return project()
        return _current_project
    if _current_subproject is None:
        return gcp(True)
    return _current_subproject
Parameters
----------
main: bool
If True, the current main project is returned, otherwise the current
subproject is returned.
See Also
--------
scp: Sets the current project
project: Creates a new project | entailment |
def _scp(p, main=False):
    """scp variant with explicit control over main-project handling.

    With ``p is None`` a fresh subproject is installed (attached to the
    current or a new main project); otherwise `p` becomes the current
    main project or subproject depending on `main`.
    """
    global _current_subproject
    global _current_project
    if p is None:
        if main or _current_project is None:
            mp = project()
        else:
            mp = _current_project
        _current_subproject = Project(main=mp)
    elif main:
        _current_project = p
    else:
        _current_subproject = p
main project or not | entailment |
def project(num=None, *args, **kwargs):
    """
    Create a new main project

    Parameters
    ----------
    num: int
        The number of the project
    %(Project.parameters.no_num)s

    Returns
    -------
    Project
        The project with the given `num` (if it does not already exist,
        it is created)

    See Also
    --------
    scp: Sets the current project
    gcp: Returns the current project
    """
    # reuse an already open project with that number if there is one
    numbers = [p.num for p in _open_projects]
    if num in numbers:
        return _open_projects[numbers.index(num)]
    if num is None:
        num = 1 if not numbers else max(numbers) + 1
    new_project = PROJECT_CLS.new(num, *args, **kwargs)
    _open_projects.append(new_project)
    return new_project
Parameters
----------
num: int
The number of the project
%(Project.parameters.no_num)s
Returns
-------
Project
The with the given `num` (if it does not already exist, it is created)
See Also
--------
scp: Sets the current project
gcp: Returns the current project | entailment |
def close(num=None, figs=True, data=True, ds=True, remove_only=False):
    """
    Close the project

    This method closes the current project (figures, data and datasets)
    or the project specified by `num`

    Parameters
    ----------
    num: int, None or 'all'
        if :class:`int`, it specifies the number of the project, if None,
        the current subproject is closed, if ``'all'``, all open projects
        are closed
    %(Project.close.parameters)s

    See Also
    --------
    Project.close"""
    kws = dict(figs=figs, data=data, ds=ds, remove_only=remove_only)
    # remember the current main project's number so a replacement can be
    # installed if it gets closed below
    cp_num = gcp(True).num
    got_cp = False
    if num is None:
        project = gcp()
        scp(None)
        project.close(**kws)
    elif num == 'all':
        # iterate over a copy since the list shrinks while closing
        for project in _open_projects[:]:
            project.close(**kws)
            got_cp = got_cp or project.main.num == cp_num
            del _open_projects[0]
    else:
        # `num` may be a Project instance or a project number
        if isinstance(num, Project):
            project = num
        else:
            project = [project for project in _open_projects
                       if project.num == num][0]
        project.close(**kws)
        try:
            _open_projects.remove(project)
        except ValueError:
            pass
        got_cp = got_cp or project.main.num == cp_num
    if got_cp:
        if _open_projects:
            # set last opened project to the current
            scp(_open_projects[-1])
        else:
            _scp(None, True)
This method closes the current project (figures, data and datasets) or the
project specified by `num`
Parameters
----------
num: int, None or 'all'
if :class:`int`, it specifies the number of the project, if None, the
current subproject is closed, if ``'all'``, all open projects are
closed
%(Project.close.parameters)s
See Also
--------
Project.close | entailment |
def register_plotter(identifier, module, plotter_name, plotter_cls=None,
                     sorter=True, plot_func=True, import_plotter=None,
                     **kwargs):
    """
    Register a :class:`psyplot.plotter.Plotter` for the projects

    This function registers plotters for the :class:`Project` class to
    allow a dynamical handling of different plotter classes.

    Parameters
    ----------
    %(Project._register_plotter.parameters.no_plotter_cls)s
    sorter: bool, optional
        If True, the :class:`Project` class gets a new property with the
        name of the specified `identifier` which allows you to access the
        instances that are plotted by the specified `plotter_name`
    plot_func: bool, optional
        If True, the :class:`ProjectPlotter` (the class that holds the
        plotting method for the :class:`Project` class and can be
        accessed via the :attr:`Project.plot` attribute) gets an
        additional method to plot via the specified `plotter_name` (see
        `Other Parameters` below.)
    import_plotter: bool, optional
        If True, the plotter is automatically imported, otherwise it is
        only imported when it is needed. If `import_plotter` is None,
        then it is determined by the :attr:`psyplot.rcParams`
        ``'project.auto_import'`` item.

    Other Parameters
    ----------------
    %(ProjectPlotter._register_plotter.other_parameters)s
    """
    if plotter_cls is None:
        if ((import_plotter is None and rcParams['project.auto_import']) or
                import_plotter):
            try:
                plotter_cls = getattr(import_module(module), plotter_name)
            except Exception as e:
                # BUGFIX: the conditional must be parenthesized —
                # previously `prefix + msg if six.PY2 else str(e)` parsed
                # as `(prefix + msg) if six.PY2 else str(e)`, silently
                # dropping the "Could not import" prefix on Python 3
                critical(("Could not import %s!\n" % module) +
                         (e.message if six.PY2 else str(e)))
                return
    if sorter:
        if hasattr(Project, identifier):
            raise ValueError(
                "Project class already has a %s attribute" % identifier)
        Project._register_plotter(
            identifier, module, plotter_name, plotter_cls)
    if plot_func:
        if hasattr(ProjectPlotter, identifier):
            raise ValueError(
                "Project class already has a %s attribute" % identifier)
        ProjectPlotter._register_plotter(
            identifier, module, plotter_name, plotter_cls, **kwargs)
        DatasetPlotter._register_plotter(
            identifier, module, plotter_name, plotter_cls, **kwargs)
        DataArrayPlotter._register_plotter(
            identifier, module, plotter_name, plotter_cls, **kwargs)
    if identifier not in registered_plotters:
        # remember the registration so it can be replayed or undone later
        kwargs.update(dict(
            module=module, plotter_name=plotter_name, sorter=sorter,
            plot_func=plot_func, import_plotter=import_plotter))
        registered_plotters[identifier] = kwargs
    return
This function registers plotters for the :class:`Project` class to allow
a dynamical handling of different plotter classes.
Parameters
----------
%(Project._register_plotter.parameters.no_plotter_cls)s
sorter: bool, optional
If True, the :class:`Project` class gets a new property with the name
of the specified `identifier` which allows you to access the instances
that are plotted by the specified `plotter_name`
plot_func: bool, optional
If True, the :class:`ProjectPlotter` (the class that holds the
plotting method for the :class:`Project` class and can be accessed via
the :attr:`Project.plot` attribute) gets an additional method to plot
via the specified `plotter_name` (see `Other Parameters` below.)
import_plotter: bool, optional
If True, the plotter is automatically imported, otherwise it is only
imported when it is needed. If `import_plotter` is None, then it is
determined by the :attr:`psyplot.rcParams` ``'project.auto_import'``
item.
Other Parameters
----------------
%(ProjectPlotter._register_plotter.other_parameters)s | entailment |
def unregister_plotter(identifier, sorter=True, plot_func=True):
    """
    Unregister a :class:`psyplot.plotter.Plotter` from the projects

    Parameters
    ----------
    identifier: str
        Name of the attribute that is used to filter for the instances
        belonging to this plotter or to create plots with this plotter
    sorter: bool
        If True, the identifier will be unregistered from the
        :class:`Project` class
    plot_func: bool
        If True, the identifier will be unregistered from the
        :class:`ProjectPlotter` class
    """
    record = registered_plotters.get(identifier, {})
    if sorter and hasattr(Project, identifier):
        delattr(Project, identifier)
        record['sorter'] = False
    if plot_func and hasattr(ProjectPlotter, identifier):
        for cls in (ProjectPlotter, DatasetPlotter, DataArrayPlotter):
            delattr(cls, identifier)
        # the plot-method cache attribute may not exist
        try:
            delattr(plot, '_' + identifier)
        except AttributeError:
            pass
        record['plot_func'] = False
    # drop the registration entirely only when both sides are removed
    if sorter and plot_func:
        registered_plotters.pop(identifier, None)
Parameters
----------
identifier: str
Name of the attribute that is used to filter for the instances
belonging to this plotter or to create plots with this plotter
sorter: bool
If True, the identifier will be unregistered from the :class:`Project`
class
plot_func: bool
If True, the identifier will be unregistered from the
:class:`ProjectPlotter` class | entailment |
def _fmtos(self):
    """Iterable over the formatoption objects common to all plotters.

    Contains only the formatoptions whose keys occur in every plotter in
    this list; empty when the list has no plotters."""
    plotters = self.plotters
    if not plotters:
        return {}
    first = plotters[0]
    if len(plotters) == 1:
        return first._fmtos
    # keys present in every plotter, looked up on the first one
    common_keys = set(first).intersection(*map(set, plotters[1:]))
    return (getattr(first, key) for key in common_keys)
Contains only the formatoption whose keys are in all plotters in this
list | entailment |
def figs(self):
    """A mapping from figures to data objects with the plotter in this
    figure"""
    # default value is an empty sub-list of the same type as `self`
    grouped = utils.DefaultOrderedDict(lambda: self[1:0])
    for arr in self:
        plotter = arr.psy.plotter
        if plotter is not None:
            grouped[plotter.ax.get_figure()].append(arr)
    return OrderedDict(grouped)
figure | entailment |
def axes(self):
    """A mapping from axes to data objects with the plotter in this axes
    """
    # default value is an empty sub-list of the same type as `self`
    grouped = utils.DefaultOrderedDict(lambda: self[1:0])
    for arr in self:
        plotter = arr.psy.plotter
        if plotter is not None:
            grouped[plotter.ax].append(arr)
    return OrderedDict(grouped)
def logger(self):
    """:class:`logging.Logger` of this instance"""
    # subprojects delegate to the main project's logger
    if not self.is_main:
        return self.main.logger
    if not hasattr(self, '_logger'):
        # lazily create a logger named after module, class and number
        name = '%s.%s.%s' % (self.__module__, self.__class__.__name__,
                             self.num)
        self._logger = logging.getLogger(name)
        self._logger.debug('Initializing...')
    return self._logger
def datasets(self):
    """A mapping from dataset numbers to datasets in this list"""
    descriptions = self._get_ds_descriptions(
        self.array_info(ds_description=['ds']))
    return {num: info['ds']
            for num, info in six.iteritems(descriptions)}
def _register_plotter(cls, identifier, module, plotter_name,
plotter_cls=None):
"""
Register a plotter in the :class:`Project` class to easy access it
Parameters
----------
identifier: str
Name of the attribute that is used to filter for the instances
belonging to this plotter
module: str
The module from where to import the `plotter_name`
plotter_name: str
The name of the plotter class in `module`
plotter_cls: type
The imported class of `plotter_name`. If None, it will be imported
when it is needed
"""
if plotter_cls is not None: # plotter has already been imported
def get_x(self):
return self(plotter_cls)
else:
def get_x(self):
return self(getattr(import_module(module), plotter_name))
setattr(cls, identifier, property(get_x, doc=(
"List of data arrays that are plotted by :class:`%s.%s`"
" plotters") % (module, plotter_name)))
cls._registered_plotters[identifier] = (module, plotter_name) | Register a plotter in the :class:`Project` class to easy access it
Parameters
----------
identifier: str
Name of the attribute that is used to filter for the instances
belonging to this plotter
module: str
The module from where to import the `plotter_name`
plotter_name: str
The name of the plotter class in `module`
plotter_cls: type
The imported class of `plotter_name`. If None, it will be imported
when it is needed | entailment |
def disable(self):
    """Disable the plotter of every array in this list."""
    for arr in self:
        plotter = arr.psy.plotter
        if plotter:
            plotter.disabled = True
def close(self, figs=True, data=False, ds=False, remove_only=False):
    """
    Close this project instance

    Parameters
    ----------
    figs: bool
        Close the figures
    data: bool
        delete the arrays from the (main) project
    ds: bool
        If True, close the dataset as well
    remove_only: bool
        If True and `figs` is True, the figures are not closed but the
        plotters are removed"""
    import matplotlib.pyplot as plt
    close_ds = ds
    # iterate over a copy since arrays may be removed from `self`
    for arr in self[:]:
        if figs and arr.psy.plotter is not None:
            if remove_only:
                # detach the plot elements but keep the figure alive;
                # individual removals are best-effort
                for fmto in arr.psy.plotter._fmtos:
                    try:
                        fmto.remove()
                    except Exception:
                        pass
            else:
                plt.close(arr.psy.plotter.ax.get_figure().number)
            arr.psy.plotter = None
        if data:
            self.remove(arr)
            # also drop the array from the main project, if we are a
            # subproject and the main project still holds it
            if not self.is_main:
                try:
                    self.main.remove(arr)
                except ValueError:  # arr not in list
                    pass
        if close_ds:
            if isinstance(arr, InteractiveList):
                # an InteractiveList may span several datasets
                for ds in [val['ds'] for val in six.itervalues(
                        arr._get_ds_descriptions(
                            arr.array_info(ds_description=['ds'],
                                           standardize_dims=False)))]:
                    ds.close()
            else:
                arr.psy.base.close()
    # keep the notion of "current project" consistent after closing
    if self.is_main and self is gcp(True) and data:
        scp(None)
    elif self.is_main and self.is_cmp:
        self.oncpchange.emit(self)
    elif self.main.is_cmp:
        self.oncpchange.emit(self.main)
Parameters
----------
figs: bool
Close the figures
data: bool
delete the arrays from the (main) project
ds: bool
If True, close the dataset as well
remove_only: bool
If True and `figs` is True, the figures are not closed but the
plotters are removed | entailment |
def _add_data(self, plotter_cls, filename_or_obj, fmt={}, make_plot=True,
              draw=False, mf_mode=False, ax=None, engine=None, delete=True,
              share=False, clear=False, enable_post=None,
              concat_dim=_concat_dim_default, load=False,
              *args, **kwargs):
    """
    Extract data from a dataset and visualize it with the given plotter

    Parameters
    ----------
    plotter_cls: type
        The subclass of :class:`psyplot.plotter.Plotter` to use for
        visualization
    filename_or_obj: filename, :class:`xarray.Dataset` or data store
        The object (or file name) to open. If not a dataset, the
        :func:`psyplot.data.open_dataset` will be used to open a dataset
    fmt: dict
        Formatoptions that shall be when initializing the plot (you can
        however also specify them as extra keyword arguments)
    make_plot: bool
        If True, the data is plotted at the end. Otherwise you have to
        call the :meth:`psyplot.plotter.Plotter.initialize_plot` method or
        the :meth:`psyplot.plotter.Plotter.reinit` method by yourself
    %(InteractiveBase.start_update.parameters.draw)s
    mf_mode: bool
        If True, the :func:`psyplot.open_mfdataset` method is used.
        Otherwise we use the :func:`psyplot.open_dataset` method which can
        open only one single dataset
    ax: None, tuple (x, y[, z]) or (list of) matplotlib.axes.Axes
        Specifies the subplots on which to plot the new data objects.

        - If None, a new figure will be created for each created plotter
        - If tuple (x, y[, z]), `x` specifies the number of rows, `y` the
          number of columns and the optional third parameter `z` the
          maximal number of subplots per figure.
        - If :class:`matplotlib.axes.Axes` (or list of those, e.g. created
          by the :func:`matplotlib.pyplot.subplots` function), the data
          will be plotted on these subplots
    %(open_dataset.parameters.engine)s
    %(multiple_subplots.parameters.delete)s
    share: bool, fmt key or list of fmt keys
        Determines whether the first created plotter shares it's
        formatoptions with the others. If True, all formatoptions are
        shared. Strings or list of strings specify the keys to share.
    clear: bool
        If True, axes are cleared before making the plot. This is only
        necessary if the `ax` keyword consists of subplots with projection
        that differs from the one that is needed
    enable_post: bool
        If True, the :attr:`~psyplot.plotter.Plotter.post` formatoption is
        enabled and post processing scripts are allowed. If ``None``, this
        parameter is set to True if there is a value given for the `post`
        formatoption in `fmt` or `kwargs`
    %(xarray.open_mfdataset.parameters.concat_dim)s
        This parameter only does have an effect if `mf_mode` is True.
    load: bool
        If True, load the complete dataset into memory before plotting.
        This might be useful if the data of other variables in the dataset
        has to be accessed multiple times, e.g. for unstructured grids.
    %(ArrayList.from_dataset.parameters.no_base)s

    Other Parameters
    ----------------
    %(ArrayList.from_dataset.other_parameters.no_args_kwargs)s
    ``**kwargs``
        Any other dimension or formatoption that shall be passed to `dims`
        or `fmt` respectively.

    Returns
    -------
    Project
        The subproject that contains the new (visualized) data array"""
    if not isinstance(filename_or_obj, xarray.Dataset):
        # open the file(s) first if we did not get a dataset
        if mf_mode:
            filename_or_obj = open_mfdataset(filename_or_obj,
                                             engine=engine,
                                             concat_dim=concat_dim)
        else:
            filename_or_obj = open_dataset(filename_or_obj,
                                           engine=engine)
    if load:
        # load everything into memory and close the original file handle
        old = filename_or_obj
        filename_or_obj = filename_or_obj.load()
        old.close()
    fmt = dict(fmt)
    # split the remaining keywords into formatoptions and dimensions
    possible_fmts = list(plotter_cls._get_formatoptions())
    additional_fmt, kwargs = utils.sort_kwargs(
        kwargs, possible_fmts)
    fmt.update(additional_fmt)
    if enable_post is None:
        # enable post-processing scripts only if a 'post' fmt was given
        enable_post = bool(fmt.get('post'))
    # create the subproject
    sub_project = self.from_dataset(filename_or_obj, **kwargs)
    sub_project.main = self
    sub_project.no_auto_update = not (
        not sub_project.no_auto_update or not self.no_auto_update)
    # create the subplots
    proj = plotter_cls._get_sample_projection()
    if isinstance(ax, tuple):
        axes = iter(multiple_subplots(
            *ax, n=len(sub_project), subplot_kw={'projection': proj}))
    elif ax is None or isinstance(ax, (mpl.axes.SubplotBase,
                                       mpl.axes.Axes)):
        axes = repeat(ax)
    else:
        axes = iter(ax)
    clear = clear or (isinstance(ax, tuple) and proj is not None)
    for arr in sub_project:
        # when formatoptions are shared, the actual plotting is deferred
        # to the `reinit` call below
        plotter_cls(arr, make_plot=(not bool(share) and make_plot),
                    draw=False, ax=next(axes), clear=clear,
                    project=self, enable_post=enable_post, **fmt)
    if share:
        # normalize `share` to a list of formatoption keys
        if share is True:
            share = possible_fmts
        elif isinstance(share, six.string_types):
            share = [share]
        else:
            share = list(share)
        # the first plotter shares its formatoptions with all the others
        sub_project[0].psy.plotter.share(
            [arr.psy.plotter for arr in sub_project[1:]], keys=share,
            draw=False)
    if make_plot:
        for arr in sub_project:
            arr.psy.plotter.reinit(draw=False, clear=clear)
    if draw is None:
        draw = rcParams['auto_draw']
    if draw:
        sub_project.draw()
        if rcParams['auto_show']:
            self.show()
    self.extend(sub_project, new_name=True)
    if self is gcp(True):
        scp(sub_project)
    return sub_project | Extract data from a dataset and visualize it with the given plotter
Parameters
----------
plotter_cls: type
The subclass of :class:`psyplot.plotter.Plotter` to use for
visualization
filename_or_obj: filename, :class:`xarray.Dataset` or data store
The object (or file name) to open. If not a dataset, the
:func:`psyplot.data.open_dataset` will be used to open a dataset
fmt: dict
Formatoptions that shall be when initializing the plot (you can
however also specify them as extra keyword arguments)
make_plot: bool
If True, the data is plotted at the end. Otherwise you have to
call the :meth:`psyplot.plotter.Plotter.initialize_plot` method or
the :meth:`psyplot.plotter.Plotter.reinit` method by yourself
%(InteractiveBase.start_update.parameters.draw)s
mf_mode: bool
If True, the :func:`psyplot.open_mfdataset` method is used.
Otherwise we use the :func:`psyplot.open_dataset` method which can
open only one single dataset
ax: None, tuple (x, y[, z]) or (list of) matplotlib.axes.Axes
Specifies the subplots on which to plot the new data objects.
- If None, a new figure will be created for each created plotter
- If tuple (x, y[, z]), `x` specifies the number of rows, `y` the
number of columns and the optional third parameter `z` the
maximal number of subplots per figure.
- If :class:`matplotlib.axes.Axes` (or list of those, e.g. created
by the :func:`matplotlib.pyplot.subplots` function), the data
will be plotted on these subplots
%(open_dataset.parameters.engine)s
%(multiple_subplots.parameters.delete)s
share: bool, fmt key or list of fmt keys
Determines whether the first created plotter shares it's
formatoptions with the others. If True, all formatoptions are
shared. Strings or list of strings specify the keys to share.
clear: bool
If True, axes are cleared before making the plot. This is only
necessary if the `ax` keyword consists of subplots with projection
that differs from the one that is needed
enable_post: bool
If True, the :attr:`~psyplot.plotter.Plotter.post` formatoption is
enabled and post processing scripts are allowed. If ``None``, this
parameter is set to True if there is a value given for the `post`
formatoption in `fmt` or `kwargs`
%(xarray.open_mfdataset.parameters.concat_dim)s
This parameter only does have an effect if `mf_mode` is True.
load: bool
If True, load the complete dataset into memory before plotting.
This might be useful if the data of other variables in the dataset
has to be accessed multiple times, e.g. for unstructured grids.
%(ArrayList.from_dataset.parameters.no_base)s
Other Parameters
----------------
%(ArrayList.from_dataset.other_parameters.no_args_kwargs)s
``**kwargs``
Any other dimension or formatoption that shall be passed to `dims`
or `fmt` respectively.
Returns
-------
Project
The subproject that contains the new (visualized) data array | entailment |
def joined_attrs(self, delimiter=', ', enhanced=True, plot_data=False,
                 keep_all=True):
    """Join the attributes of the arrays in this project

    Parameters
    ----------
    %(join_dicts.parameters.delimiter)s
    enhanced: bool
        If True, the :meth:`psyplot.plotter.Plotter.get_enhanced_attrs`
        method is used, otherwise the :attr:`xarray.DataArray.attrs`
        attribute is used.
    plot_data: bool
        It True, use the :attr:`psyplot.plotter.Plotter.plot_data`
        attribute of the plotters rather than the raw data in this project
    %(join_dicts.parameters.keep_all)s

    Returns
    -------
    dict
        A mapping from the attribute to the joined attributes which are
        either strings or (if there is only one attribute value), the
        data type of the corresponding value"""
    # collect one attrs mapping per array, depending on the flags
    if enhanced:
        source = 'plot_data' if plot_data else 'data'
        all_attrs = [p.get_enhanced_attrs(getattr(p, source))
                     for p in self.plotters]
    elif plot_data:
        all_attrs = [p.plot_data.attrs for p in self.plotters]
    else:
        all_attrs = [arr.attrs for arr in self]
    return utils.join_dicts(all_attrs, delimiter=delimiter,
                            keep_all=keep_all)
Parameters
----------
%(join_dicts.parameters.delimiter)s
enhanced: bool
If True, the :meth:`psyplot.plotter.Plotter.get_enhanced_attrs`
method is used, otherwise the :attr:`xarray.DataArray.attrs`
attribute is used.
plot_data: bool
It True, use the :attr:`psyplot.plotter.Plotter.plot_data`
attribute of the plotters rather than the raw data in this project
%(join_dicts.parameters.keep_all)s
Returns
-------
dict
A mapping from the attribute to the joined attributes which are
either strings or (if there is only one attribute value), the
data type of the corresponding value | entailment |
def export(self, output, tight=False, concat=True, close_pdf=None,
           use_time=False, **kwargs):
    """Exports the figures of the project to one or more image files

    Parameters
    ----------
    output: str, iterable or matplotlib.backends.backend_pdf.PdfPages
        if string or list of strings, those define the names of the output
        files. Otherwise you may provide an instance of
        :class:`matplotlib.backends.backend_pdf.PdfPages` to save the
        figures in it.
        If string (or iterable of strings), attribute names in the
        xarray.DataArray.attrs attribute as well as index dimensions
        are replaced by the respective value (see examples below).
        Furthermore a single format string without key (e.g. %i, %s, %d,
        etc.) is replaced by a counter.
    tight: bool
        If True, it is tried to figure out the tight bbox of the figure
        (same as bbox_inches='tight')
    concat: bool
        if True and the output format is `pdf`, all figures are
        concatenated into one single pdf
    close_pdf: bool or None
        If True and the figures are concatenated into one single pdf,
        the resulting pdf instance is closed. If False it remains open.
        If None and `output` is a string, it is the same as
        ``close_pdf=True``, if None and `output` is neither a string nor an
        iterable, it is the same as ``close_pdf=False``
    use_time: bool
        If True, formatting strings for the
        :meth:`datetime.datetime.strftime` are expected to be found in
        `output` (e.g. ``'%m'``, ``'%Y'``, etc.). If so, other formatting
        strings must be escaped by double ``'%'`` (e.g. ``'%%i'`` instead
        of (``'%i'``))
    ``**kwargs``
        Any valid keyword for the :func:`matplotlib.pyplot.savefig`
        function

    Returns
    -------
    matplotlib.backends.backend_pdf.PdfPages or None
        a PdfPages instance if output is a string and close_pdf is False,
        otherwise None

    Examples
    --------
    Simply save all figures into one single pdf::

        >>> p = psy.gcp()
        >>> p.export('my_plots.pdf')

    Save all figures into separate pngs with increasing numbers (e.g.
    ``'my_plots_1.png'``)::

        >>> p.export('my_plots_%i.png')

    Save all figures into separate pngs with the name of the variables
    shown in each figure (e.g. ``'my_plots_t2m.png'``)::

        >>> p.export('my_plots_%(name)s.png')

    Save all figures into separate pngs with the name of the variables
    shown in each figure and with increasing numbers (e.g.
    ``'my_plots_1_t2m.png'``)::

        >>> p.export('my_plots_%i_%(name)s.png')

    Specify the names for each figure directly via a list::

        >>> p.export(['my_plots1.pdf', 'my_plots2.pdf'])
    """
    from matplotlib.backends.backend_pdf import PdfPages
    if tight:
        kwargs['bbox_inches'] = 'tight'
    if use_time:
        def insert_time(s, attrs):
            # replace datetime format specifiers (e.g. '%m') by the time
            # of the corresponding array(s)
            time = attrs[tname]
            try:  # assume a valid datetime.datetime instance
                s = pd.to_datetime(time).strftime(s)
            except ValueError:
                pass
            return s
        tnames = self._get_tnames()
        # time replacement is only well-defined for exactly one time name
        tname = next(iter(tnames)) if len(tnames) == 1 else None
    else:
        def insert_time(s, attrs):
            return s
        tname = None
    if isinstance(output, six.string_types):  # a single string
        # BUGFIX: default to the file extension of `output` (without the
        # leading dot). The closing parenthesis was misplaced before, so
        # the ``[1][1:]`` subscripts were applied to the result of
        # ``kwargs.pop`` and mangled an explicitly given ``format``.
        out_fmt = kwargs.pop('format', os.path.splitext(output)[1][1:])
        if out_fmt.lower() == 'pdf' and concat:
            attrs = self.joined_attrs('-')
            if tname is not None and tname in attrs:
                output = insert_time(output, attrs)
            pdf = PdfPages(safe_modulo(output, attrs))
            def save(fig):
                pdf.savefig(fig, **kwargs)
            def close():
                if close_pdf is None or close_pdf:
                    pdf.close()
                    return
                return pdf
        else:
            def save(fig):
                attrs = self.figs[fig].joined_attrs('-')
                out = output
                if tname is not None and tname in attrs:
                    out = insert_time(out, attrs)
                try:
                    # replace a plain counter format (e.g. '%i')
                    out = safe_modulo(out, i, print_warning=False)
                except TypeError:
                    pass
                fig.savefig(safe_modulo(out, attrs), **kwargs)
            def close():
                pass
    elif isinstance(output, Iterable):  # a list of strings
        output = cycle(output)
        def save(fig):
            attrs = self.figs[fig].joined_attrs('-')
            out = next(output)
            if tname is not None and tname in attrs:
                out = insert_time(out, attrs)
            try:
                # BUGFIX: reuse `out` here. A second ``next(output)``
                # used to consume an extra file name per figure and
                # discarded the time replacement from above.
                out = safe_modulo(out, i, print_warning=False)
            except TypeError:
                pass
            fig.savefig(safe_modulo(out, attrs), **kwargs)
        def close():
            pass
    else:  # an instance of matplotlib.backends.backend_pdf.PdfPages
        def save(fig):
            output.savefig(fig, **kwargs)
        def close():
            if close_pdf:
                output.close()
    for i, fig in enumerate(self.figs, 1):
        save(fig)
    return close()
Parameters
----------
output: str, iterable or matplotlib.backends.backend_pdf.PdfPages
if string or list of strings, those define the names of the output
files. Otherwise you may provide an instance of
:class:`matplotlib.backends.backend_pdf.PdfPages` to save the
figures in it.
If string (or iterable of strings), attribute names in the
xarray.DataArray.attrs attribute as well as index dimensions
are replaced by the respective value (see examples below).
Furthermore a single format string without key (e.g. %i, %s, %d,
etc.) is replaced by a counter.
tight: bool
If True, it is tried to figure out the tight bbox of the figure
(same as bbox_inches='tight')
concat: bool
if True and the output format is `pdf`, all figures are
concatenated into one single pdf
close_pdf: bool or None
If True and the figures are concatenated into one single pdf,
the resulting pdf instance is closed. If False it remains open.
If None and `output` is a string, it is the same as
``close_pdf=True``, if None and `output` is neither a string nor an
iterable, it is the same as ``close_pdf=False``
use_time: bool
If True, formatting strings for the
:meth:`datetime.datetime.strftime` are expected to be found in
`output` (e.g. ``'%m'``, ``'%Y'``, etc.). If so, other formatting
strings must be escaped by double ``'%'`` (e.g. ``'%%i'`` instead
of (``'%i'``))
``**kwargs``
Any valid keyword for the :func:`matplotlib.pyplot.savefig`
function
Returns
-------
matplotlib.backends.backend_pdf.PdfPages or None
a PdfPages instance if output is a string and close_pdf is False,
otherwise None
Examples
--------
Simply save all figures into one single pdf::
>>> p = psy.gcp()
>>> p.export('my_plots.pdf')
Save all figures into separate pngs with increasing numbers (e.g.
``'my_plots_1.png'``)::
>>> p.export('my_plots_%i.png')
Save all figures into separate pngs with the name of the variables
shown in each figure (e.g. ``'my_plots_t2m.png'``)::
>>> p.export('my_plots_%(name)s.png')
Save all figures into separate pngs with the name of the variables
shown in each figure and with increasing numbers (e.g.
``'my_plots_1_t2m.png'``)::
>>> p.export('my_plots_%i_%(name)s.png')
Specify the names for each figure directly via a list::
>>> p.export(['my_plots1.pdf', 'my_plots2.pdf']) | entailment |
def share(self, base=None, keys=None, by=None, **kwargs):
    """
    Share the formatoptions of one plotter with all the others

    This method shares specified formatoptions from `base` with all the
    plotters in this instance.

    Parameters
    ----------
    base: None, Plotter, xarray.DataArray, InteractiveList, or list of them
        The source of the plotter that shares its formatoptions with the
        others. It can be None (then the first instance in this project
        is used), a :class:`~psyplot.plotter.Plotter` or any data object
        with a *psy* attribute. If `by` is not None, then it is expected
        that `base` is a list of data objects for each figure/axes
    %(Plotter.share.parameters.keys)s
    by: {'fig', 'figure', 'ax', 'axes'}
        Share the formatoptions only with the others on the same
        ``'figure'`` or the same ``'axes'``. In this case, base must either
        be ``None`` or a list of the types specified for `base`
    %(Plotter.share.parameters.no_keys|plotters)s

    See Also
    --------
    psyplot.plotter.share"""
    if by is not None:
        if base is not None:
            if hasattr(base, 'psy') or isinstance(base, Plotter):
                base = [base]
            if by.lower() in ['ax', 'axes']:
                bases = {ax: p[0] for ax, p in six.iteritems(
                    Project(base).axes)}
            elif by.lower() in ['fig', 'figure']:
                bases = {fig: p[0] for fig, p in six.iteritems(
                    Project(base).figs)}
            else:
                raise ValueError(
                    "*by* must be out of {'fig', 'figure', 'ax', 'axes'}. "
                    "Not %s" % (by, ))
        else:
            bases = {}
        # BUGFIX: accept 'ax' (in any capitalization) like the `bases`
        # mapping above. Previously only the exact string 'axes' selected
        # the axes-wise grouping, so ``by='ax'`` grouped by figure while
        # `bases` was keyed by axes.
        projects = self.axes if by.lower() in ['ax', 'axes'] else self.figs
        for obj, p in projects.items():
            p.share(bases.get(obj), keys, **kwargs)
    else:
        plotters = self.plotters
        if not plotters:
            return
        if base is None:
            # nothing to share if there is only one plotter
            if len(plotters) == 1:
                return
            base = plotters[0]
            plotters = plotters[1:]
        elif not isinstance(base, Plotter):
            # extract the plotter from a data object with a psy accessor
            base = getattr(getattr(base, 'psy', base), 'plotter', base)
        base.share(plotters, keys=keys, **kwargs)
This method shares specified formatoptions from `base` with all the
plotters in this instance.
Parameters
----------
base: None, Plotter, xarray.DataArray, InteractiveList, or list of them
The source of the plotter that shares its formatoptions with the
others. It can be None (then the first instance in this project
is used), a :class:`~psyplot.plotter.Plotter` or any data object
with a *psy* attribute. If `by` is not None, then it is expected
that `base` is a list of data objects for each figure/axes
%(Plotter.share.parameters.keys)s
by: {'fig', 'figure', 'ax', 'axes'}
Share the formatoptions only with the others on the same
``'figure'`` or the same ``'axes'``. In this case, base must either
be ``None`` or a list of the types specified for `base`
%(Plotter.share.parameters.no_keys|plotters)s
See Also
--------
psyplot.plotter.share | entailment |
def save_project(self, fname=None, pwd=None, pack=False, **kwargs):
    """
    Save this project to a file

    Parameters
    ----------
    fname: str or None
        If None, the dictionary will be returned. Otherwise the necessary
        information to load this project via the :meth:`load` method is
        saved to `fname` using the :mod:`pickle` module
    pwd: str or None, optional
        Path to the working directory from where the data can be imported.
        If None and `fname` is the path to a file, `pwd` is set to the
        directory of this file. Otherwise the current working directory is
        used.
    pack: bool
        If True, all datasets are packed into the folder of `fname`
        and will be used if the data is loaded
    %(ArrayList.array_info.parameters.no_pwd|copy)s

    Notes
    -----
    You can also store the entire data in the pickled file by setting
    ``ds_description={'ds'}``"""
    # store the figure formatoptions and array information
    if fname is not None and pwd is None and not pack:
        pwd = os.path.dirname(fname)
    if pack and fname is not None:
        target_dir = os.path.dirname(fname)
        if not os.path.exists(target_dir):
            os.makedirs(target_dir)
        def tmp_it():
            # endless supply of temporary .nc file names in target_dir
            from tempfile import NamedTemporaryFile
            while True:
                yield NamedTemporaryFile(
                    dir=target_dir, suffix='.nc').name
        kwargs.setdefault('paths', tmp_it())
    if fname is not None:
        kwargs['copy'] = True
    _update_versions()
    ret = {'figs': dict(map(_ProjectLoader.inspect_figure, self.figs)),
           'arrays': self.array_info(pwd=pwd, **kwargs),
           'versions': _deepcopy(_versions)}
    if pack and fname is not None:
        # we get the filenames out of the results and copy the datasets
        # there. After that we check the filenames again and force them
        # to the desired directory
        from shutil import copyfile
        fnames = (f[0] for f in self._get_dsnames(ret['arrays']))
        alternative_paths = kwargs.pop('alternative_paths', {})
        # per-basename counter to disambiguate clashing file names
        counters = defaultdict(int)
        if kwargs.get('use_rel_paths', True):
            get_path = partial(os.path.relpath, start=target_dir)
        else:
            get_path = os.path.abspath
        for ds_fname in unique_everseen(chain(alternative_paths, fnames)):
            if ds_fname is None or utils.is_remote_url(ds_fname):
                continue
            dst_file = alternative_paths.get(
                ds_fname, os.path.join(target_dir, os.path.basename(
                    ds_fname)))
            orig_dst_file = dst_file
            if counters[dst_file] and (
                    not os.path.exists(dst_file) or
                    not os.path.samefile(ds_fname, dst_file)):
                # the basename is taken already: append '-<counter>'
                dst_file, ext = os.path.splitext(dst_file)
                dst_file += '-' + str(counters[orig_dst_file]) + ext
            if (not os.path.exists(dst_file) or
                    not os.path.samefile(ds_fname, dst_file)):
                copyfile(ds_fname, dst_file)
            counters[orig_dst_file] += 1
            alternative_paths.setdefault(ds_fname, get_path(dst_file))
        ret['arrays'] = self.array_info(
            pwd=pwd, alternative_paths=alternative_paths, **kwargs)
    # store the plotter settings
    for arr, d in zip(self, six.itervalues(ret['arrays'])):
        if arr.psy.plotter is None:
            continue
        plotter = arr.psy.plotter
        d['plotter'] = {
            'ax': _ProjectLoader.inspect_axes(plotter.ax),
            'fmt': {key: getattr(plotter, key).value2pickle
                    for key in plotter},
            'cls': (plotter.__class__.__module__,
                    plotter.__class__.__name__),
            'shared': {}}
        # remember which arrays live on the same / a shared axes
        d['plotter']['ax']['shared'] = set(
            other.psy.arr_name for other in self
            if other.psy.ax == plotter.ax)
        if plotter.ax._sharex:
            d['plotter']['ax']['sharex'] = next(
                (other.psy.arr_name for other in self
                 if other.psy.ax == plotter.ax._sharex), None)
        if plotter.ax._sharey:
            d['plotter']['ax']['sharey'] = next(
                (other.psy.arr_name for other in self
                 if other.psy.ax == plotter.ax._sharey), None)
        shared = d['plotter']['shared']
        for fmto in plotter._fmtos:
            if fmto.shared:
                shared[fmto.key] = [other_fmto.plotter.data.psy.arr_name
                                    for other_fmto in fmto.shared]
    if fname is not None:
        with open(fname, 'wb') as f:
            pickle.dump(ret, f)
        return None
    return ret | Save this project to a file
Parameters
----------
fname: str or None
If None, the dictionary will be returned. Otherwise the necessary
information to load this project via the :meth:`load` method is
saved to `fname` using the :mod:`pickle` module
pwd: str or None, optional
Path to the working directory from where the data can be imported.
If None and `fname` is the path to a file, `pwd` is set to the
directory of this file. Otherwise the current working directory is
used.
pack: bool
If True, all datasets are packed into the folder of `fname`
and will be used if the data is loaded
%(ArrayList.array_info.parameters.no_pwd|copy)s
Notes
-----
You can also store the entire data in the pickled file by setting
``ds_description={'ds'}`` | entailment |
def keys(self, *args, **kwargs):
    """
    Show the available formatoptions in this project

    Parameters
    ----------
    %(Plotter.show_keys.parameters)s

    Other Parameters
    ----------------
    %(Plotter.show_keys.other_parameters)s

    Returns
    -------
    %(Plotter.show_keys.returns)s"""
    # collect all formatoptions of this project on a throw-away Plotter
    # subclass and delegate the rendering to its `show_keys` classmethod
    members = {fmto.key: type(fmto)(fmto.key) for fmto in self._fmtos}
    holder = type('TmpClass', (Plotter, ), members)
    return holder.show_keys(*args, **kwargs)
Parameters
----------
%(Plotter.show_keys.parameters)s
Other Parameters
----------------
%(Plotter.show_keys.other_parameters)s
Returns
-------
%(Plotter.show_keys.returns)s | entailment |
def summaries(self, *args, **kwargs):
    """
    Show the available formatoptions and their summaries in this project

    Parameters
    ----------
    %(Plotter.show_keys.parameters)s

    Other Parameters
    ----------------
    %(Plotter.show_keys.other_parameters)s

    Returns
    -------
    %(Plotter.show_keys.returns)s"""
    # collect all formatoptions of this project on a throw-away Plotter
    # subclass and delegate the rendering to its `show_summaries` method
    members = {fmto.key: type(fmto)(fmto.key) for fmto in self._fmtos}
    holder = type('TmpClass', (Plotter, ), members)
    return holder.show_summaries(*args, **kwargs)
Parameters
----------
%(Plotter.show_keys.parameters)s
Other Parameters
----------------
%(Plotter.show_keys.other_parameters)s
Returns
-------
%(Plotter.show_keys.returns)s | entailment |
def docs(self, *args, **kwargs):
    """
    Show the available formatoptions in this project and their full docu

    Parameters
    ----------
    %(Plotter.show_keys.parameters)s

    Other Parameters
    ----------------
    %(Plotter.show_keys.other_parameters)s

    Returns
    -------
    %(Plotter.show_keys.returns)s"""
    # collect all formatoptions of this project on a throw-away Plotter
    # subclass and delegate the rendering to its `show_docs` classmethod
    members = {fmto.key: type(fmto)(fmto.key) for fmto in self._fmtos}
    holder = type('TmpClass', (Plotter, ), members)
    return holder.show_docs(*args, **kwargs)
Parameters
----------
%(Plotter.show_keys.parameters)s
Other Parameters
----------------
%(Plotter.show_keys.other_parameters)s
Returns
-------
%(Plotter.show_keys.returns)s | entailment |
def from_dataset(cls, *args, **kwargs):
    """Construct an ArrayList instance from an existing base dataset

    Parameters
    ----------
    %(ArrayList.from_dataset.parameters)s
    main: Project
        The main project that this project corresponds to

    Other Parameters
    ----------------
    %(ArrayList.from_dataset.other_parameters)s

    Returns
    -------
    Project
        The newly created project instance
    """
    main_project = kwargs.pop('main', None)
    new_project = super(Project, cls).from_dataset(*args, **kwargs)
    if main_project is None:
        return new_project
    # register the new arrays in the given main project
    new_project.main = main_project
    main_project.extend(new_project, new_name=False)
    return new_project
Parameters
----------
%(ArrayList.from_dataset.parameters)s
main: Project
The main project that this project corresponds to
Other Parameters
----------------
%(ArrayList.from_dataset.other_parameters)s
Returns
-------
Project
The newly created project instance | entailment |
def load_project(cls, fname, auto_update=None, make_plot=True,
                 draw=False, alternative_axes=None, main=False,
                 encoding=None, enable_post=False, new_fig=True,
                 clear=None, **kwargs):
    """
    Load a project from a file or dict

    This classmethod allows to load a project that has been stored using
    the :meth:`save_project` method and reads all the data and creates the
    figures.

    Since the data is stored in external files when saving a project,
    make sure that the data is accessible under the relative paths
    as stored in the file `fname` or from the current working directory
    if `fname` is a dictionary. Alternatively use the `alternative_paths`
    parameter or the `pwd` parameter

    Parameters
    ----------
    fname: str or dict
        The string might be the path to a file created with the
        :meth:`save_project` method, or it might be a dictionary from this
        method
    %(InteractiveBase.parameters.auto_update)s
    %(Project._add_data.parameters.make_plot)s
    %(InteractiveBase.start_update.parameters.draw)s
    alternative_axes: dict, None or list
        alternative axes instances to use

        - If it is None, the axes and figures from the saving point will be
          reproduced.
        - a dictionary should map from array names in the created
          project to matplotlib axes instances
        - a list should contain axes instances that will be used for
          iteration
    main: bool, optional
        If True, a new main project is created and returned.
        Otherwise (by default default) the data is added to the current
        main project.
    encoding: str
        The encoding to use for loading the project. If None, it is
        automatically determined by pickle. Note: Set this to ``'latin1'``
        if using a project created with python2 on python3.
    enable_post: bool
        If True, the :attr:`~psyplot.plotter.Plotter.post` formatoption is
        enabled and post processing scripts are allowed. Do only set this
        parameter to ``True`` if you know you can trust the information in
        `fname`
    new_fig: bool
        If True (default) and `alternative_axes` is None, new figures are
        created if the figure already exists
    %(Project._add_data.parameters.clear)s
    pwd: str or None, optional
        Path to the working directory from where the data can be imported.
        If None and `fname` is the path to a file, `pwd` is set to the
        directory of this file. Otherwise the current working directory is
        used.
    %(ArrayList.from_dict.parameters.no_d|pwd)s

    Other Parameters
    ----------------
    %(ArrayList.from_dict.parameters)s

    Returns
    -------
    Project
        The project in state of the saving point"""
    from pkg_resources import iter_entry_points
    def get_ax_base(name, alternatives):
        # find the axes that the arrays in `alternatives` should share
        # with; prefer the axes of the array called `name`
        ax_base = next(iter(obj(arr_name=name).axes), None)
        if ax_base is None:
            ax_base = next(iter(obj(arr_name=alternatives).axes), None)
        if ax_base is not None:
            alternatives.difference_update(obj(ax=ax_base).arr_names)
        return ax_base
    pwd = kwargs.pop('pwd', None)
    if isinstance(fname, six.string_types):
        with open(fname, 'rb') as f:
            pickle_kws = {} if not encoding else {'encoding': encoding}
            d = pickle.load(f, **pickle_kws)
        pwd = pwd or os.path.dirname(fname)
    else:
        d = dict(fname)
        pwd = pwd or getcwd()
    # check for patches of plugins
    for ep in iter_entry_points('psyplot', name='patches'):
        patches = ep.load()
        for arr_d in d.get('arrays').values():
            plotter_cls = arr_d.get('plotter', {}).get('cls')
            if plotter_cls is not None and plotter_cls in patches:
                # apply the patch
                patches[plotter_cls](arr_d['plotter'],
                                     d.get('versions', {}))
    # maps stored figure numbers to the numbers of the restored figures
    fig_map = {}
    if alternative_axes is None:
        for fig_dict in six.itervalues(d.get('figs', {})):
            orig_num = fig_dict.get('num') or 1
            fig_map[orig_num] = _ProjectLoader.load_figure(
                fig_dict, new_fig=new_fig).number
    elif not isinstance(alternative_axes, dict):
        alternative_axes = cycle(iter(alternative_axes))
    obj = cls.from_dict(d['arrays'], pwd=pwd, **kwargs)
    if main:
        # we create a new project with the project factory to make sure
        # that everything is handled correctly
        obj = project(None, obj)
    axes = {}
    arr_names = obj.arr_names
    sharex = defaultdict(set)
    sharey = defaultdict(set)
    for arr, (arr_name, arr_dict) in zip(
            obj, filter(lambda t: t[0] in arr_names,
                        six.iteritems(d['arrays']))):
        if not arr_dict.get('plotter'):
            continue
        plot_dict = arr_dict['plotter']
        plotter_cls = getattr(
            import_module(plot_dict['cls'][0]), plot_dict['cls'][1])
        ax = None
        if alternative_axes is not None:
            if isinstance(alternative_axes, dict):
                ax = alternative_axes.get(arr.arr_name)
            else:
                ax = next(alternative_axes, None)
        if ax is None and 'ax' in plot_dict:
            # reuse an axes that has already been restored for an array
            # that shared it at saving time
            already_opened = plot_dict['ax'].get(
                'shared', set()).intersection(axes)
            if already_opened:
                ax = axes[next(iter(already_opened))]
            else:
                plot_dict['ax'].pop('shared', None)
                plot_dict['ax']['fig'] = fig_map[
                    plot_dict['ax'].get('fig') or 1]
                if plot_dict['ax'].get('sharex'):
                    sharex[plot_dict['ax'].pop('sharex')].add(
                        arr.psy.arr_name)
                if plot_dict['ax'].get('sharey'):
                    sharey[plot_dict['ax'].pop('sharey')].add(
                        arr.psy.arr_name)
                axes[arr.psy.arr_name] = ax = _ProjectLoader.load_axes(
                    plot_dict['ax'])
        plotter_cls(
            arr, make_plot=False, draw=False, clear=False,
            ax=ax, project=obj.main, enable_post=enable_post,
            **plot_dict['fmt'])
    # handle shared x and y-axes
    for key, names in sharex.items():
        ax_base = get_ax_base(key, names)
        if ax_base is not None:
            ax_base.get_shared_x_axes().join(
                ax_base, *obj(arr_name=names).axes)
            for ax in obj(arr_name=names).axes:
                ax._sharex = ax_base
    for key, names in sharey.items():
        ax_base = get_ax_base(key, names)
        if ax_base is not None:
            ax_base.get_shared_y_axes().join(
                ax_base, *obj(arr_name=names).axes)
            for ax in obj(arr_name=names).axes:
                ax._sharey = ax_base
    # restore shared formatoptions between the plotters
    for arr in obj.with_plotter:
        shared = d['arrays'][arr.psy.arr_name]['plotter'].get('shared', {})
        for key, arr_names in six.iteritems(shared):
            arr.psy.plotter.share(obj(arr_name=arr_names).plotters,
                                  keys=[key])
    if make_plot:
        for plotter in obj.plotters:
            plotter.reinit(
                draw=False,
                clear=clear or (
                    clear is None and
                    plotter_cls._get_sample_projection() is not None))
    if draw is None:
        draw = rcParams['auto_draw']
    if draw:
        obj.draw()
        if rcParams['auto_show']:
            obj.show()
    if auto_update is None:
        auto_update = rcParams['lists.auto_update']
    if not main:
        obj._main = gcp(True)
        obj.main.extend(obj, new_name=True)
    obj.no_auto_update = not auto_update
    scp(obj)
    return obj | Load a project from a file or dict
This classmethod allows to load a project that has been stored using
the :meth:`save_project` method and reads all the data and creates the
figures.
Since the data is stored in external files when saving a project,
make sure that the data is accessible under the relative paths
as stored in the file `fname` or from the current working directory
if `fname` is a dictionary. Alternatively use the `alternative_paths`
parameter or the `pwd` parameter
Parameters
----------
fname: str or dict
The string might be the path to a file created with the
:meth:`save_project` method, or it might be a dictionary from this
method
%(InteractiveBase.parameters.auto_update)s
%(Project._add_data.parameters.make_plot)s
%(InteractiveBase.start_update.parameters.draw)s
alternative_axes: dict, None or list
alternative axes instances to use
- If it is None, the axes and figures from the saving point will be
reproduced.
- a dictionary should map from array names in the created
project to matplotlib axes instances
- a list should contain axes instances that will be used for
iteration
main: bool, optional
If True, a new main project is created and returned.
Otherwise (by default default) the data is added to the current
main project.
encoding: str
The encoding to use for loading the project. If None, it is
automatically determined by pickle. Note: Set this to ``'latin1'``
if using a project created with python2 on python3.
enable_post: bool
If True, the :attr:`~psyplot.plotter.Plotter.post` formatoption is
enabled and post processing scripts are allowed. Do only set this
parameter to ``True`` if you know you can trust the information in
`fname`
new_fig: bool
If True (default) and `alternative_axes` is None, new figures are
created if the figure already exists
%(Project._add_data.parameters.clear)s
pwd: str or None, optional
Path to the working directory from where the data can be imported.
If None and `fname` is the path to a file, `pwd` is set to the
directory of this file. Otherwise the current working directory is
used.
%(ArrayList.from_dict.parameters.no_d|pwd)s
Other Parameters
----------------
%(ArrayList.from_dict.parameters)s
Returns
-------
Project
The project in state of the saving point | entailment |
def scp(cls, project):
    """
    Set the current project
    Parameters
    ----------
    project: Project or None
        The project to set. If it is None, the current subproject is set
        to empty. If it is a sub project (see:attr:`Project.is_main`),
        the current subproject is set to this project. Otherwise it
        replaces the current main project
    See Also
    --------
    scp: The global version for setting the current project
    gcp: Returns the current project
    project: Creates a new project"""
    if project is None:
        # reset the current subproject to an empty one and notify listeners
        _scp(None)
        cls.oncpchange.emit(gcp())
    elif not project.is_main:
        # a subproject: first make sure its main project is the current
        # main project, then make it the current subproject
        if project.main is not _current_project:
            _scp(project.main, True)
            cls.oncpchange.emit(project.main)
        _scp(project)
        cls.oncpchange.emit(project)
    else:
        # a main project: set it as the current main project and use a
        # full slice of it as the new current subproject
        _scp(project, True)
        cls.oncpchange.emit(project)
        sp = project[:]
        _scp(sp)
        cls.oncpchange.emit(sp)
Parameters
----------
project: Project or None
The project to set. If it is None, the current subproject is set
to empty. If it is a sub project (see:attr:`Project.is_main`),
the current subproject is set to this project. Otherwise it
replaces the current main project
See Also
--------
scp: The global version for setting the current project
gcp: Returns the current project
project: Creates a new project | entailment |
def new(cls, num=None, *args, **kwargs):
    """Create a new main project and make it the current one.

    Parameters
    ----------
    num: int
        The number of the project
    %(Project.parameters.no_num)s

    Returns
    -------
    Project
        The project with the given `num` (if it does not already exist,
        it is created)

    See Also
    --------
    scp: Sets the current project
    gcp: Returns the current project
    """
    created = cls(*args, num=num, **kwargs)
    # activate the freshly created project before handing it back
    scp(created)
    return created
Parameters
----------
num: int
The number of the project
%(Project.parameters.no_num)s
Returns
-------
Project
The project with the given `num` (if it does not already exist, it is
created)
See Also
--------
scp: Sets the current project
gcp: Returns the current project | entailment |
def inspect_figure(fig):
    """Collect the parameters (height, width, etc.) used to create a figure.

    Returns the number of the figure and a dictionary containing the
    necessary information for the :func:`matplotlib.pyplot.figure`
    function."""
    params = {
        'num': fig.number,
        'figsize': (fig.get_figwidth(), fig.get_figheight()),
        # undo any HiDPI scaling the canvas may have applied
        'dpi': fig.get_dpi() / getattr(fig.canvas, '_dpi_ratio', 1),
        'facecolor': fig.get_facecolor(),
        'edgecolor': fig.get_edgecolor(),
        'frameon': fig.get_frameon(),
        'tight_layout': fig.get_tight_layout(),
        'subplotpars': vars(fig.subplotpars),
    }
    return fig.number, params
This method returns the number of the figure and a dictionary
containing the necessary information for the
:func:`matplotlib.pyplot.figure` function | entailment |
def load_figure(d, new_fig=True):
    """Create a figure from what is returned by :meth:`inspect_figure`

    Parameters
    ----------
    d: dict
        The dictionary as returned by :meth:`inspect_figure`. Note that it
        is modified in place (keys are popped)
    new_fig: bool
        If True (default) and a figure with the stored number already
        exists, a fresh (unused) figure number is chosen instead of
        reusing that figure

    Returns
    -------
    matplotlib.figure.Figure
        The newly created figure"""
    import matplotlib.pyplot as plt
    subplotpars = d.pop('subplotpars', None)
    if subplotpars is not None:
        # 'validate' is an attribute of SubplotParams, not a constructor
        # argument, so it must not be passed on
        subplotpars.pop('validate', None)
        subplotpars = mfig.SubplotParams(**subplotpars)
    if new_fig:
        nums = plt.get_fignums()
        if d.get('num') in nums:
            # pick the smallest number above every existing figure number.
            # (The previous descending scan starting at max(nums) + 1 could
            # never yield anything else, since that value is by construction
            # not in `nums` -- this is the same behavior without the dead
            # generator.)
            d['num'] = max(nums) + 1
    return plt.figure(subplotpars=subplotpars, **d)
def inspect_axes(ax):
    """Inspect an axes or subplot to get the initialization parameters"""
    ret = {'fig': ax.get_figure().number}
    if mpl.__version__ < '2.0':
        ret['axisbg'] = ax.get_axis_bgcolor()
    else:  # axisbg is deprecated in matplotlib >= 2.0
        ret['facecolor'] = ax.get_facecolor()
    # store non-string projections as (module, class name) so they can be
    # re-imported and instantiated later by load_axes
    proj = getattr(ax, 'projection', None)
    if proj is not None and not isinstance(proj, six.string_types):
        proj = (proj.__class__.__module__, proj.__class__.__name__)
    ret['projection'] = proj
    ret['visible'] = ax.get_visible()
    ret['spines'] = {}
    ret['zorder'] = ax.get_zorder()
    ret['yaxis_inverted'] = ax.yaxis_inverted()
    ret['xaxis_inverted'] = ax.xaxis_inverted()
    # remember the style of each spine so it can be restored later
    for key, val in ax.spines.items():
        ret['spines'][key] = {}
        for prop in ['linestyle', 'edgecolor', 'linewidth',
                     'facecolor', 'visible']:
            ret['spines'][key][prop] = getattr(val, 'get_' + prop)()
    if isinstance(ax, mfig.SubplotBase):
        # subplots are identified by their topmost subplot specification
        # within the grid
        sp = ax.get_subplotspec().get_topmost_subplotspec()
        ret['grid_spec'] = sp.get_geometry()[:2]
        ret['subplotspec'] = [sp.num1, sp.num2]
        ret['is_subplot'] = True
    else:
        # plain axes are identified by their bounds in figure coordinates
        ret['args'] = [ax.get_position(True).bounds]
        ret['is_subplot'] = False
    return ret
def load_axes(d):
    """Create an axes or subplot from what is returned by
    :meth:`inspect_axes`"""
    import matplotlib.pyplot as plt
    fig = plt.figure(d.pop('fig', None))
    proj = d.pop('projection', None)
    spines = d.pop('spines', None)
    invert_yaxis = d.pop('yaxis_inverted', None)
    invert_xaxis = d.pop('xaxis_inverted', None)
    # translate between the deprecated 'axisbg' and the newer 'facecolor'
    # keyword depending on the installed matplotlib version
    if mpl.__version__ >= '2.0' and 'axisbg' in d:  # axisbg is deprecated
        d['facecolor'] = d.pop('axisbg')
    elif mpl.__version__ < '2.0' and 'facecolor' in d:
        d['axisbg'] = d.pop('facecolor')
    # re-import and instantiate a projection stored as (module, class name)
    if proj is not None and not isinstance(proj, six.string_types):
        proj = getattr(import_module(proj[0]), proj[1])()
    if d.pop('is_subplot', None):
        # NOTE(review): the subplot branch returns early, so the spine
        # styles and axis inversion below are only restored for plain
        # axes -- confirm this is intended
        grid_spec = mpl.gridspec.GridSpec(*d.pop('grid_spec', (1, 1)))
        subplotspec = mpl.gridspec.SubplotSpec(
            grid_spec, *d.pop('subplotspec', (1, None)))
        return fig.add_subplot(subplotspec, projection=proj, **d)
    ret = fig.add_axes(*d.pop('args', []), projection=proj, **d)
    if spines is not None:
        for key, val in spines.items():
            ret.spines[key].update(val)
    # only invert an axis that is not already inverted
    if invert_xaxis:
        if ret.get_xlim()[0] < ret.get_xlim()[1]:
            ret.invert_xaxis()
    if invert_yaxis:
        if ret.get_ylim()[0] < ret.get_ylim()[1]:
            ret.invert_yaxis()
    return ret
:meth:`inspect_axes` | entailment |
def _plot_methods(self):
    """A dictionary with mappings from plot method to their summary"""
    summaries = {}
    for name in dir(self):
        if name.startswith("_"):
            continue
        candidate = getattr(self, name)
        # only attributes that are actual plot methods are collected
        if isinstance(candidate, PlotterInterface):
            summaries[name] = candidate._summary
    return summaries
def show_plot_methods(self):
    """Print the plotmethods of this instance"""
    # use the print function configured on the interface, falling back to
    # six.print_ if none was set
    print_func = PlotterInterface._print_func
    if print_func is None:
        print_func = six.print_
    # one entry per method: the name followed by an indented summary line
    s = "\n".join(
        "%s\n %s" % t for t in six.iteritems(self._plot_methods))
    return print_func(s)
def _register_plotter(cls, identifier, module, plotter_name,
                      plotter_cls=None, summary='', prefer_list=False,
                      default_slice=None, default_dims={},
                      show_examples=True,
                      example_call="filename, name=['my_variable'], ...",
                      plugin=None):
    """
    Register a plotter for making plots
    This class method registeres a plot function for the :class:`Project`
    class under the name of the given `identifier`
    Parameters
    ----------
    %(Project._register_plotter.parameters)s
    Other Parameters
    ----------------
    prefer_list: bool
        Determines the `prefer_list` parameter in the `from_dataset`
        method. If True, the plotter is expected to work with instances of
        :class:`psyplot.InteractiveList` instead of
        :class:`psyplot.InteractiveArray`.
    %(ArrayList.from_dataset.parameters.default_slice)s
    default_dims: dict
        Default dimensions that shall be used for plotting (e.g.
        {'x': slice(None), 'y': slice(None)} for longitude-latitude plots)
    show_examples: bool, optional
        If True, examples how to access the plotter documentation are
        included in class documentation
    example_call: str, optional
        The arguments and keyword arguments that shall be included in the
        example of the generated plot method. This call will then appear as
        ``>>> psy.plot.%%(identifier)s(%%(example_call)s)`` in the
        documentation
    plugin: str
        The name of the plugin
    """
    full_name = '%s.%s' % (module, plotter_name)
    if plotter_cls is not None:  # plotter has already been imported
        # register the formatoptions table so the generated plot method
        # docstring can reference it via %(<full_name>.formatoptions)s
        docstrings.params['%s.formatoptions' % (full_name)] = \
            plotter_cls.show_keys(
                indent=4, func=str,
                # include links in sphinx doc
                include_links=None)
        doc_str = ('Possible formatoptions are\n\n'
                   '%%(%s.formatoptions)s') % full_name
    else:
        doc_str = ''
    summary = summary or (
        'Open and plot data via :class:`%s.%s` plotters' % (
            module, plotter_name))
    if plotter_cls is not None:
        # record the version of the plugin that provides this plotter
        _versions.update(get_versions(key=lambda s: s == plugin))
    class PlotMethod(cls._plot_method_base_cls):
        __doc__ = cls._gen_doc(summary, full_name, identifier,
                               example_call, doc_str, show_examples)
        _default_slice = default_slice
        _default_dims = default_dims
        _plotter_cls = plotter_cls
        _prefer_list = prefer_list
        _plugin = plugin
        _summary = summary
    # expose the new plot method on the class under the given identifier
    setattr(cls, identifier, PlotMethod(identifier, module, plotter_name))
This class method registeres a plot function for the :class:`Project`
class under the name of the given `identifier`
Parameters
----------
%(Project._register_plotter.parameters)s
Other Parameters
----------------
prefer_list: bool
Determines the `prefer_list` parameter in the `from_dataset`
method. If True, the plotter is expected to work with instances of
:class:`psyplot.InteractiveList` instead of
:class:`psyplot.InteractiveArray`.
%(ArrayList.from_dataset.parameters.default_slice)s
default_dims: dict
Default dimensions that shall be used for plotting (e.g.
{'x': slice(None), 'y': slice(None)} for longitude-latitude plots)
show_examples: bool, optional
If True, examples how to access the plotter documentation are
included in class documentation
example_call: str, optional
The arguments and keyword arguments that shall be included in the
example of the generated plot method. This call will then appear as
``>>> psy.plot.%%(identifier)s(%%(example_call)s)`` in the
documentation
plugin: str
The name of the plugin | entailment |
def _gen_doc(cls, summary, full_name, identifier, example_call, doc_str,
             show_examples):
    """Generate the documentation docstring for a PlotMethod"""
    # assemble the docstring from the summary, the plotter class path and
    # an example call; `dedents` strips the common indentation
    ret = docstrings.dedents("""
        %s
        This plotting method adds data arrays and plots them via
        :class:`%s` plotters
        To plot data from a netCDF file type::
            >>> psy.plot.%s(%s)
        %s""" % (summary, full_name, identifier, example_call, doc_str))
    if show_examples:
        # append a section showing how to access the plotter documentation
        ret += '\n\n' + cls._gen_examples(identifier)
    return ret
def plotter_cls(self):
    """The plotter class"""
    if self._plotter_cls is not None:
        # the class was already imported and cached
        return self._plotter_cls
    self._logger.debug('importing %s', self.module)
    mod = import_module(self.module)
    cls_name = self.plotter_name
    if cls_name not in vars(mod):
        raise ImportError("Module %r does not have a %r plotter!" % (
            mod, cls_name))
    # cache the imported class for subsequent accesses
    self._plotter_cls = getattr(mod, cls_name)
    # record the version of the plugin that provides this plotter
    _versions.update(get_versions(key=lambda s: s == self._plugin))
    return self._plotter_cls
def check_data(self, ds, name, dims):
    """
    A validation method for the data shape
    Parameters
    ----------
    name: list of lists of strings
        The variable names (see the
        :meth:`~psyplot.plotter.Plotter.check_data` method of the
        :attr:`plotter_cls` attribute for details)
    dims: list of dictionaries
        The dimensions of the arrays. It will be enhanced by the default
        dimensions of this plot method
    is_unstructured: bool or list of bool
        True if the corresponding array is unstructured.
    Returns
    -------
    %(Plotter.check_data.returns)s
    """
    # normalize `name` and `dims` to lists of equal length
    if isinstance(name, six.string_types):
        name = [name]
        dims = [dims]
    else:
        dims = list(dims)
    # the first variable name of each entry determines the base variable
    variables = [ds[safe_list(n)[0]] for n in name]
    decoders = [CFDecoder.get_decoder(ds, var) for var in variables]
    default_slice = slice(None) if self._default_slice is None else \
        self._default_slice
    for i, (dim_dict, var, decoder) in enumerate(zip(
            dims, variables, decoders)):
        # user supplied dims take precedence over the plot method defaults
        corrected = decoder.correct_dims(var, dict(chain(
            six.iteritems(self._default_dims),
            dim_dict.items())))
        # now use the default slice (we don't do this before because the
        # `correct_dims` method doesn't use 'x', 'y', 'z' and 't' (as used
        # for the _default_dims) if the real dimension name is already in
        # the dictionary)
        for dim in var.dims:
            corrected.setdefault(dim, default_slice)
        # keep only the dimensions that select more than a single element
        dims[i] = [
            dim for dim, val in map(lambda t: (t[0], safe_list(t[1])),
                                    six.iteritems(corrected))
            if val and (len(val) > 1 or _is_slice(val[0]))]
    return self.plotter_cls.check_data(
        name, dims, [decoder.is_unstructured(var) for decoder, var in zip(
            decoders, variables)])
Parameters
----------
name: list of lists of strings
The variable names (see the
:meth:`~psyplot.plotter.Plotter.check_data` method of the
:attr:`plotter_cls` attribute for details)
dims: list of dictionaries
The dimensions of the arrays. It will be enhanced by the default
dimensions of this plot method
is_unstructured: bool or list of bool
True if the corresponding array is unstructured.
Returns
-------
%(Plotter.check_data.returns)s | entailment |
def _add_data(self, plotter_cls, *args, **kwargs):
    """
    Add new plots to the project
    Parameters
    ----------
    %(ProjectPlotter._add_data.parameters.no_filename_or_obj)s
    Other Parameters
    ----------------
    %(ProjectPlotter._add_data.other_parameters)s
    Returns
    -------
    %(ProjectPlotter._add_data.returns)s
    """
    # this method is just a shortcut to the :meth:`Project._add_data`
    # method but is reimplemented by subclasses as the
    # :class:`DatasetPlotter` or the :class:`DataArrayPlotter`;
    # here the wrapped dataset is inserted as the data source
    return super(DatasetPlotter, self)._add_data(plotter_cls, self._ds,
                                                 *args, **kwargs)
Parameters
----------
%(ProjectPlotter._add_data.parameters.no_filename_or_obj)s
Other Parameters
----------------
%(ProjectPlotter._add_data.other_parameters)s
Returns
-------
%(ProjectPlotter._add_data.returns)s | entailment |
def _gen_doc(cls, summary, full_name, identifier, example_call, doc_str,
             show_examples):
    """Generate the documentation docstring for a PlotMethod"""
    # leave out the first argument (the dataset is already bound via
    # ``ds.psy.plot``)
    example_call = ', '.join(map(str.strip, example_call.split(',')[1:]))
    ret = docstrings.dedents("""
        %s
        This plotting method adds data arrays and plots them via
        :class:`%s` plotters
        To plot a variable in this dataset, type::
            >>> ds.psy.plot.%s(%s)
        %s""" % (summary, full_name, identifier, example_call, doc_str))
    if show_examples:
        ret += '\n\n' + cls._gen_examples(identifier)
    return ret
def check_data(self, *args, **kwargs):
    """Check whether the plotter of this plot method can visualize the data
    """
    # resolve the plotter class first (this may import the plugin module)
    cls = self.plotter_cls
    # query names, dimensions and grid type from the wrapped data array
    arrays = self._project_plotter._da.psy.to_interactive_list()
    return cls.check_data(
        arrays.all_names, arrays.all_dims, arrays.is_unstructured)
def _add_data(self, plotter_cls, *args, **kwargs):
    """
    Visualize this data array
    Parameters
    ----------
    %(Plotter.parameters.no_data)s
    Returns
    -------
    psyplot.plotter.Plotter
        The plotter that visualizes the data
    """
    # this method is just a shortcut to the :meth:`Project._add_data`
    # method but is reimplemented by subclasses as the
    # :class:`DatasetPlotter` or the :class:`DataArrayPlotter`;
    # here the plotter is instantiated directly on the wrapped data array
    return plotter_cls(self._da, *args, **kwargs)
Parameters
----------
%(Plotter.parameters.no_data)s
Returns
-------
psyplot.plotter.Plotter
The plotter that visualizes the data | entailment |
def yaml_from_file(self, fpath):
    """Collect Parameter stanzas from inline + file.
    This allows use to reference an external file for the actual
    parameter definitions.
    """
    lookup = self._load_param_file(fpath)
    if not lookup:
        return
    # the directive body is YAML: a list of {name: reference} mappings
    content = "\n".join(self.content)
    parsed = yaml.safe_load(content)
    # self.app.info("Params loaded is %s" % parsed)
    # self.app.info("Lookup table looks like %s" % lookup)
    new_content = list()
    for paramlist in parsed:
        # each entry must be a mapping from parameter name to the key of
        # the definition in the external file
        if not isinstance(paramlist, dict):
            self.app.warn(
                ("Invalid parameter definition ``%s``. Expected "
                 "format: ``name: reference``. "
                 " Skipping." % paramlist),
                (self.state_machine.node.source,
                 self.state_machine.node.line))
            continue
        for name, ref in paramlist.items():
            if ref in lookup:
                new_content.append((name, lookup[ref]))
            else:
                # the referenced key is missing from the external file
                self.app.warn(
                    ("No field definition for ``%s`` found in ``%s``. "
                     " Skipping." % (ref, fpath)),
                    (self.state_machine.node.source,
                     self.state_machine.node.line))
    # self.app.info("New content %s" % new_content)
    self.yaml = new_content
This allows use to reference an external file for the actual
parameter definitions. | entailment |
def get_libsodium():
    '''Locate the libsodium C library

    Tries the system loader first (via :func:`ctypes.util.find_library`)
    and then a set of platform-specific fallback names.

    Returns
    -------
    ctypes.CDLL or None
        The loaded library, or None if it could not be found anywhere
    '''
    __SONAMES = (13, 10, 5, 4)

    def _load(name):
        # try to load a single shared library, returning None on failure
        # instead of raising -- keeps the probing loops below flat
        try:
            return ctypes.cdll.LoadLibrary(name)
        except OSError:
            return None

    # Import libsodium from system
    sys_sodium = ctypes.util.find_library('sodium')
    if sys_sodium is None:
        sys_sodium = ctypes.util.find_library('libsodium')
    if sys_sodium:
        lib = _load(sys_sodium)
        if lib is not None:
            return lib
    # Import from local path, trying versioned sonames as fallbacks
    if sys.platform.startswith('win'):
        for name in ['libsodium'] + [
                'libsodium-{0}'.format(v) for v in __SONAMES]:
            lib = _load(name)
            if lib is not None:
                return lib
    elif sys.platform.startswith('darwin'):
        lib = _load('libsodium.dylib')
        if lib is not None:
            return lib
        # fall back to a dylib shipped next to this package
        libidx = __file__.find('lib')
        if libidx > 0:
            return _load(__file__[0:libidx + 3] + '/libsodium.dylib')
    else:
        for name in ['libsodium.so'] + [
                'libsodium.so.{0}'.format(v) for v in __SONAMES]:
            lib = _load(name)
            if lib is not None:
                return lib
    # nothing worked: signal failure explicitly
    return None
def main(args=None):
    """Main function for usage of psyplot from the command line
    This function creates a parser that parses command lines to the
    :func:`make_plot` functions or (if the ``psyplot_gui`` module is
    present, to the :func:`psyplot_gui.start_app` function)
    Returns
    -------
    psyplot.parser.FuncArgParser
        The parser that has been used from the command line"""
    # NOTE(review): despite the docstring, no parser is returned from this
    # function -- confirm whether the return statement went missing
    try:
        from psyplot_gui import get_parser as _get_parser
    except ImportError:
        # no GUI available: fall back to the pure command line interface,
        # where an output file is mandatory
        logger.debug('Failed to import gui', exc_info=True)
        parser = get_parser(create=False)
        parser.update_arg('output', required=True)
        parser.create_arguments()
        parser.parse2func(args)
    else:
        # GUI available: let it parse the known arguments and dispatch
        parser = _get_parser(create=False)
        parser.create_arguments()
        parser.parse_known2func(args)
This function creates a parser that parses command lines to the
:func:`make_plot` functions or (if the ``psyplot_gui`` module is
present, to the :func:`psyplot_gui.start_app` function)
Returns
-------
psyplot.parser.FuncArgParser
The parser that has been used from the command line | entailment |
def make_plot(fnames=[], name=[], dims=None, plot_method=None,
              output=None, project=None, engine=None, formatoptions=None,
              tight=False, rc_file=None, encoding=None, enable_post=False,
              seaborn_style=None, output_project=None,
              concat_dim=get_default_value(xr.open_mfdataset, 'concat_dim'),
              chname={}):
    """
    Eventually start the QApplication or only make a plot
    Parameters
    ----------
    fnames: list of str
        Either the filenames to show, or, if the `project` parameter is set,
        the a list of `,`-separated filenames to make a mapping from the
        original filename to a new one
    name: list of str
        The variable names to plot if the `output` parameter is set
    dims: dict
        A mapping from coordinate names to integers if the `project` is not
        given
    plot_method: str
        The name of the plot_method to use
    output: str or list of str
        If set, the data is loaded and the figures are saved to the specified
        filename and now graphical user interface is shown
    project: str
        If set, the project located at the given file name is loaded
    engine: str
        The engine to use for opening the dataset (see
        :func:`psyplot.data.open_dataset`)
    formatoptions: dict
        A dictionary of formatoption that is applied to the data visualized by
        the chosen `plot_method`
    tight: bool
        If True/set, it is tried to figure out the tight bbox of the figure and
        adjust the paper size of the `output` to it
    rc_file: str
        The path to a yaml configuration file that can be used to update the
        :attr:`~psyplot.config.rcsetup.rcParams`
    encoding: str
        The encoding to use for loading the project. If None, it is
        automatically determined by pickle. Note: Set this to ``'latin1'``
        if using a project created with python2 on python3.
    enable_post: bool
        Enable the :attr:`~psyplot.plotter.Plotter.post` processing
        formatoption. If True/set, post processing scripts are enabled in the
        given `project`. Only set this if you are sure that you can trust the
        given project file because it may be a security vulnerability.
    seaborn_style: str
        The name of the style of the seaborn package that can be used for
        the :func:`seaborn.set_style` function
    output_project: str
        The name of a project file to save the project to
    concat_dim: str
        The concatenation dimension if multiple files in `fnames` are
        provided
    chname: dict
        A mapping from variable names in the project to variable names in the
        datasets that should be used instead
    """
    if project is not None and (name != [] or dims is not None):
        warn('The `name` and `dims` parameter are ignored if the `project`'
             ' parameter is set!')
    if rc_file is not None:
        rcParams.load_from_file(rc_file)
    if dims is not None and not isinstance(dims, dict):
        # `dims` arrives from the parser as a list of dicts; merge them
        dims = dict(chain(*map(six.iteritems, dims)))
    # NOTE(review): `output` is assumed to be set here (the command line
    # parser requires it); a None value would raise a TypeError below --
    # confirm the contract
    if len(output) == 1:
        output = output[0]
    if not fnames and not project:
        raise ValueError(
            "Either a filename or a project file must be provided if "
            "the output parameter is set!")
    elif project is None and plot_method is None:
        raise ValueError(
            "A plotting method must be provided if the output parameter "
            "is set and not the project!")
    if seaborn_style is not None:
        import seaborn as sns
        sns.set_style(seaborn_style)
    import psyplot.project as psy
    if project is not None:
        # replot a saved project, optionally replacing the dataset paths
        fnames = [s.split(',') for s in fnames]
        chname = dict(chname)
        # filenames without a ',' serve as replacement paths in the order
        # of the datasets stored in the project
        single_files = (l[0] for l in fnames if len(l) == 1)
        alternative_paths = defaultdict(lambda: next(single_files, None))
        alternative_paths.update([l for l in fnames if len(l) == 2])
        p = psy.Project.load_project(
            project, alternative_paths=alternative_paths,
            engine=engine, encoding=encoding, enable_post=enable_post,
            chname=chname)
        if formatoptions is not None:
            p.update(fmt=formatoptions)
        p.export(output, tight=tight)
    else:
        # create a new plot with the chosen plot method
        pm = getattr(psy.plot, plot_method, None)
        if pm is None:
            raise ValueError("Unknown plot method %s!" % plot_method)
        kwargs = {'name': name} if name else {}
        p = pm(
            fnames, dims=dims or {}, engine=engine,
            fmt=formatoptions or {}, mf_mode=True, concat_dim=concat_dim,
            **kwargs)
        p.export(output, tight=tight)
    if output_project is not None:
        p.save_project(output_project)
    return
Parameters
----------
fnames: list of str
Either the filenames to show, or, if the `project` parameter is set,
the a list of `,`-separated filenames to make a mapping from the
original filename to a new one
name: list of str
The variable names to plot if the `output` parameter is set
dims: dict
A mapping from coordinate names to integers if the `project` is not
given
plot_method: str
The name of the plot_method to use
output: str or list of str
If set, the data is loaded and the figures are saved to the specified
filename and now graphical user interface is shown
project: str
If set, the project located at the given file name is loaded
engine: str
The engine to use for opening the dataset (see
:func:`psyplot.data.open_dataset`)
formatoptions: dict
A dictionary of formatoption that is applied to the data visualized by
the chosen `plot_method`
tight: bool
If True/set, it is tried to figure out the tight bbox of the figure and
adjust the paper size of the `output` to it
rc_file: str
The path to a yaml configuration file that can be used to update the
:attr:`~psyplot.config.rcsetup.rcParams`
encoding: str
The encoding to use for loading the project. If None, it is
automatically determined by pickle. Note: Set this to ``'latin1'``
if using a project created with python2 on python3.
enable_post: bool
Enable the :attr:`~psyplot.plotter.Plotter.post` processing
formatoption. If True/set, post processing scripts are enabled in the
given `project`. Only set this if you are sure that you can trust the
given project file because it may be a security vulnerability.
seaborn_style: str
The name of the style of the seaborn package that can be used for
the :func:`seaborn.set_style` function
output_project: str
The name of a project file to save the project to
concat_dim: str
The concatenation dimension if multiple files in `fnames` are
provided
chname: dict
A mapping from variable names in the project to variable names in the
datasets that should be used instead | entailment |
def get_parser(create=True):
    """Return a parser to make that can be used to make plots or open files
    from the command line
    Returns
    -------
    psyplot.parser.FuncArgParser
        The :class:`argparse.ArgumentParser` instance"""
    #: The parser that is used to parse arguments from the command line
    epilog = docstrings.get_sections(docstrings.dedents("""
        Examples
        --------
        Here are some examples on how to use psyplot from the command line.
        Plot the variable ``'t2m'`` in a netCDF file ``'myfile.nc'`` and save
        the plot to ``'plot.pdf'``::
            $ psyplot myfile.nc -n t2m -pm mapplot -o test.pdf
        Create two plots for ``'t2m'`` with the first and second timestep on
        the second vertical level::
            $ psyplot myfile.nc -n t2m -pm mapplot -o test.pdf -d t,0,1 z,1
        If you have save a project using the
        :meth:`psyplot.project.Project.save_project` method into a file named
        ``'project.pkl'``, you can replot this via::
            $ psyplot -p project.pkl -o test.pdf
        If you use a different dataset than the one you used in the project
        (e.g. ``'other_ds.nc'``), you can replace it via::
            $ psyplot other_dataset.nc -p project.pkl -o test.pdf
        or explicitly via::
            $ psyplot old_ds.nc,other_ds.nc -p project.pkl -o test.pdf
        You can also load formatoptions from a configuration file, e.g.::
            $ echo 'title: my title' > fmt.yaml
            $ psyplot myfile.nc -n t2m -pm mapplot -fmt fmt.yaml -o test.pdf
        """), 'parser', ['Examples'])
    if _on_rtd:  # make a rubric examples section
        epilog = '.. rubric:: Examples\n' + '\n'.join(epilog.splitlines()[2:])
    parser = FuncArgParser(
        description="""
        Load a dataset, make the plot and save the result to a file""",
        epilog=epilog,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    info_grp = parser.add_argument_group(
        'Info options',
        'Options that print informations and quit afterwards')
    parser.update_arg('version', short='V', long='version', action='version',
                      version=psyplot.__version__, if_existent=False,
                      group=info_grp)
    parser.update_arg('all_versions', short='aV', long='all-versions',
                      action=AllVersionsAction, if_existent=False,
                      group=info_grp)
    parser.update_arg('list_plugins', short='lp', long='list-plugins',
                      action=ListPluginsAction, if_existent=False,
                      group=info_grp)
    parser.update_arg(
        'list_plot_methods', short='lpm', long='list-plot-methods',
        action=ListPlotMethodsAction, if_existent=False, group=info_grp)
    parser.update_arg(
        'list_datasets', short='lds', long='list-datasets',
        action=ListDsNamesAction, if_existent=False, group=info_grp,
        help="""List the used dataset names in the given `project`.""")
    # register the arguments of :func:`make_plot` on the parser
    parser.setup_args(make_plot)
    output_grp = parser.add_argument_group(
        'Output options',
        'Options that only have an effect if the `-o` option is set.')
    parser.update_arg('fnames', positional=True, nargs='*')
    parser.update_arg('name', short='n', nargs='*', metavar='variable_name',
                      const=None)
    parser.update_arg('dims', short='d', nargs='+', type=_load_dims,
                      metavar='dim,val1[,val2[,...]]')
    # the available plot methods come from the rcParams and, if the project
    # module has already been imported, from the registered plot methods
    pm_choices = {pm for pm, d in filter(
        lambda t: t[1].get('plot_func', True),
        six.iteritems(rcParams['project.plotters']))}
    if psyplot._project_imported:
        import psyplot.project as psy
        pm_choices.update(set(psy.plot._plot_methods))
    parser.update_arg('plot_method', short='pm', choices=pm_choices,
                      metavar='{%s}' % ', '.join(map(repr, pm_choices)))
    parser.update_arg('output', short='o', group=output_grp)
    parser.update_arg('output_project', short='op', group=output_grp)
    parser.update_arg('project', short='p')
    parser.update_arg(
        'formatoptions', short='fmt', type=_load_dict, help="""
        The path to a yaml (``'.yml'`` or ``'.yaml'``) or pickle file
        defining a dictionary of formatoption that is applied to the data
        visualized by the chosen `plot_method`""", metavar='FILENAME')
    parser.update_arg(
        'chname', type=lambda s: s.split(','), nargs='*', help="""
        A mapping from variable names in the project to variable names in the
        datasets that should be used instead. Variable names should be
        separated by a comma.""", metavar='project-variable,variable-to-use')
    parser.update_arg('tight', short='t', group=output_grp)
    parser.update_arg('rc_file', short='rc')
    parser.pop_key('rc_file', 'metavar')
    parser.update_arg('encoding', short='e')
    parser.pop_key('enable_post', 'short')
    parser.update_arg('seaborn_style', short='sns')
    parser.update_arg('concat_dim', short='cd')
    if create:
        parser.create_arguments()
    return parser
from the command line
Returns
-------
psyplot.parser.FuncArgParser
The :class:`argparse.ArgumentParser` instance | entailment |
def scrypt(password, salt, N=SCRYPT_N, r=SCRYPT_r, p=SCRYPT_p, olen=64):
    """Returns a key derived using the scrypt key-derivarion function
    N must be a power of two larger than 1 but no larger than 2 ** 63 (insane)
    r and p must be positive numbers such that r * p < 2 ** 30
    The default values are:
    N -- 2**14 (~16k)
    r -- 8
    p -- 1
    Memory usage is proportional to N*r. Defaults require about 16 MiB.
    Time taken is proportional to N*p. Defaults take <100ms of a recent x86.
    The last one differs from libscrypt defaults, but matches the 'interactive'
    work factor from the original paper. For long term storage where runtime of
    key derivation is not a problem, you could use 16 as in libscrypt or better
    yet increase N if memory is plentiful.
    """
    check_args(password, salt, N, r, p, olen)
    if _scrypt_ll:
        # the loaded library exposes the low-level scrypt entry point,
        # which supports the full parameter space directly
        out = ctypes.create_string_buffer(olen)
        if _scrypt_ll(password, len(password), salt, len(salt),
                      N, r, p, out, olen):
            raise ValueError
        return out.raw
    # parameters that the high-level libsodium API cannot express are
    # delegated to the fallback implementation
    if len(salt) != _scrypt_salt or r != 8 or (p & (p - 1)) or (N*p <= 512):
        return scr_mod.scrypt(password, salt, N, r, p, olen)
    # translate N = 2**s and p = 2**t into libsodium's
    # (opslimit, memlimit) pair
    s = next(i for i in range(1, 64) if 2**i == N)
    t = next(i for i in range(0, 30) if 2**i == p)
    m = 2**(10 + s)
    o = 2**(5 + t + s)
    if s > 53 or t + s > 58:
        raise ValueError
    out = ctypes.create_string_buffer(olen)
    if _scrypt(out, olen, password, len(password), salt, o, m) != 0:
        raise ValueError
    return out.raw
N must be a power of two larger than 1 but no larger than 2 ** 63 (insane)
r and p must be positive numbers such that r * p < 2 ** 30
The default values are:
N -- 2**14 (~16k)
r -- 8
p -- 1
Memory usage is proportional to N*r. Defaults require about 16 MiB.
Time taken is proportional to N*p. Defaults take <100ms of a recent x86.
The last one differs from libscrypt defaults, but matches the 'interactive'
work factor from the original paper. For long term storage where runtime of
key derivation is not a problem, you could use 16 as in libscrypt or better
yet increase N if memory is plentiful. | entailment |
def scrypt_mcf(password, salt=None, N=SCRYPT_N, r=SCRYPT_r, p=SCRYPT_p,
               prefix=SCRYPT_MCF_PREFIX_DEFAULT):
    """Derives a Modular Crypt Format hash using the scrypt KDF.

    Parameter space is smaller than for scrypt():
    N must be a power of two larger than 1 but no larger than 2 ** 31
    r and p must be positive numbers between 1 and 255
    Salt must be a byte string 1-16 bytes long.

    If no salt is given, a random salt of 128+ bits is used. (Recommended.)
    """
    if isinstance(password, unicode):
        password = password.encode('utf8')
    elif not isinstance(password, bytes):
        raise TypeError('password must be a unicode or byte string')
    if N < 2 or (N & (N - 1)):
        raise ValueError('scrypt N must be a power of 2 greater than 1')
    if not 1 <= p <= 255:
        raise ValueError('scrypt_mcf p out of range [1,255]')
    if N > 2**31:
        raise ValueError('scrypt_mcf N out of range [2,2**31]')
    # The fast native path only handles an auto-generated salt, r == 8, a
    # power-of-two p, large enough N*p and a known prefix, and is skipped
    # entirely when the low-level entry point is in use.
    fast_path = (salt is None and r == 8 and not (p & (p - 1)) and
                 N*p > 512 and
                 prefix in (SCRYPT_MCF_PREFIX_7, SCRYPT_MCF_PREFIX_s1,
                            SCRYPT_MCF_PREFIX_ANY) and
                 not _scrypt_ll)
    if not fast_path:
        return mcf_mod.scrypt_mcf(scrypt, password, salt, N, r, p, prefix)
    log_N = next(i for i in range(1, 32) if 2**i == N)
    log_p = next(i for i in range(0, 8) if 2**i == p)
    memlimit = 2**(10 + log_N)
    opslimit = 2**(5 + log_p + log_N)
    mcf = ctypes.create_string_buffer(102)
    if _scrypt_str(mcf, password, len(password), opslimit, memlimit) != 0:
        # Native hashing failed; fall back to the generic implementation.
        return mcf_mod.scrypt_mcf(scrypt, password, salt, N, r, p, prefix)
    if prefix in (SCRYPT_MCF_PREFIX_7, SCRYPT_MCF_PREFIX_ANY):
        return mcf.raw.strip(b'\0')
    # Caller asked for $s1$: decode the native $7$ result and re-encode it.
    N_out, r_out, p_out, salt, hash_, _olen = \
        mcf_mod._scrypt_mcf_decode_7(mcf.raw[:-1])
    assert N_out == N and r_out == r and p_out == p, \
        (N_out, r_out, p_out, N, r, p, opslimit, memlimit)
    return mcf_mod._scrypt_mcf_encode_s1(N, r, p, salt, hash_)
def scrypt_mcf_check(mcf, password):
    """Returns True if the password matches the given MCF hash."""
    if isinstance(password, unicode):
        password = password.encode('utf8')
    elif not isinstance(password, bytes):
        raise TypeError('password must be a unicode or byte string')
    if not isinstance(mcf, bytes):
        raise TypeError('MCF must be a byte string')
    # Standard $7$ hashes can be verified with the fast native checker,
    # unless the low-level entry point is in use.
    native_ok = mcf_mod._scrypt_mcf_7_is_standard(mcf) and not _scrypt_ll
    if native_ok:
        return _scrypt_str_chk(mcf, password, len(password)) == 0
    return mcf_mod.scrypt_mcf_check(scrypt, mcf, password)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.