| sentence1 (string, lengths 52 to 3.87M) | sentence2 (string, lengths 1 to 47.2k) | label (1 class: entailment) |
|---|---|---|
def best_oob_mae_weight(trees):
"""
Returns weights so that the tree with the smallest out-of-bag mean absolute error receives all the weight.
"""
best = (float('inf'), None)
for tree in trees:
oob_mae = tree.out_of_bag_mae
if oob_mae is None or oob_mae.mean is None:
continue
best = min(best, (oob_mae.mean, tree))
best_mae, best_tree = best
if best_tree is None:
return
return [(1.0, best_tree)]
|
Returns weights so that the tree with the smallest out-of-bag mean absolute error receives all the weight.
|
entailment
|
def mean_oob_mae_weight(trees):
"""
Returns weights proportional to the out-of-bag mean absolute error for each tree.
"""
weights = []
active_trees = []
for tree in trees:
oob_mae = tree.out_of_bag_mae
if oob_mae is None or oob_mae.mean is None:
continue
weights.append(oob_mae.mean)
active_trees.append(tree)
if not active_trees:
return
weights = normalize(weights)
return zip(weights, active_trees)
|
Returns weights proportional to the out-of-bag mean absolute error for each tree.
|
entailment
|
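Both weighting helpers above rely on a `normalize` function that rescales a list of raw weights so they sum to one; it is not shown in this pair. A minimal sketch of what such a helper could look like (the name and behaviour are assumptions for illustration, not the library's confirmed implementation):
def normalize(weights):
    # Rescale raw weights to sum to 1.0. Callers above only invoke this
    # with a non-empty list of numeric weights.
    total = float(sum(weights))
    if not total:
        # Degenerate case: fall back to uniform weights.
        return [1.0 / len(weights)] * len(weights)
    return [w / total for w in weights]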
def _grow_trees(self):
"""
Adds new trees to the forest according to the specified growth method.
"""
if self.grow_method == GROW_AUTO_INCREMENTAL:
self.tree_kwargs['auto_grow'] = True
while len(self.trees) < self.size:
self.trees.append(Tree(data=self.data, **self.tree_kwargs))
|
Adds new trees to the forest according to the specified growth method.
|
entailment
|
def predict(self, record):
"""
Attempts to predict the value of the class attribute by aggregating
the predictions of each tree.
Parameters:
weighting_formula := a callable that takes a list of trees and
returns a list of weights.
"""
# Get raw predictions.
# {tree:raw prediction}
predictions = {}
for tree in self.trees:
_p = tree.predict(record)
if _p is None:
continue
if isinstance(_p, CDist):
if _p.mean is None:
continue
elif isinstance(_p, DDist):
if not _p.count:
continue
predictions[tree] = _p
if not predictions:
return
# Normalize weights and aggregate final prediction.
weights = self.weighting_method(predictions.keys())
if not weights:
return
# assert sum(weights) == 1.0, "Sum of weights must equal 1."
if self.data.is_continuous_class:
# Merge continuous class predictions.
total = sum(w*predictions[tree].mean for w, tree in weights)
else:
# Merge discrete class predictions.
total = DDist()
for weight, tree in weights:
prediction = predictions[tree]
for cls_value, cls_prob in prediction.probs:
total.add(cls_value, cls_prob*weight)
return total
|
Attempts to predict the value of the class attribute by aggregating
the predictions of each tree.
Parameters:
weighting_formula := a callable that takes a list of trees and
returns a list of weights.
|
entailment
|
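The weighting methods return (weight, tree) pairs that sum to one, and `predict` folds each tree's distribution into a single result. A rough, self-contained sketch of the discrete-class merge using plain dictionaries in place of the DDist class (the dict-based stand-in is an assumption for illustration only):
def merge_discrete(weighted_predictions):
    # weighted_predictions: iterable of (weight, {class_value: probability})
    total = {}
    for weight, probs in weighted_predictions:
        for cls_value, cls_prob in probs.items():
            total[cls_value] = total.get(cls_value, 0.0) + cls_prob * weight
    return total

# Example: two trees, weighted 0.7 and 0.3.
print(merge_discrete([(0.7, {'a': 0.9, 'b': 0.1}),
                      (0.3, {'a': 0.4, 'b': 0.6})]))
# -> approximately {'a': 0.75, 'b': 0.25}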
def train(self, record):
"""
Updates the trees with the given training record.
"""
self._fell_trees()
self._grow_trees()
for tree in self.trees:
if random.random() < self.sample_ratio:
tree.train(record)
else:
tree.out_of_bag_samples.append(record)
while len(tree.out_of_bag_samples) > self.max_out_of_bag_samples:
tree.out_of_bag_samples.pop(0)
|
Updates the trees with the given training record.
|
entailment
|
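The `out_of_bag_mae` consulted by the weighting helpers is presumably derived from the `out_of_bag_samples` buffer maintained here. A rough sketch of how such a score could be computed for a continuous class (the attribute names, the CDist-style prediction with a `.mean`, and the way the true value is read from a record are all assumptions for illustration):
def compute_out_of_bag_mae(tree, class_attr):
    # Mean absolute error of the tree over its held-out (out-of-bag) records.
    errors = []
    for record in tree.out_of_bag_samples:
        prediction = tree.predict(record)  # assumed to return a CDist-like object
        if prediction is None or prediction.mean is None:
            continue
        errors.append(abs(prediction.mean - record[class_attr]))
    return sum(errors) / len(errors) if errors else None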
def get_configfile_paths(system=True, user=True, local=True, only_existing=True):
"""Return a list of local configuration file paths.
Search paths for configuration files on the local system
are based on homebase_ and depend on operating system; for example, for Linux systems
these might include ``dwave.conf`` in the current working directory (CWD),
user-local ``.config/dwave/``, and system-wide ``/etc/dwave/``.
.. _homebase: https://github.com/dwavesystems/homebase
Args:
system (boolean, default=True):
Search for system-wide configuration files.
user (boolean, default=True):
Search for user-local configuration files.
local (boolean, default=True):
Search for local configuration files (in CWD).
only_existing (boolean, default=True):
Return only paths for files that exist on the local system.
Returns:
list[str]:
List of configuration file paths.
Examples:
This example displays all paths to configuration files on a Windows system
running Python 2.7 and then finds the single existing configuration file.
>>> import dwave.cloud as dc
>>> # Display paths
>>> dc.config.get_configfile_paths(only_existing=False) # doctest: +SKIP
[u'C:\\ProgramData\\dwavesystem\\dwave\\dwave.conf',
u'C:\\Users\\jane\\AppData\\Local\\dwavesystem\\dwave\\dwave.conf',
'.\\dwave.conf']
>>> # Find existing files
>>> dc.config.get_configfile_paths() # doctest: +SKIP
[u'C:\\Users\\jane\\AppData\\Local\\dwavesystem\\dwave\\dwave.conf']
"""
candidates = []
# system-wide has the lowest priority, `/etc/dwave/dwave.conf`
if system:
candidates.extend(homebase.site_config_dir_list(
app_author=CONF_AUTHOR, app_name=CONF_APP,
use_virtualenv=False, create=False))
# user-local will override it, `~/.config/dwave/dwave.conf`
if user:
candidates.append(homebase.user_config_dir(
app_author=CONF_AUTHOR, app_name=CONF_APP, roaming=False,
use_virtualenv=False, create=False))
# highest priority (overrides all): `./dwave.conf`
if local:
candidates.append(".")
paths = [os.path.join(base, CONF_FILENAME) for base in candidates]
if only_existing:
paths = list(filter(os.path.exists, paths))
return paths
|
Return a list of local configuration file paths.
Search paths for configuration files on the local system
are based on homebase_ and depend on operating system; for example, for Linux systems
these might include ``dwave.conf`` in the current working directory (CWD),
user-local ``.config/dwave/``, and system-wide ``/etc/dwave/``.
.. _homebase: https://github.com/dwavesystems/homebase
Args:
system (boolean, default=True):
Search for system-wide configuration files.
user (boolean, default=True):
Search for user-local configuration files.
local (boolean, default=True):
Search for local configuration files (in CWD).
only_existing (boolean, default=True):
Return only paths for files that exist on the local system.
Returns:
list[str]:
List of configuration file paths.
Examples:
This example displays all paths to configuration files on a Windows system
running Python 2.7 and then finds the single existing configuration file.
>>> import dwave.cloud as dc
>>> # Display paths
>>> dc.config.get_configfile_paths(only_existing=False) # doctest: +SKIP
[u'C:\\ProgramData\\dwavesystem\\dwave\\dwave.conf',
u'C:\\Users\\jane\\AppData\\Local\\dwavesystem\\dwave\\dwave.conf',
'.\\dwave.conf']
>>> # Find existing files
>>> dc.config.get_configfile_paths() # doctest: +SKIP
[u'C:\\Users\\jane\\AppData\\Local\\dwavesystem\\dwave\\dwave.conf']
|
entailment
|
def get_default_configfile_path():
"""Return the default configuration-file path.
Typically returns a user-local configuration file; e.g.:
``~/.config/dwave/dwave.conf``.
Returns:
str:
Configuration file path.
Examples:
This example displays the default configuration file on an Ubuntu Unix system
running IPython 2.7.
>>> import dwave.cloud as dc
>>> # Display paths
>>> dc.config.get_configfile_paths(only_existing=False) # doctest: +SKIP
['/etc/xdg/xdg-ubuntu/dwave/dwave.conf',
'/usr/share/upstart/xdg/dwave/dwave.conf',
'/etc/xdg/dwave/dwave.conf',
'/home/mary/.config/dwave/dwave.conf',
'./dwave.conf']
>>> # Find default configuration path
>>> dc.config.get_default_configfile_path() # doctest: +SKIP
'/home/mary/.config/dwave/dwave.conf'
"""
base = homebase.user_config_dir(
app_author=CONF_AUTHOR, app_name=CONF_APP, roaming=False,
use_virtualenv=False, create=False)
path = os.path.join(base, CONF_FILENAME)
return path
|
Return the default configuration-file path.
Typically returns a user-local configuration file; e.g.:
``~/.config/dwave/dwave.conf``.
Returns:
str:
Configuration file path.
Examples:
This example displays the default configuration file on an Ubuntu Unix system
running IPython 2.7.
>>> import dwave.cloud as dc
>>> # Display paths
>>> dc.config.get_configfile_paths(only_existing=False) # doctest: +SKIP
['/etc/xdg/xdg-ubuntu/dwave/dwave.conf',
'/usr/share/upstart/xdg/dwave/dwave.conf',
'/etc/xdg/dwave/dwave.conf',
'/home/mary/.config/dwave/dwave.conf',
'./dwave.conf']
>>> # Find default configuration path
>>> dc.config.get_default_configfile_path() # doctest: +SKIP
'/home/mary/.config/dwave/dwave.conf'
|
entailment
|
def load_config_from_files(filenames=None):
"""Load D-Wave Cloud Client configuration from a list of files.
.. note:: This method is not standardly used to set up D-Wave Cloud Client configuration.
It is recommended you use :meth:`.Client.from_config` or
:meth:`.config.load_config` instead.
Configuration files comply with standard Windows INI-like format,
parsable with Python's :mod:`configparser`. A section called
``defaults`` contains default values inherited by other sections.
Each filename in the list (each configuration file loaded) progressively upgrades
the final configuration, on a key by key basis, per each section.
Args:
filenames (list[str], default=None):
D-Wave Cloud Client configuration files (paths and names).
If ``None``, searches for a configuration file named ``dwave.conf``
in all system-wide configuration directories, in the user-local
configuration directory, and in the current working directory,
following the user/system configuration paths of :func:`get_configfile_paths`.
Returns:
:obj:`~configparser.ConfigParser`:
:class:`dict`-like mapping of configuration sections (profiles) to
mapping of per-profile keys holding values.
Raises:
:exc:`~dwave.cloud.exceptions.ConfigFileReadError`:
Config file specified or detected could not be opened or read.
:exc:`~dwave.cloud.exceptions.ConfigFileParseError`:
Config file parse failed.
Examples:
This example loads configurations from two files. One contains a default
section with key/values that are overwritten by any profile section that
contains that key/value; for example, profile dw2000b in file dwave_b.conf
overwrites the default URL and client type, which profile dw2000a inherits
from the defaults section, while profile dw2000a overwrites the API token that
profile dw2000b inherits.
The files, which are located in the current working directory, are
(1) dwave_a.conf::
[defaults]
endpoint = https://url.of.some.dwavesystem.com/sapi
client = qpu
token = ABC-123456789123456789123456789
[dw2000a]
solver = EXAMPLE_2000Q_SYSTEM
token = DEF-987654321987654321987654321
and (2) dwave_b.conf::
[dw2000b]
endpoint = https://url.of.some.other.dwavesystem.com/sapi
client = sw
solver = EXAMPLE_2000Q_SYSTEM
The following example code loads configuration from both these files, with
the defined overrides and inheritance.
.. code:: python
>>> import dwave.cloud as dc
>>> import sys
>>> configuration = dc.config.load_config_from_files(["./dwave_a.conf", "./dwave_b.conf"]) # doctest: +SKIP
>>> configuration.write(sys.stdout) # doctest: +SKIP
[defaults]
endpoint = https://url.of.some.dwavesystem.com/sapi
client = qpu
token = ABC-123456789123456789123456789
[dw2000a]
solver = EXAMPLE_2000Q_SYSTEM
token = DEF-987654321987654321987654321
[dw2000b]
endpoint = https://url.of.some.other.dwavesystem.com/sapi
client = sw
solver = EXAMPLE_2000Q_SYSTEM
"""
if filenames is None:
filenames = get_configfile_paths()
config = configparser.ConfigParser(default_section="defaults")
for filename in filenames:
try:
with open(filename, 'r') as f:
config.read_file(f, filename)
except (IOError, OSError):
raise ConfigFileReadError("Failed to read {!r}".format(filename))
except configparser.Error:
raise ConfigFileParseError("Failed to parse {!r}".format(filename))
return config
|
Load D-Wave Cloud Client configuration from a list of files.
.. note:: This method is not standardly used to set up D-Wave Cloud Client configuration.
It is recommended you use :meth:`.Client.from_config` or
:meth:`.config.load_config` instead.
Configuration files comply with standard Windows INI-like format,
parsable with Python's :mod:`configparser`. A section called
``defaults`` contains default values inherited by other sections.
Each filename in the list (each configuration file loaded) progressively upgrades
the final configuration, on a key by key basis, per each section.
Args:
filenames (list[str], default=None):
D-Wave Cloud Client configuration files (paths and names).
If ``None``, searches for a configuration file named ``dwave.conf``
in all system-wide configuration directories, in the user-local
configuration directory, and in the current working directory,
following the user/system configuration paths of :func:`get_configfile_paths`.
Returns:
:obj:`~configparser.ConfigParser`:
:class:`dict`-like mapping of configuration sections (profiles) to
mapping of per-profile keys holding values.
Raises:
:exc:`~dwave.cloud.exceptions.ConfigFileReadError`:
Config file specified or detected could not be opened or read.
:exc:`~dwave.cloud.exceptions.ConfigFileParseError`:
Config file parse failed.
Examples:
This example loads configurations from two files. One contains a default
section with key/values that are overwritten by any profile section that
contains that key/value; for example, profile dw2000b in file dwave_b.conf
overwrites the default URL and client type, which profile dw2000a inherits
from the defaults section, while profile dw2000a overwrites the API token that
profile dw2000b inherits.
The files, which are located in the current working directory, are
(1) dwave_a.conf::
[defaults]
endpoint = https://url.of.some.dwavesystem.com/sapi
client = qpu
token = ABC-123456789123456789123456789
[dw2000a]
solver = EXAMPLE_2000Q_SYSTEM
token = DEF-987654321987654321987654321
and (2) dwave_b.conf::
[dw2000b]
endpoint = https://url.of.some.other.dwavesystem.com/sapi
client = sw
solver = EXAMPLE_2000Q_SYSTEM
The following example code loads configuration from both these files, with
the defined overrides and inheritance.
.. code:: python
>>> import dwave.cloud as dc
>>> import sys
>>> configuration = dc.config.load_config_from_files(["./dwave_a.conf", "./dwave_b.conf"]) # doctest: +SKIP
>>> configuration.write(sys.stdout) # doctest: +SKIP
[defaults]
endpoint = https://url.of.some.dwavesystem.com/sapi
client = qpu
token = ABC-123456789123456789123456789
[dw2000a]
solver = EXAMPLE_2000Q_SYSTEM
token = DEF-987654321987654321987654321
[dw2000b]
endpoint = https://url.of.some.other.dwavesystem.com/sapi
client = sw
solver = EXAMPLE_2000Q_SYSTEM
|
entailment
|
def load_profile_from_files(filenames=None, profile=None):
"""Load a profile from a list of D-Wave Cloud Client configuration files.
.. note:: This method is not standardly used to set up D-Wave Cloud Client configuration.
It is recommended you use :meth:`.Client.from_config` or
:meth:`.config.load_config` instead.
Configuration files comply with standard Windows INI-like format,
parsable with Python's :mod:`configparser`.
Each file in the list is progressively searched until the first profile is found.
This function does not input profile information from environment variables.
Args:
filenames (list[str], default=None):
D-Wave cloud client configuration files (path and name). If ``None``,
searches for existing configuration files in the standard directories
of :func:`get_configfile_paths`.
profile (str, default=None):
Name of profile to return from reading the configuration from the specified
configuration file(s). If ``None``, progressively falls back in the
following order:
(1) ``profile`` key following ``[defaults]`` section.
(2) First non-``[defaults]`` section.
(3) ``[defaults]`` section.
Returns:
dict:
Mapping of configuration keys to values. If no valid config/profile
is found, returns an empty dict.
Raises:
:exc:`~dwave.cloud.exceptions.ConfigFileReadError`:
Config file specified or detected could not be opened or read.
:exc:`~dwave.cloud.exceptions.ConfigFileParseError`:
Config file parse failed.
:exc:`ValueError`:
Profile name not found.
Examples:
This example loads a profile based on configurations from two files. It
finds the first profile, dw2000a, in the first file, dwave_a.conf, and adds to
the values of the defaults section, overwriting the existing client value,
while ignoring the profile in the second file, dwave_b.conf.
The files, which are located in the current working directory, are
(1) dwave_a.conf::
[defaults]
endpoint = https://url.of.some.dwavesystem.com/sapi
client = qpu
token = ABC-123456789123456789123456789
[dw2000a]
client = sw
solver = EXAMPLE_2000Q_SYSTEM_A
token = DEF-987654321987654321987654321
and (2) dwave_b.conf::
[dw2000b]
endpoint = https://url.of.some.other.dwavesystem.com/sapi
client = qpu
solver = EXAMPLE_2000Q_SYSTEM_B
The following example code loads profile values from parsing both these files,
by default loading the first profile encountered or an explicitly specified profile.
>>> import dwave.cloud as dc
>>> dc.config.load_profile_from_files(["./dwave_a.conf", "./dwave_b.conf"]) # doctest: +SKIP
{'client': u'sw',
'endpoint': u'https://url.of.some.dwavesystem.com/sapi',
'solver': u'EXAMPLE_2000Q_SYSTEM_A',
'token': u'DEF-987654321987654321987654321'}
>>> dc.config.load_profile_from_files(["./dwave_a.conf", "./dwave_b.conf"],
... profile='dw2000b') # doctest: +SKIP
{'client': u'qpu',
'endpoint': u'https://url.of.some.other.dwavesystem.com/sapi',
'solver': u'EXAMPLE_2000Q_SYSTEM_B',
'token': u'ABC-123456789123456789123456789'}
"""
# progressively build config from a file, or a list of auto-detected files
# raises ConfigFileReadError/ConfigFileParseError on error
config = load_config_from_files(filenames)
# determine profile name fallback:
# (1) profile key under [defaults],
# (2) first non-[defaults] section
# (3) [defaults] section
first_section = next(iter(config.sections() + [None]))
config_defaults = config.defaults()
if not profile:
profile = config_defaults.get('profile', first_section)
if profile:
try:
section = dict(config[profile])
except KeyError:
raise ValueError("Config profile {!r} not found".format(profile))
else:
# as the very last resort (unspecified profile name and
# no profiles defined in config), try to use [defaults]
if config_defaults:
section = config_defaults
else:
section = {}
return section
|
Load a profile from a list of D-Wave Cloud Client configuration files.
.. note:: This method is not standardly used to set up D-Wave Cloud Client configuration.
It is recommended you use :meth:`.Client.from_config` or
:meth:`.config.load_config` instead.
Configuration files comply with standard Windows INI-like format,
parsable with Python's :mod:`configparser`.
Each file in the list is progressively searched until the first profile is found.
This function does not input profile information from environment variables.
Args:
filenames (list[str], default=None):
D-Wave cloud client configuration files (path and name). If ``None``,
searches for existing configuration files in the standard directories
of :func:`get_configfile_paths`.
profile (str, default=None):
Name of profile to return from reading the configuration from the specified
configuration file(s). If ``None``, progressively falls back in the
following order:
(1) ``profile`` key following ``[defaults]`` section.
(2) First non-``[defaults]`` section.
(3) ``[defaults]`` section.
Returns:
dict:
Mapping of configuration keys to values. If no valid config/profile
is found, returns an empty dict.
Raises:
:exc:`~dwave.cloud.exceptions.ConfigFileReadError`:
Config file specified or detected could not be opened or read.
:exc:`~dwave.cloud.exceptions.ConfigFileParseError`:
Config file parse failed.
:exc:`ValueError`:
Profile name not found.
Examples:
This example loads a profile based on configurations from two files. It
finds the first profile, dw2000a, in the first file, dwave_a.conf, and adds to
the values of the defaults section, overwriting the existing client value,
while ignoring the profile in the second file, dwave_b.conf.
The files, which are located in the current working directory, are
(1) dwave_a.conf::
[defaults]
endpoint = https://url.of.some.dwavesystem.com/sapi
client = qpu
token = ABC-123456789123456789123456789
[dw2000a]
client = sw
solver = EXAMPLE_2000Q_SYSTEM_A
token = DEF-987654321987654321987654321
and (2) dwave_b.conf::
[dw2000b]
endpoint = https://url.of.some.other.dwavesystem.com/sapi
client = qpu
solver = EXAMPLE_2000Q_SYSTEM_B
The following example code loads profile values from parsing both these files,
by default loading the first profile encountered or an explicitly specified profile.
>>> import dwave.cloud as dc
>>> dc.config.load_profile_from_files(["./dwave_a.conf", "./dwave_b.conf"]) # doctest: +SKIP
{'client': u'sw',
'endpoint': u'https://url.of.some.dwavesystem.com/sapi',
'solver': u'EXAMPLE_2000Q_SYSTEM_A',
'token': u'DEF-987654321987654321987654321'}
>>> dc.config.load_profile_from_files(["./dwave_a.conf", "./dwave_b.conf"],
... profile='dw2000b') # doctest: +SKIP
{'client': u'qpu',
'endpoint': u'https://url.of.some.other.dwavesystem.com/sapi',
'solver': u'EXAMPLE_2000Q_SYSTEM_B',
'token': u'ABC-123456789123456789123456789'}
|
entailment
|
def load_config(config_file=None, profile=None, client=None,
endpoint=None, token=None, solver=None, proxy=None):
"""Load D-Wave Cloud Client configuration based on a configuration file.
Configuration values can be specified in multiple ways, ranked in the following
order (with 1 the highest ranked):
1. Values specified as keyword arguments in :func:`load_config()`. These values replace
values read from a configuration file, and therefore must be **strings**, including float
values for timeouts, boolean flags (tested for "truthiness"), and solver feature
constraints (a dictionary encoded as JSON).
2. Values specified as environment variables.
3. Values specified in the configuration file.
Configuration-file format is described in :mod:`dwave.cloud.config`.
If the location of the configuration file is not specified, auto-detection
searches for existing configuration files in the standard directories
of :func:`get_configfile_paths`.
If a configuration file explicitly specified, via an argument or
environment variable, does not exist or is unreadable, loading fails with
:exc:`~dwave.cloud.exceptions.ConfigFileReadError`. Loading fails
with :exc:`~dwave.cloud.exceptions.ConfigFileParseError` if the file is
readable but invalid as a configuration file.
Similarly, if a profile explicitly specified, via an argument or
environment variable, is not present in the loaded configuration, loading fails
with :exc:`ValueError`. Explicit profile selection also fails if the configuration
file is not explicitly specified, detected on the system, or defined via
an environment variable.
Environment variables: ``DWAVE_CONFIG_FILE``, ``DWAVE_PROFILE``, ``DWAVE_API_CLIENT``,
``DWAVE_API_ENDPOINT``, ``DWAVE_API_TOKEN``, ``DWAVE_API_SOLVER``, ``DWAVE_API_PROXY``.
Environment variables are described in :mod:`dwave.cloud.config`.
Args:
config_file (str/[str]/None/False/True, default=None):
Path to configuration file(s).
If `None`, the value is taken from `DWAVE_CONFIG_FILE` environment
variable if defined. If the environment variable is undefined or empty,
auto-detection searches for existing configuration files in the standard
directories of :func:`get_configfile_paths`.
If `False`, loading from file(s) is skipped; if `True`, forces auto-detection
(regardless of the `DWAVE_CONFIG_FILE` environment variable).
profile (str, default=None):
Profile name (name of the profile section in the configuration file).
If undefined, inferred from `DWAVE_PROFILE` environment variable if
defined. If the environment variable is undefined or empty, a profile is
selected in the following order:
1. From the default section if it includes a profile key.
2. The first section (after the default section).
3. If no other section is defined besides `[defaults]`, the defaults
section is promoted and selected.
client (str, default=None):
Client type used for accessing the API. Supported values are `qpu`
for :class:`dwave.cloud.qpu.Client` and `sw` for
:class:`dwave.cloud.sw.Client`.
endpoint (str, default=None):
API endpoint URL.
token (str, default=None):
API authorization token.
solver (str, default=None):
:term:`solver` features, as a JSON-encoded dictionary of feature constraints,
the client should use. See :meth:`~dwave.cloud.client.Client.get_solvers` for
semantics of supported feature constraints.
If undefined, the client uses a solver definition from environment variables,
a configuration file, or falls back to the first available online solver.
For backward compatibility, solver name in string format is accepted and
converted to ``{"name": <solver name>}``.
proxy (str, default=None):
URL for proxy to use in connections to D-Wave API. Can include
username/password, port, scheme, etc. If undefined, client
uses the system-level proxy, if defined, or connects directly to the API.
Returns:
dict:
Mapping of configuration keys to values for the profile
(section), as read from the configuration file and optionally overridden by
environment values and specified keyword arguments.
Always contains the `client`, `endpoint`, `token`, `solver`, and `proxy`
keys.
Raises:
:exc:`ValueError`:
Invalid (non-existing) profile name.
:exc:`~dwave.cloud.exceptions.ConfigFileReadError`:
Config file specified or detected could not be opened or read.
:exc:`~dwave.cloud.exceptions.ConfigFileParseError`:
Config file parse failed.
Examples:
This example loads the configuration from an auto-detected configuration file
in the home directory of a Windows system user.
>>> import dwave.cloud as dc
>>> dc.config.load_config()
{'client': u'qpu',
'endpoint': u'https://url.of.some.dwavesystem.com/sapi',
'proxy': None,
'solver': u'EXAMPLE_2000Q_SYSTEM_A',
'token': u'DEF-987654321987654321987654321'}
>>> # See which configuration file was loaded
>>> dc.config.get_configfile_paths()
[u'C:\\Users\\jane\\AppData\\Local\\dwavesystem\\dwave\\dwave.conf']
Additional examples are given in :mod:`dwave.cloud.config`.
"""
if profile is None:
profile = os.getenv("DWAVE_PROFILE")
if config_file == False:
# skip loading from file altogether
section = {}
elif config_file == True:
# force auto-detection, disregarding DWAVE_CONFIG_FILE
section = load_profile_from_files(None, profile)
else:
# auto-detect if not specified with arg or env
if config_file is None:
# note: both empty and undefined DWAVE_CONFIG_FILE treated as None
config_file = os.getenv("DWAVE_CONFIG_FILE")
# handle ''/None/str/[str] for `config_file` (after env)
filenames = None
if config_file:
if isinstance(config_file, six.string_types):
filenames = [config_file]
else:
filenames = config_file
section = load_profile_from_files(filenames, profile)
# override a selected subset of values via env or kwargs,
# pass-through the rest unmodified
section['client'] = client or os.getenv("DWAVE_API_CLIENT", section.get('client'))
section['endpoint'] = endpoint or os.getenv("DWAVE_API_ENDPOINT", section.get('endpoint'))
section['token'] = token or os.getenv("DWAVE_API_TOKEN", section.get('token'))
section['solver'] = solver or os.getenv("DWAVE_API_SOLVER", section.get('solver'))
section['proxy'] = proxy or os.getenv("DWAVE_API_PROXY", section.get('proxy'))
return section
|
Load D-Wave Cloud Client configuration based on a configuration file.
Configuration values can be specified in multiple ways, ranked in the following
order (with 1 the highest ranked):
1. Values specified as keyword arguments in :func:`load_config()`. These values replace
values read from a configuration file, and therefore must be **strings**, including float
values for timeouts, boolean flags (tested for "truthiness"), and solver feature
constraints (a dictionary encoded as JSON).
2. Values specified as environment variables.
3. Values specified in the configuration file.
Configuration-file format is described in :mod:`dwave.cloud.config`.
If the location of the configuration file is not specified, auto-detection
searches for existing configuration files in the standard directories
of :func:`get_configfile_paths`.
If a configuration file explicitly specified, via an argument or
environment variable, does not exist or is unreadable, loading fails with
:exc:`~dwave.cloud.exceptions.ConfigFileReadError`. Loading fails
with :exc:`~dwave.cloud.exceptions.ConfigFileParseError` if the file is
readable but invalid as a configuration file.
Similarly, if a profile explicitly specified, via an argument or
environment variable, is not present in the loaded configuration, loading fails
with :exc:`ValueError`. Explicit profile selection also fails if the configuration
file is not explicitly specified, detected on the system, or defined via
an environment variable.
Environment variables: ``DWAVE_CONFIG_FILE``, ``DWAVE_PROFILE``, ``DWAVE_API_CLIENT``,
``DWAVE_API_ENDPOINT``, ``DWAVE_API_TOKEN``, ``DWAVE_API_SOLVER``, ``DWAVE_API_PROXY``.
Environment variables are described in :mod:`dwave.cloud.config`.
Args:
config_file (str/[str]/None/False/True, default=None):
Path to configuration file(s).
If `None`, the value is taken from `DWAVE_CONFIG_FILE` environment
variable if defined. If the environment variable is undefined or empty,
auto-detection searches for existing configuration files in the standard
directories of :func:`get_configfile_paths`.
If `False`, loading from file(s) is skipped; if `True`, forces auto-detection
(regardless of the `DWAVE_CONFIG_FILE` environment variable).
profile (str, default=None):
Profile name (name of the profile section in the configuration file).
If undefined, inferred from `DWAVE_PROFILE` environment variable if
defined. If the environment variable is undefined or empty, a profile is
selected in the following order:
1. From the default section if it includes a profile key.
2. The first section (after the default section).
3. If no other section is defined besides `[defaults]`, the defaults
section is promoted and selected.
client (str, default=None):
Client type used for accessing the API. Supported values are `qpu`
for :class:`dwave.cloud.qpu.Client` and `sw` for
:class:`dwave.cloud.sw.Client`.
endpoint (str, default=None):
API endpoint URL.
token (str, default=None):
API authorization token.
solver (str, default=None):
:term:`solver` features, as a JSON-encoded dictionary of feature constraints,
the client should use. See :meth:`~dwave.cloud.client.Client.get_solvers` for
semantics of supported feature constraints.
If undefined, the client uses a solver definition from environment variables,
a configuration file, or falls back to the first available online solver.
For backward compatibility, solver name in string format is accepted and
converted to ``{"name": <solver name>}``.
proxy (str, default=None):
URL for proxy to use in connections to D-Wave API. Can include
username/password, port, scheme, etc. If undefined, client
uses the system-level proxy, if defined, or connects directly to the API.
Returns:
dict:
Mapping of configuration keys to values for the profile
(section), as read from the configuration file and optionally overridden by
environment values and specified keyword arguments.
Always contains the `client`, `endpoint`, `token`, `solver`, and `proxy`
keys.
Raises:
:exc:`ValueError`:
Invalid (non-existing) profile name.
:exc:`~dwave.cloud.exceptions.ConfigFileReadError`:
Config file specified or detected could not be opened or read.
:exc:`~dwave.cloud.exceptions.ConfigFileParseError`:
Config file parse failed.
Examples:
This example loads the configuration from an auto-detected configuration file
in the home directory of a Windows system user.
>>> import dwave.cloud as dc
>>> dc.config.load_config()
{'client': u'qpu',
'endpoint': u'https://url.of.some.dwavesystem.com/sapi',
'proxy': None,
'solver': u'EXAMPLE_2000Q_SYSTEM_A',
'token': u'DEF-987654321987654321987654321'}
>>> # See which configuration file was loaded
>>> dc.config.get_configfile_paths()
[u'C:\\Users\\jane\\AppData\\Local\\dwavesystem\\dwave\\dwave.conf']
Additional examples are given in :mod:`dwave.cloud.config`.
|
entailment
|
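The precedence rules above (keyword argument over environment variable over file value) can be exercised directly; for example, assuming a configuration file on the system defines a token, something like the following holds (paths and token values are illustrative only):
import os
import dwave.cloud as dc

os.environ['DWAVE_API_TOKEN'] = 'ENV-token'

# Any token read from the file is overridden by the environment variable...
config = dc.config.load_config()                   # config['token'] == 'ENV-token'
# ...and both are overridden by an explicit keyword argument.
config = dc.config.load_config(token='KW-token')   # config['token'] == 'KW-token'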
def legacy_load_config(profile=None, endpoint=None, token=None, solver=None,
proxy=None, **kwargs):
"""Load configured URLs and token for the SAPI server.
.. warning:: Included only for backward compatibility. Please use
:func:`load_config` or the client factory
:meth:`~dwave.cloud.client.Client.from_config` instead.
This method tries to load a legacy configuration file from ``~/.dwrc``, select a
specified `profile` (or, if not specified, the first profile), and override
individual keys with values read from environment variables or
specified explicitly as key values in the function.
Configuration values can be specified in multiple ways, ranked in the following
order (with 1 the highest ranked):
1. Values specified as keyword arguments in :func:`legacy_load_config()`
2. Values specified as environment variables
3. Values specified in the legacy ``~/.dwrc`` configuration file
Environment variables searched for are:
- ``DW_INTERNAL__HTTPLINK``
- ``DW_INTERNAL__TOKEN``
- ``DW_INTERNAL__HTTPPROXY``
- ``DW_INTERNAL__SOLVER``
Legacy configuration file format is a modified CSV where the first comma is
replaced with a bar character (``|``). Each line encodes a single profile. Its
columns are::
profile_name|endpoint_url,authentication_token,proxy_url,default_solver_name
All its fields after ``authentication_token`` are optional.
When there are multiple connections in a file, the first one is
the default. Any commas in the URLs must be percent-encoded.
Args:
profile (str):
Profile name in the legacy configuration file.
endpoint (str, default=None):
API endpoint URL.
token (str, default=None):
API authorization token.
solver (str, default=None):
Default solver to use in :meth:`~dwave.cloud.client.Client.get_solver`.
If undefined, all calls to :meth:`~dwave.cloud.client.Client.get_solver`
must explicitly specify the solver name/id.
proxy (str, default=None):
URL for proxy to use in connections to D-Wave API. Can include
username/password, port, scheme, etc. If undefined, client uses a
system-level proxy, if defined, or connects directly to the API.
Returns:
Dictionary with keys: endpoint, token, solver, and proxy.
Examples:
This example creates a client using the :meth:`~dwave.cloud.client.Client.from_config`
method, which falls back on the legacy file by default when it fails to
find a D-Wave Cloud Client configuration file (setting its `legacy_config_fallback`
parameter to False precludes this fall-back operation). For this example,
no D-Wave Cloud Client configuration file is present on the local system;
instead the following ``.dwrc`` legacy configuration file is present in the
user's home directory::
profile-a|https://one.com,token-one
profile-b|https://two.com,token-two
The following example code creates a client without explicitly specifying
key values, therefore auto-detection searches for existing (non-legacy) configuration
files in the standard directories of :func:`get_configfile_paths` and, failing to
find one, falls back on the existing legacy configuration file above.
>>> import dwave.cloud as dc
>>> client = dwave.cloud.Client.from_config() # doctest: +SKIP
>>> client.endpoint # doctest: +SKIP
'https://one.com'
>>> client.token # doctest: +SKIP
'token-one'
The following examples specify a profile and/or token.
>>> # Explicitly specify a profile
>>> client = dwave.cloud.Client.from_config(profile='profile-b')
>>> # Will try to connect with the url `https://two.com` and the token `token-two`.
>>> client = dwave.cloud.Client.from_config(profile='profile-b', token='new-token')
>>> # Will try to connect with the url `https://two.com` and the token `new-token`.
"""
def _parse_config(fp, filename):
fields = ('endpoint', 'token', 'proxy', 'solver')
config = OrderedDict()
for line in fp:
# strip whitespace, skip blank and comment lines
line = line.strip()
if not line or line.startswith('#'):
continue
# parse each record, store in dict with label as key
try:
label, data = line.split('|', 1)
values = [v.strip() or None for v in data.split(',')]
config[label] = dict(zip(fields, values))
except:
raise ConfigFileParseError(
"Failed to parse {!r}, line {!r}".format(filename, line))
return config
def _read_config(filename):
try:
with open(filename, 'r') as f:
return _parse_config(f, filename)
except (IOError, OSError):
raise ConfigFileReadError("Failed to read {!r}".format(filename))
config = {}
filename = os.path.expanduser('~/.dwrc')
if os.path.exists(filename):
config = _read_config(filename)
# load profile if specified, or first one in file
if profile:
try:
section = config[profile]
except KeyError:
raise ValueError("Config profile {!r} not found".format(profile))
else:
try:
_, section = next(iter(config.items()))
except StopIteration:
section = {}
# override config variables (if any) with environment and then with arguments
section['endpoint'] = endpoint or os.getenv("DW_INTERNAL__HTTPLINK", section.get('endpoint'))
section['token'] = token or os.getenv("DW_INTERNAL__TOKEN", section.get('token'))
section['proxy'] = proxy or os.getenv("DW_INTERNAL__HTTPPROXY", section.get('proxy'))
section['solver'] = solver or os.getenv("DW_INTERNAL__SOLVER", section.get('solver'))
section.update(kwargs)
return section
|
Load configured URLs and token for the SAPI server.
.. warning:: Included only for backward compatibility. Please use
:func:`load_config` or the client factory
:meth:`~dwave.cloud.client.Client.from_config` instead.
This method tries to load a legacy configuration file from ``~/.dwrc``, select a
specified `profile` (or, if not specified, the first profile), and override
individual keys with values read from environment variables or
specified explicitly as key values in the function.
Configuration values can be specified in multiple ways, ranked in the following
order (with 1 the highest ranked):
1. Values specified as keyword arguments in :func:`legacy_load_config()`
2. Values specified as environment variables
3. Values specified in the legacy ``~/.dwrc`` configuration file
Environment variables searched for are:
- ``DW_INTERNAL__HTTPLINK``
- ``DW_INTERNAL__TOKEN``
- ``DW_INTERNAL__HTTPPROXY``
- ``DW_INTERNAL__SOLVER``
Legacy configuration file format is a modified CSV where the first comma is
replaced with a bar character (``|``). Each line encodes a single profile. Its
columns are::
profile_name|endpoint_url,authentication_token,proxy_url,default_solver_name
All its fields after ``authentication_token`` are optional.
When there are multiple connections in a file, the first one is
the default. Any commas in the URLs must be percent-encoded.
Args:
profile (str):
Profile name in the legacy configuration file.
endpoint (str, default=None):
API endpoint URL.
token (str, default=None):
API authorization token.
solver (str, default=None):
Default solver to use in :meth:`~dwave.cloud.client.Client.get_solver`.
If undefined, all calls to :meth:`~dwave.cloud.client.Client.get_solver`
must explicitly specify the solver name/id.
proxy (str, default=None):
URL for proxy to use in connections to D-Wave API. Can include
username/password, port, scheme, etc. If undefined, client uses a
system-level proxy, if defined, or connects directly to the API.
Returns:
Dictionary with keys: endpoint, token, solver, and proxy.
Examples:
This example creates a client using the :meth:`~dwave.cloud.client.Client.from_config`
method, which falls back on the legacy file by default when it fails to
find a D-Wave Cloud Client configuration file (setting its `legacy_config_fallback`
parameter to False precludes this fall-back operation). For this example,
no D-Wave Cloud Client configuration file is present on the local system;
instead the following ``.dwrc`` legacy configuration file is present in the
user's home directory::
profile-a|https://one.com,token-one
profile-b|https://two.com,token-two
The following example code creates a client without explicitly specifying
key values, therefore auto-detection searches for existing (non-legacy) configuration
files in the standard directories of :func:`get_configfile_paths` and, failing to
find one, falls back on the existing legacy configuration file above.
>>> import dwave.cloud as dc
>>> client = dwave.cloud.Client.from_config() # doctest: +SKIP
>>> client.endpoint # doctest: +SKIP
'https://one.com'
>>> client.token # doctest: +SKIP
'token-one'
The following examples specify a profile and/or token.
>>> # Explicitly specify a profile
>>> client = dwave.cloud.Client.from_config(profile='profile-b')
>>> # Will try to connect with the url `https://two.com` and the token `token-two`.
>>> client = dwave.cloud.Client.from_config(profile='profile-b', token='new-token')
>>> # Will try to connect with the url `https://two.com` and the token `new-token`.
|
entailment
|
def _check_data(data):
"""Check whether `data` is a valid input/output for libsamplerate.
Returns
-------
num_frames
Number of frames in `data`.
channels
Number of channels in `data`.
Raises
------
ValueError: If invalid data is supplied.
"""
if not (data.dtype == _np.float32 and data.flags.c_contiguous):
raise ValueError('supplied data must be float32 and C contiguous')
if data.ndim == 2:
num_frames, channels = data.shape
elif data.ndim == 1:
num_frames, channels = data.size, 1
else:
raise ValueError('rank > 2 not supported')
return num_frames, channels
|
Check whether `data` is a valid input/output for libsamplerate.
Returns
-------
num_frames
Number of frames in `data`.
channels
Number of channels in `data`.
Raises
------
ValueError: If invalid data is supplied.
|
entailment
|
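In practice this means buffers handed to the wrappers below must be created (or converted) as C-contiguous float32 arrays; a short NumPy illustration:
import numpy as np

# Interleaved stereo buffer: 1024 frames, 2 channels, float32, C order.
good = np.zeros((1024, 2), dtype=np.float32)

# A float64 or Fortran-ordered array would be rejected by _check_data;
# np.require converts it into an acceptable copy where needed.
bad = np.zeros((1024, 2), dtype=np.float64)
fixed = np.require(bad, dtype=np.float32, requirements='C')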
def src_simple(input_data, output_data, ratio, converter_type, channels):
"""Perform a single conversion from an input buffer to an output buffer.
Simple interface for performing a single conversion from input buffer to
output buffer at a fixed conversion ratio. Simple interface does not require
initialisation as it can only operate on a single buffer worth of audio.
"""
input_frames, _ = _check_data(input_data)
output_frames, _ = _check_data(output_data)
data = ffi.new('SRC_DATA*')
data.input_frames = input_frames
data.output_frames = output_frames
data.src_ratio = ratio
data.data_in = ffi.cast('float*', ffi.from_buffer(input_data))
data.data_out = ffi.cast('float*', ffi.from_buffer(output_data))
error = _lib.src_simple(data, converter_type, channels)
return error, data.input_frames_used, data.output_frames_gen
|
Perform a single conversion from an input buffer to an output buffer.
Simple interface for performing a single conversion from input buffer to
output buffer at a fixed conversion ratio. Simple interface does not require
initialisation as it can only operate on a single buffer worth of audio.
|
entailment
|
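A hedged usage sketch for the simple interface, assuming this wrapper module is importable (the module name `samplerate_ffi` is a placeholder) and that converter type 0 selects libsamplerate's best-quality sinc converter:
import numpy as np
# Placeholder import: use whatever name this wrapper module is exposed under.
from samplerate_ffi import src_simple

ratio = 48000.0 / 44100.0
input_data = np.random.rand(44100, 2).astype(np.float32)        # 1 s of stereo audio
output_data = np.zeros((int(44100 * ratio) + 1, 2), dtype=np.float32)

error, frames_used, frames_gen = src_simple(input_data, output_data,
                                            ratio, converter_type=0, channels=2)
assert error == 0
resampled = output_data[:frames_gen]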
def src_new(converter_type, channels):
"""Initialise a new sample rate converter.
Parameters
----------
converter_type : int
Converter to be used.
channels : int
Number of channels.
Returns
-------
state
An anonymous pointer to the internal state of the converter.
error : int
Error code.
"""
error = ffi.new('int*')
state = _lib.src_new(converter_type, channels, error)
return state, error[0]
|
Initialise a new sample rate converter.
Parameters
----------
converter_type : int
Converter to be used.
channels : int
Number of channels.
Returns
-------
state
An anonymous pointer to the internal state of the converter.
error : int
Error code.
|
entailment
|
def src_process(state, input_data, output_data, ratio, end_of_input=0):
"""Standard processing function.
Returns non zero on error.
"""
input_frames, _ = _check_data(input_data)
output_frames, _ = _check_data(output_data)
data = ffi.new('SRC_DATA*')
data.input_frames = input_frames
data.output_frames = output_frames
data.src_ratio = ratio
data.data_in = ffi.cast('float*', ffi.from_buffer(input_data))
data.data_out = ffi.cast('float*', ffi.from_buffer(output_data))
data.end_of_input = end_of_input
error = _lib.src_process(state, data)
return error, data.input_frames_used, data.output_frames_gen
|
Standard processing function.
Returns non zero on error.
|
entailment
|
def _src_input_callback(cb_data, data):
"""Internal callback function to be used with the callback API.
Pulls the Python callback function from the handle contained in `cb_data`
and calls it to fetch frames. Frames are converted to the format required by
the API (float, interleaved channels). A reference to these data is kept
internally.
Returns
-------
frames : int
The number of frames supplied.
"""
cb_data = ffi.from_handle(cb_data)
ret = cb_data['callback']()
if ret is None:
cb_data['last_input'] = None
return 0 # No frames supplied
input_data = _np.require(ret, requirements='C', dtype=_np.float32)
input_frames, channels = _check_data(input_data)
# Check whether the correct number of channels is supplied by user.
if cb_data['channels'] != channels:
raise ValueError('Invalid number of channels in callback.')
# Store a reference of the input data to ensure it is still alive when
# accessed by libsamplerate.
cb_data['last_input'] = input_data
data[0] = ffi.cast('float*', ffi.from_buffer(input_data))
return input_frames
|
Internal callback function to be used with the callback API.
Pulls the Python callback function from the handle contained in `cb_data`
and calls it to fetch frames. Frames are converted to the format required by
the API (float, interleaved channels). A reference to these data is kept
internally.
Returns
-------
frames : int
The number of frames supplied.
|
entailment
|
def src_callback_new(callback, converter_type, channels):
"""Initialisation for the callback based API.
Parameters
----------
callback : function
Called whenever new frames are to be read. Must return a NumPy array
of shape (num_frames, channels).
converter_type : int
Converter to be used.
channels : int
Number of channels.
Returns
-------
state
An anonymous pointer to the internal state of the converter.
handle
A CFFI handle to the callback data.
error : int
Error code.
"""
cb_data = {'callback': callback, 'channels': channels}
handle = ffi.new_handle(cb_data)
error = ffi.new('int*')
state = _lib.src_callback_new(_src_input_callback, converter_type,
channels, error, handle)
if state == ffi.NULL:
return None, handle, error[0]
return state, handle, error[0]
|
Initialisation for the callback based API.
Parameters
----------
callback : function
Called whenever new frames are to be read. Must return a NumPy array
of shape (num_frames, channels).
converter_type : int
Converter to be used.
channels : int
Number of channels.
Returns
-------
state
An anonymous pointer to the internal state of the converter.
handle
A CFFI handle to the callback data.
error : int
Error code.
|
entailment
|
def src_callback_read(state, ratio, frames, data):
"""Read up to `frames` worth of data using the callback API.
Returns
-------
frames : int
Number of frames read or -1 on error.
"""
data_ptr = ffi.cast('float*', ffi.from_buffer(data))
return _lib.src_callback_read(state, ratio, frames, data_ptr)
|
Read up to `frames` worth of data using the callback API.
Returns
-------
frames : int
Number of frames read or -1 on error.
|
entailment
|
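Putting the callback API together, roughly: supply a function that returns successive float32 blocks (or None when the input is exhausted), create the converter, then pull converted frames out. A sketch under the same placeholder-import assumption as above:
import numpy as np
# Placeholder import: use whatever name this wrapper module is exposed under.
from samplerate_ffi import src_callback_new, src_callback_read

blocks = iter([np.random.rand(4096, 2).astype(np.float32) for _ in range(4)])

def producer():
    # Return the next block of frames, or None when the input is exhausted.
    return next(blocks, None)

state, handle, error = src_callback_new(producer, converter_type=0, channels=2)
assert state is not None and error == 0
# Keep `handle` referenced for as long as the converter is in use.

ratio = 0.5
out = np.zeros((2048, 2), dtype=np.float32)
frames = src_callback_read(state, ratio, out.shape[0], out)
converted = out[:frames]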
def from_config(cls, config_file=None, profile=None, client=None,
endpoint=None, token=None, solver=None, proxy=None,
legacy_config_fallback=False, **kwargs):
"""Client factory method to instantiate a client instance from configuration.
Configuration values can be specified in multiple ways, ranked in the following
order (with 1 the highest ranked):
1. Values specified as keyword arguments in :func:`from_config()`
2. Values specified as environment variables
3. Values specified in the configuration file
Configuration-file format is described in :mod:`dwave.cloud.config`.
If the location of the configuration file is not specified, auto-detection
searches for existing configuration files in the standard directories
of :func:`get_configfile_paths`.
If a configuration file explicitly specified, via an argument or
environment variable, does not exist or is unreadable, loading fails with
:exc:`~dwave.cloud.exceptions.ConfigFileReadError`. Loading fails
with :exc:`~dwave.cloud.exceptions.ConfigFileParseError` if the file is
readable but invalid as a configuration file.
Similarly, if a profile explicitly specified, via an argument or
environment variable, is not present in the loaded configuration, loading fails
with :exc:`ValueError`. Explicit profile selection also fails if the configuration
file is not explicitly specified, detected on the system, or defined via
an environment variable.
Environment variables: ``DWAVE_CONFIG_FILE``, ``DWAVE_PROFILE``, ``DWAVE_API_CLIENT``,
``DWAVE_API_ENDPOINT``, ``DWAVE_API_TOKEN``, ``DWAVE_API_SOLVER``, ``DWAVE_API_PROXY``.
Environment variables are described in :mod:`dwave.cloud.config`.
Args:
config_file (str/[str]/None/False/True, default=None):
Path to configuration file.
If ``None``, the value is taken from ``DWAVE_CONFIG_FILE`` environment
variable if defined. If the environment variable is undefined or empty,
auto-detection searches for existing configuration files in the standard
directories of :func:`get_configfile_paths`.
If ``False``, loading from file is skipped; if ``True``, forces auto-detection
(regardless of the ``DWAVE_CONFIG_FILE`` environment variable).
profile (str, default=None):
Profile name (name of the profile section in the configuration file).
If undefined, inferred from ``DWAVE_PROFILE`` environment variable if
defined. If the environment variable is undefined or empty, a profile is
selected in the following order:
1. From the default section if it includes a profile key.
2. The first section (after the default section).
3. If no other section is defined besides ``[defaults]``, the defaults
section is promoted and selected.
client (str, default=None):
Client type used for accessing the API. Supported values are ``qpu``
for :class:`dwave.cloud.qpu.Client` and ``sw`` for
:class:`dwave.cloud.sw.Client`.
endpoint (str, default=None):
API endpoint URL.
token (str, default=None):
API authorization token.
solver (dict/str, default=None):
Default :term:`solver` features to use in :meth:`~dwave.cloud.client.Client.get_solver`.
Defined via dictionary of solver feature constraints
(see :meth:`~dwave.cloud.client.Client.get_solvers`).
For backward compatibility, a solver name, as a string,
is also accepted and converted to ``{"name": <solver name>}``.
If undefined, :meth:`~dwave.cloud.client.Client.get_solver` uses a
solver definition from environment variables, a configuration file, or
falls back to the first available online solver.
proxy (str, default=None):
URL for proxy to use in connections to D-Wave API. Can include
username/password, port, scheme, etc. If undefined, client
uses the system-level proxy, if defined, or connects directly to the API.
legacy_config_fallback (bool, default=False):
If True and loading from a standard D-Wave Cloud Client configuration
file (``dwave.conf``) fails, tries loading a legacy configuration file (``~/.dwrc``).
Other Parameters:
Unrecognized keys (str):
All unrecognized keys are passed through to the appropriate client class constructor
as string keyword arguments.
An explicit key value overrides an identical user-defined key value loaded from a
configuration file.
Returns:
:class:`~dwave.cloud.client.Client` (:class:`dwave.cloud.qpu.Client` or :class:`dwave.cloud.sw.Client`, default=:class:`dwave.cloud.qpu.Client`):
Appropriate instance of a QPU or software client.
Raises:
:exc:`~dwave.cloud.exceptions.ConfigFileReadError`:
Config file specified or detected could not be opened or read.
:exc:`~dwave.cloud.exceptions.ConfigFileParseError`:
Config file parse failed.
Examples:
A variety of examples are given in :mod:`dwave.cloud.config`.
This example initializes :class:`~dwave.cloud.client.Client` from an
explicitly specified configuration file, "~/jane/my_path_to_config/my_cloud_conf.conf"::
>>> from dwave.cloud import Client
>>> client = Client.from_config(config_file='~/jane/my_path_to_config/my_cloud_conf.conf') # doctest: +SKIP
>>> # code that uses client
>>> client.close()
"""
# try loading configuration from a preferred new config subsystem
# (`./dwave.conf`, `~/.config/dwave/dwave.conf`, etc)
config = load_config(
config_file=config_file, profile=profile, client=client,
endpoint=endpoint, token=token, solver=solver, proxy=proxy)
_LOGGER.debug("Config loaded: %r", config)
# fallback to legacy `.dwrc` if key variables missing
if legacy_config_fallback:
warnings.warn("'legacy_config_fallback' is deprecated, please convert "
"your legacy .dwrc file to the new config format.", DeprecationWarning)
if not config.get('token'):
config = legacy_load_config(
profile=profile, client=client,
endpoint=endpoint, token=token, solver=solver, proxy=proxy)
_LOGGER.debug("Legacy config loaded: %r", config)
# manual override of other (client-custom) arguments
config.update(kwargs)
from dwave.cloud import qpu, sw
_clients = {'qpu': qpu.Client, 'sw': sw.Client, 'base': cls}
_client = config.pop('client', None) or 'base'
_LOGGER.debug("Final config used for %s.Client(): %r", _client, config)
return _clients[_client](**config)
|
Client factory method to instantiate a client instance from configuration.
Configuration values can be specified in multiple ways, ranked in the following
order (with 1 the highest ranked):
1. Values specified as keyword arguments in :func:`from_config()`
2. Values specified as environment variables
3. Values specified in the configuration file
Configuration-file format is described in :mod:`dwave.cloud.config`.
If the location of the configuration file is not specified, auto-detection
searches for existing configuration files in the standard directories
of :func:`get_configfile_paths`.
If a configuration file explicitly specified, via an argument or
environment variable, does not exist or is unreadable, loading fails with
:exc:`~dwave.cloud.exceptions.ConfigFileReadError`. Loading fails
with :exc:`~dwave.cloud.exceptions.ConfigFileParseError` if the file is
readable but invalid as a configuration file.
Similarly, if a profile explicitly specified, via an argument or
environment variable, is not present in the loaded configuration, loading fails
with :exc:`ValueError`. Explicit profile selection also fails if the configuration
file is not explicitly specified, detected on the system, or defined via
an environment variable.
Environment variables: ``DWAVE_CONFIG_FILE``, ``DWAVE_PROFILE``, ``DWAVE_API_CLIENT``,
``DWAVE_API_ENDPOINT``, ``DWAVE_API_TOKEN``, ``DWAVE_API_SOLVER``, ``DWAVE_API_PROXY``.
Environment variables are described in :mod:`dwave.cloud.config`.
Args:
config_file (str/[str]/None/False/True, default=None):
Path to configuration file.
If ``None``, the value is taken from ``DWAVE_CONFIG_FILE`` environment
variable if defined. If the environment variable is undefined or empty,
auto-detection searches for existing configuration files in the standard
directories of :func:`get_configfile_paths`.
If ``False``, loading from file is skipped; if ``True``, forces auto-detection
(regardless of the ``DWAVE_CONFIG_FILE`` environment variable).
profile (str, default=None):
Profile name (name of the profile section in the configuration file).
If undefined, inferred from ``DWAVE_PROFILE`` environment variable if
defined. If the environment variable is undefined or empty, a profile is
selected in the following order:
1. From the default section if it includes a profile key.
2. The first section (after the default section).
3. If no other section is defined besides ``[defaults]``, the defaults
section is promoted and selected.
client (str, default=None):
Client type used for accessing the API. Supported values are ``qpu``
for :class:`dwave.cloud.qpu.Client` and ``sw`` for
:class:`dwave.cloud.sw.Client`.
endpoint (str, default=None):
API endpoint URL.
token (str, default=None):
API authorization token.
solver (dict/str, default=None):
Default :term:`solver` features to use in :meth:`~dwave.cloud.client.Client.get_solver`.
Defined via dictionary of solver feature constraints
(see :meth:`~dwave.cloud.client.Client.get_solvers`).
For backward compatibility, a solver name, as a string,
is also accepted and converted to ``{"name": <solver name>}``.
If undefined, :meth:`~dwave.cloud.client.Client.get_solver` uses a
solver definition from environment variables, a configuration file, or
falls back to the first available online solver.
proxy (str, default=None):
URL for proxy to use in connections to D-Wave API. Can include
username/password, port, scheme, etc. If undefined, client
uses the system-level proxy, if defined, or connects directly to the API.
legacy_config_fallback (bool, default=False):
If True and loading from a standard D-Wave Cloud Client configuration
file (``dwave.conf``) fails, tries loading a legacy configuration file (``~/.dwrc``).
Other Parameters:
Unrecognized keys (str):
All unrecognized keys are passed through to the appropriate client class constructor
as string keyword arguments.
An explicit key value overrides an identical user-defined key value loaded from a
configuration file.
Returns:
:class:`~dwave.cloud.client.Client` (:class:`dwave.cloud.qpu.Client` or :class:`dwave.cloud.sw.Client`, default=:class:`dwave.cloud.qpu.Client`):
Appropriate instance of a QPU or software client.
Raises:
:exc:`~dwave.cloud.exceptions.ConfigFileReadError`:
Config file specified or detected could not be opened or read.
:exc:`~dwave.cloud.exceptions.ConfigFileParseError`:
Config file parse failed.
Examples:
A variety of examples are given in :mod:`dwave.cloud.config`.
This example initializes :class:`~dwave.cloud.client.Client` from an
explicitly specified configuration file, "~/jane/my_path_to_config/my_cloud_conf.conf"::
>>> from dwave.cloud import Client
>>> client = Client.from_config(config_file='~/jane/my_path_to_config/my_cloud_conf.conf') # doctest: +SKIP
>>> # code that uses client
>>> client.close()
|
entailment
|
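As a complement to the file-based example above, the environment-variable route can be sketched as follows. This is only a hedged illustration: the token is a placeholder rather than a real credential, and skipping file loading with ``config_file=False`` is just one of the options described above.

import os
from dwave.cloud import Client

# Placeholder token, not a real credential; normally this comes from your account.
os.environ['DWAVE_API_TOKEN'] = 'ABC-123-PLACEHOLDER'
client = Client.from_config(config_file=False)   # skip configuration-file loading entirely
# ... code that uses client ...
client.close()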
def close(self):
"""Perform a clean shutdown.
Waits for all the currently scheduled work to finish, kills the workers,
and closes the connection pool.
.. note:: Ensure your code does not submit new work while the connection is closing.
Where possible, it is recommended you use a context manager (a :code:`with Client.from_config(...) as`
construct) to ensure your code properly closes all resources.
Examples:
This example creates a client (based on an auto-detected configuration file), executes
some code (represented by a placeholder comment), and then closes the client.
>>> from dwave.cloud import Client
>>> client = Client.from_config()
>>> # code that uses client
>>> client.close()
"""
# Finish all the work that requires the connection
_LOGGER.debug("Joining submission queue")
self._submission_queue.join()
_LOGGER.debug("Joining cancel queue")
self._cancel_queue.join()
_LOGGER.debug("Joining poll queue")
self._poll_queue.join()
_LOGGER.debug("Joining load queue")
self._load_queue.join()
# Send kill-task to all worker threads
# Note: threads can't be 'killed' in Python, they have to die by
# natural causes
for _ in self._submission_workers:
self._submission_queue.put(None)
for _ in self._cancel_workers:
self._cancel_queue.put(None)
for _ in self._poll_workers:
self._poll_queue.put((-1, None))
for _ in self._load_workers:
self._load_queue.put(None)
# Wait for threads to die
for worker in chain(self._submission_workers, self._cancel_workers,
self._poll_workers, self._load_workers):
worker.join()
# Close the requests session
self.session.close()
|
Perform a clean shutdown.
Waits for all the currently scheduled work to finish, kills the workers,
and closes the connection pool.
.. note:: Ensure your code does not submit new work while the connection is closing.
Where possible, it is recommended you use a context manager (a :code:`with Client.from_config(...) as`
construct) to ensure your code properly closes all resources.
Examples:
This example creates a client (based on an auto-detected configuration file), executes
some code (represented by a placeholder comment), and then closes the client.
>>> from dwave.cloud import Client
>>> client = Client.from_config()
>>> # code that uses client
>>> client.close()
|
entailment
|
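A minimal sketch of the context-manager pattern recommended above; it assumes an auto-detected configuration and only lists the available solvers.

from dwave.cloud import Client

# The context manager calls close() on exit, even if the body raises,
# so no new work is submitted while the connection is closing.
with Client.from_config() as client:
    print([solver.id for solver in client.get_solvers()])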
def get_solvers(self, refresh=False, order_by='avg_load', **filters):
"""Return a filtered list of solvers handled by this client.
Args:
refresh (bool, default=False):
Force refresh of cached list of solvers/properties.
order_by (callable/str/None, default='avg_load'):
Solver sorting key function (or :class:`Solver` attribute/item
dot-separated path). By default, solvers are sorted by average
load. To explicitly not sort the solvers (and use the API-returned
order), set ``order_by=None``.
Signature of the `key` `callable` is::
key :: (Solver s, Ord k) => s -> k
Basic structure of the `key` string path is::
"-"? (attr|item) ( "." (attr|item) )*
For example, to use solver property named ``max_anneal_schedule_points``,
available in ``Solver.properties`` dict, you can either specify a
callable `key`::
key=lambda solver: solver.properties['max_anneal_schedule_points']
or, you can use a short string path based key::
key='properties.max_anneal_schedule_points'
Solver derived properties, available as :class:`Solver` properties
can also be used (e.g. ``num_active_qubits``, ``online``,
``avg_load``, etc).
Ascending sort order is implied, unless the key string path starts
with ``-``, in which case descending sort is used.
Note: the sort used for ordering solvers by `key` is **stable**,
meaning that if multiple solvers have the same value for the
key, their relative order is preserved, and effectively they are
in the same order as returned by the API.
Note: solvers with ``None`` for key appear last in the list of
solvers. When providing a key callable, ensure all values returned
are of the same type (particularly in Python 3). For solvers with
undefined key value, return ``None``.
**filters:
See `Filtering forms` and `Operators` below.
Solver filters are defined, similarly to Django QuerySet filters, with
keyword arguments of form `<key1>__...__<keyN>[__<operator>]=<value>`.
Each `<operator>` is a predicate (boolean) function that acts on two
arguments: value of feature `<name>` (described with keys path
`<key1.key2...keyN>`) and the required `<value>`.
Feature `<name>` can be:
1) a derived solver property, available as an identically named
:class:`Solver`'s property (`name`, `qpu`, `software`, `online`,
`num_active_qubits`, `avg_load`)
2) a solver parameter, available in :obj:`Solver.parameters`
3) a solver property, available in :obj:`Solver.properties`
4) a path describing a property in nested dictionaries
Filtering forms are:
* <derived_property>__<operator> (object <value>)
* <derived_property> (bool)
This form ensures the value of solver's property bound to `derived_property`,
after applying `operator` equals the `value`. The default operator is `eq`.
For example::
>>> client.get_solvers(avg_load__gt=0.5)
but also::
>>> client.get_solvers(online=True)
>>> # identical to:
>>> client.get_solvers(online__eq=True)
* <parameter>__<operator> (object <value>)
* <parameter> (bool)
This form ensures that the solver supports `parameter`. General operator form can
be used but usually does not make sense for parameters, since values are human-readable
descriptions. The default operator is `available`.
Example::
>>> client.get_solvers(flux_biases=True)
>>> # identical to:
>>> client.get_solvers(flux_biases__available=True)
* <property>__<operator> (object <value>)
* <property> (bool)
This form ensures the value of the solver's `property`, after applying `operator`
equals the righthand side `value`. The default operator is `eq`.
Note: if a non-existing parameter/property name/key is given, the default operator is `eq`.
Operators are:
* `available` (<name>: str, <value>: bool):
Test availability of <name> feature.
* `eq`, `lt`, `lte`, `gt`, `gte` (<name>: str, <value>: any):
Standard relational operators that compare feature <name> value with <value>.
* `regex` (<name>: str, <value>: str):
Test regular expression matching feature value.
* `covers` (<name>: str, <value>: single value or range expressed as 2-tuple/list):
Test feature <name> value (which should be a *range*) covers a given value or a subrange.
* `within` (<name>: str, <value>: range expressed as 2-tuple/list):
Test feature <name> value (which can be a *single value* or a *range*) is within a given range.
* `in` (<name>: str, <value>: container type):
Test feature <name> value is *in* <value> container.
* `contains` (<name>: str, <value>: any):
Test feature <name> value (container type) *contains* <value>.
* `issubset` (<name>: str, <value>: container type):
Test feature <name> value (container type) is a subset of <value>.
* `issuperset` (<name>: str, <value>: container type):
Test feature <name> value (container type) is a superset of <value>.
Derived properties are:
* `name` (str): Solver name/id.
* `qpu` (bool): Is solver QPU based?
* `software` (bool): Is solver software based?
* `online` (bool, default=True): Is solver online?
* `num_active_qubits` (int): Number of active qubits. Less than or equal to `num_qubits`.
* `avg_load` (float): Solver's average load (similar to Unix load average).
Common solver parameters are:
* `flux_biases`: Should solver accept flux biases?
* `anneal_schedule`: Should solver accept anneal schedule?
Common solver properties are:
* `num_qubits` (int): Number of qubits available.
* `vfyc` (bool): Should solver work on "virtual full-yield chip"?
* `max_anneal_schedule_points` (int): Piecewise linear annealing schedule points.
* `h_range` ([int,int]), j_range ([int,int]): Biases/couplings values range.
* `num_reads_range` ([int,int]): Range of allowed values for `num_reads` parameter.
Returns:
list[Solver]: List of all solvers that satisfy the conditions.
Note:
Client subclasses (e.g. :class:`dwave.cloud.qpu.Client` or
:class:`dwave.cloud.sw.Client`) already filter solvers by resource
type, so for `qpu` and `software` filters to have effect, call :meth:`.get_solvers`
on base class :class:`~dwave.cloud.client.Client`.
Examples::
client.get_solvers(
num_qubits__gt=2000, # we need more than 2000 qubits
num_qubits__lt=4000, # ... but fewer than 4000 qubits
num_qubits__within=(2000, 4000), # an alternative to the previous two lines
num_active_qubits=1089, # we want a particular number of active qubits
vfyc=True, # we require a fully yielded Chimera
vfyc__in=[False, None], # inverse of the previous filter
vfyc__available=False, # we want solvers that do not advertise the vfyc property
anneal_schedule=True, # we need support for custom anneal schedule
max_anneal_schedule_points__gte=4, # we need at least 4 points for our anneal schedule
num_reads_range__covers=1000, # our solver must support returning 1000 reads
extended_j_range__covers=[-2, 2], # we need extended J range to contain subrange [-2,2]
couplings__contains=[0, 128], # coupling (edge between) qubits (0,128) must exist
couplings__issuperset=[[0,128], [0,4]],
# two couplings required: (0,128) and (0,4)
qubits__issuperset={0, 4, 215}, # qubits 0, 4 and 215 must exist
supported_problem_types__issubset={'ising', 'qubo'},
# require Ising, QUBO or both to be supported
name='DW_2000Q_3', # full solver name/ID match
name__regex='.*2000.*', # partial/regex-based solver name match
chip_id__regex='DW_.*', # chip ID prefix must be DW_
topology__type__eq="chimera" # topology.type must be chimera
)
"""
def covers_op(prop, val):
"""Does LHS `prop` (range) fully cover RHS `val` (range or item)?"""
# `prop` must be a 2-element list/tuple range.
if not isinstance(prop, (list, tuple)) or not len(prop) == 2:
raise ValueError("2-element list/tuple range required for LHS value")
llo, lhi = min(prop), max(prop)
# `val` can be a single value, or a range (2-list/2-tuple).
if isinstance(val, (list, tuple)) and len(val) == 2:
# val range within prop range?
rlo, rhi = min(val), max(val)
return llo <= rlo and lhi >= rhi
else:
# val item within prop range?
return llo <= val <= lhi
def within_op(prop, val):
"""Is LHS `prop` (range or item) fully covered by RHS `val` (range)?"""
try:
return covers_op(val, prop)
except ValueError:
raise ValueError("2-element list/tuple range required for RHS value")
def _set(iterable):
"""Like set(iterable), but works for lists as items in iterable.
Before constructing a set, lists are converted to tuples.
"""
first = next(iter(iterable))
if isinstance(first, list):
return set(tuple(x) for x in iterable)
return set(iterable)
def with_valid_lhs(op):
@wraps(op)
def _wrapper(prop, val):
if prop is None:
return False
return op(prop, val)
return _wrapper
# available filtering operators
ops = {
'lt': with_valid_lhs(operator.lt),
'lte': with_valid_lhs(operator.le),
'gt': with_valid_lhs(operator.gt),
'gte': with_valid_lhs(operator.ge),
'eq': operator.eq,
'available': lambda prop, val: prop is not None if val else prop is None,
'regex': with_valid_lhs(lambda prop, val: re.match("^{}$".format(val), prop)),
# range operations
'covers': with_valid_lhs(covers_op),
'within': with_valid_lhs(within_op),
# membership tests
'in': lambda prop, val: prop in val,
'contains': with_valid_lhs(lambda prop, val: val in prop),
# set tests
'issubset': with_valid_lhs(lambda prop, val: _set(prop).issubset(_set(val))),
'issuperset': with_valid_lhs(lambda prop, val: _set(prop).issuperset(_set(val))),
}
def predicate(solver, query, val):
# needs to handle kwargs like these:
# key=val
# key__op=val
# key__key=val
# key__key__op=val
# LHS is split on __ in `query`
assert len(query) >= 1
potential_path, potential_op_name = query[:-1], query[-1]
if potential_op_name in ops:
# op is explicit, and potential path is correct
op_name = potential_op_name
else:
# op is implied and depends on property type, path is the whole query
op_name = None
potential_path = query
path = '.'.join(potential_path)
if path in solver.derived_properties:
op = ops[op_name or 'eq']
return op(getattr(solver, path), val)
elif pluck(solver.parameters, path, None) is not None:
op = ops[op_name or 'available']
return op(pluck(solver.parameters, path), val)
elif pluck(solver.properties, path, None) is not None:
op = ops[op_name or 'eq']
return op(pluck(solver.properties, path), val)
else:
op = ops[op_name or 'eq']
return op(None, val)
# param validation
sort_reverse = False
if not order_by:
sort_key = None
elif isinstance(order_by, six.string_types):
if order_by[0] == '-':
sort_reverse = True
order_by = order_by[1:]
if not order_by:
sort_key = None
else:
sort_key = lambda solver: pluck(solver, order_by, None)
elif callable(order_by):
sort_key = order_by
else:
raise TypeError("expected string or callable for 'order_by'")
# default filters:
filters.setdefault('online', True)
predicates = []
for lhs, val in filters.items():
query = lhs.split('__')
predicates.append(partial(predicate, query=query, val=val))
_LOGGER.debug("Filtering solvers with predicates=%r", predicates)
# optimization for case when exact solver name/id is known:
# we can fetch only that solver
# NOTE: in future, complete feature-based filtering will be on server-side
query = dict(refresh_=refresh)
if 'name' in filters:
query['name'] = filters['name']
if 'name__eq' in filters:
query['name'] = filters['name__eq']
# filter
solvers = self._fetch_solvers(**query)
solvers = [s for s in solvers if all(p(s) for p in predicates)]
# sort: undefined (None) key values go last
if sort_key is not None:
solvers_with_keys = [(sort_key(solver), solver) for solver in solvers]
solvers_with_invalid_keys = [(key, solver) for key, solver in solvers_with_keys if key is None]
solvers_with_valid_keys = [(key, solver) for key, solver in solvers_with_keys if key is not None]
solvers_with_valid_keys.sort(key=operator.itemgetter(0))
solvers = [solver for key, solver in chain(solvers_with_valid_keys, solvers_with_invalid_keys)]
# reverse if necessary (as a separate step from sorting, so it works for invalid keys
# and plain list reverse without sorting)
if sort_reverse:
solvers.reverse()
return solvers
|
Return a filtered list of solvers handled by this client.
Args:
refresh (bool, default=False):
Force refresh of cached list of solvers/properties.
order_by (callable/str/None, default='avg_load'):
Solver sorting key function (or :class:`Solver` attribute/item
dot-separated path). By default, solvers are sorted by average
load. To explicitly not sort the solvers (and use the API-returned
order), set ``order_by=None``.
Signature of the `key` `callable` is::
key :: (Solver s, Ord k) => s -> k
Basic structure of the `key` string path is::
"-"? (attr|item) ( "." (attr|item) )*
For example, to use solver property named ``max_anneal_schedule_points``,
available in ``Solver.properties`` dict, you can either specify a
callable `key`::
key=lambda solver: solver.properties['max_anneal_schedule_points']
or, you can use a short string path based key::
key='properties.max_anneal_schedule_points'
Solver derived properties, available as :class:`Solver` properties
can also be used (e.g. ``num_active_qubits``, ``online``,
``avg_load``, etc).
Ascending sort order is implied, unless the key string path starts
with ``-``, in which case descending sort is used.
Note: the sort used for ordering solvers by `key` is **stable**,
meaning that if multiple solvers have the same value for the
key, their relative order is preserved, and effectively they are
in the same order as returned by the API.
Note: solvers with ``None`` for key appear last in the list of
solvers. When providing a key callable, ensure all values returned
are of the same type (particularly in Python 3). For solvers with
undefined key value, return ``None``.
**filters:
See `Filtering forms` and `Operators` below.
Solver filters are defined, similarly to Django QuerySet filters, with
keyword arguments of form `<key1>__...__<keyN>[__<operator>]=<value>`.
Each `<operator>` is a predicate (boolean) function that acts on two
arguments: value of feature `<name>` (described with keys path
`<key1.key2...keyN>`) and the required `<value>`.
Feature `<name>` can be:
1) a derived solver property, available as an identically named
:class:`Solver`'s property (`name`, `qpu`, `software`, `online`,
`num_active_qubits`, `avg_load`)
2) a solver parameter, available in :obj:`Solver.parameters`
3) a solver property, available in :obj:`Solver.properties`
4) a path describing a property in nested dictionaries
Filtering forms are:
* <derived_property>__<operator> (object <value>)
* <derived_property> (bool)
This form ensures the value of solver's property bound to `derived_property`,
after applying `operator` equals the `value`. The default operator is `eq`.
For example::
>>> client.get_solvers(avg_load__gt=0.5)
but also::
>>> client.get_solvers(online=True)
>>> # identical to:
>>> client.get_solvers(online__eq=True)
* <parameter>__<operator> (object <value>)
* <parameter> (bool)
This form ensures that the solver supports `parameter`. General operator form can
be used but usually does not make sense for parameters, since values are human-readable
descriptions. The default operator is `available`.
Example::
>>> client.get_solvers(flux_biases=True)
>>> # identical to:
>>> client.get_solvers(flux_biases__available=True)
* <property>__<operator> (object <value>)
* <property> (bool)
This form ensures the value of the solver's `property`, after applying `operator`
equals the righthand side `value`. The default operator is `eq`.
Note: if a non-existing parameter/property name/key is given, the default operator is `eq`.
Operators are:
* `available` (<name>: str, <value>: bool):
Test availability of <name> feature.
* `eq`, `lt`, `lte`, `gt`, `gte` (<name>: str, <value>: any):
Standard relational operators that compare feature <name> value with <value>.
* `regex` (<name>: str, <value>: str):
Test regular expression matching feature value.
* `covers` (<name>: str, <value>: single value or range expressed as 2-tuple/list):
Test feature <name> value (which should be a *range*) covers a given value or a subrange.
* `within` (<name>: str, <value>: range expressed as 2-tuple/list):
Test feature <name> value (which can be a *single value* or a *range*) is within a given range.
* `in` (<name>: str, <value>: container type):
Test feature <name> value is *in* <value> container.
* `contains` (<name>: str, <value>: any):
Test feature <name> value (container type) *contains* <value>.
* `issubset` (<name>: str, <value>: container type):
Test feature <name> value (container type) is a subset of <value>.
* `issuperset` (<name>: str, <value>: container type):
Test feature <name> value (container type) is a superset of <value>.
Derived properties are:
* `name` (str): Solver name/id.
* `qpu` (bool): Is solver QPU based?
* `software` (bool): Is solver software based?
* `online` (bool, default=True): Is solver online?
* `num_active_qubits` (int): Number of active qubits. Less than or equal to `num_qubits`.
* `avg_load` (float): Solver's average load (similar to Unix load average).
Common solver parameters are:
* `flux_biases`: Should solver accept flux biases?
* `anneal_schedule`: Should solver accept anneal schedule?
Common solver properties are:
* `num_qubits` (int): Number of qubits available.
* `vfyc` (bool): Should solver work on "virtual full-yield chip"?
* `max_anneal_schedule_points` (int): Piecewise linear annealing schedule points.
* `h_range` ([int,int]), j_range ([int,int]): Biases/couplings values range.
* `num_reads_range` ([int,int]): Range of allowed values for `num_reads` parameter.
Returns:
list[Solver]: List of all solvers that satisfy the conditions.
Note:
Client subclasses (e.g. :class:`dwave.cloud.qpu.Client` or
:class:`dwave.cloud.sw.Client`) already filter solvers by resource
type, so for `qpu` and `software` filters to have effect, call :meth:`.get_solvers`
on base class :class:`~dwave.cloud.client.Client`.
Examples::
client.get_solvers(
num_qubits__gt=2000, # we need more than 2000 qubits
num_qubits__lt=4000, # ... but fewer than 4000 qubits
num_qubits__within=(2000, 4000), # an alternative to the previous two lines
num_active_qubits=1089, # we want a particular number of active qubits
vfyc=True, # we require a fully yielded Chimera
vfyc__in=[False, None], # inverse of the previous filter
vfyc__available=False, # we want solvers that do not advertise the vfyc property
anneal_schedule=True, # we need support for custom anneal schedule
max_anneal_schedule_points__gte=4, # we need at least 4 points for our anneal schedule
num_reads_range__covers=1000, # our solver must support returning 1000 reads
extended_j_range__covers=[-2, 2], # we need extended J range to contain subrange [-2,2]
couplings__contains=[0, 128], # coupling (edge between) qubits (0,128) must exist
couplings__issuperset=[[0,128], [0,4]],
# two couplings required: (0,128) and (0,4)
qubits__issuperset={0, 4, 215}, # qubits 0, 4 and 215 must exist
supported_problem_types__issubset={'ising', 'qubo'},
# require Ising, QUBO or both to be supported
name='DW_2000Q_3', # full solver name/ID match
name__regex='.*2000.*', # partial/regex-based solver name match
chip_id__regex='DW_.*', # chip ID prefix must be DW_
topology__type__eq="chimera" # topology.type must be chimera
)
|
entailment
|
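To make the two ``order_by`` forms concrete, here is a small sketch; the ``client`` object is assumed to exist and the property name is only illustrative.

# String-path key with a leading "-" for descending sort by average load:
solvers = client.get_solvers(order_by='-avg_load')

# Equivalent callable key; returning None for solvers that lack the
# property pushes them to the end of the list, as noted above.
solvers = client.get_solvers(
    order_by=lambda solver: solver.properties.get('max_anneal_schedule_points'))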
def solvers(self, refresh=False, **filters):
"""Deprecated in favor of :meth:`.get_solvers`."""
warnings.warn("'solvers' is deprecated in favor of 'get_solvers'.", DeprecationWarning)
return self.get_solvers(refresh=refresh, **filters)
|
Deprecated in favor of :meth:`.get_solvers`.
|
entailment
|
def get_solver(self, name=None, refresh=False, **filters):
"""Load the configuration for a single solver.
Makes a blocking web call to `{endpoint}/solvers/remote/{solver_name}/`, where `{endpoint}`
is a URL configured for the client, and returns a :class:`.Solver` instance
that can be used to submit sampling problems to the D-Wave API and retrieve results.
Args:
name (str):
ID of the requested solver. ``None`` returns the default solver.
If default solver is not configured, ``None`` returns the first available
solver in ``Client``'s class (QPU/software/base).
**filters (keyword arguments, optional):
Dictionary of filters over features this solver has to have. For a list of
feature names and values, see: :meth:`~dwave.cloud.client.Client.get_solvers`.
order_by (callable/str, default='id'):
Solver sorting key function (or :class:`Solver` attribute name).
By default, solvers are sorted by ID/name.
refresh (bool):
Return solver from cache (if cached with ``get_solvers()``),
unless set to ``True``.
Returns:
:class:`.Solver`
Examples:
This example creates two solvers for a client instantiated from
a local system's auto-detected default configuration file, which configures
a connection to a D-Wave resource that provides two solvers. The first
uses the default solver, the second explicitly selects another solver.
>>> from dwave.cloud import Client
>>> client = Client.from_config()
>>> client.get_solvers() # doctest: +SKIP
[Solver(id='2000Q_ONLINE_SOLVER1'), Solver(id='2000Q_ONLINE_SOLVER2')]
>>> solver1 = client.get_solver() # doctest: +SKIP
>>> solver2 = client.get_solver(name='2000Q_ONLINE_SOLVER2') # doctest: +SKIP
>>> solver1.id # doctest: +SKIP
'2000Q_ONLINE_SOLVER1'
>>> solver2.id # doctest: +SKIP
'2000Q_ONLINE_SOLVER2'
>>> # code that uses client
>>> client.close() # doctest: +SKIP
"""
_LOGGER.debug("Requested a solver that best matches feature filters=%r", filters)
# backward compatibility: name as the first feature
if name is not None:
filters.setdefault('name', name)
# in absence of other filters, config/env solver filters/name are used
if not filters and self.default_solver:
filters = self.default_solver
# get the first solver that satisfies all filters
try:
_LOGGER.debug("Fetching solvers according to filters=%r", filters)
return self.get_solvers(refresh=refresh, **filters)[0]
except IndexError:
raise SolverNotFoundError("Solver with the requested features not available")
|
Load the configuration for a single solver.
Makes a blocking web call to `{endpoint}/solvers/remote/{solver_name}/`, where `{endpoint}`
is a URL configured for the client, and returns a :class:`.Solver` instance
that can be used to submit sampling problems to the D-Wave API and retrieve results.
Args:
name (str):
ID of the requested solver. ``None`` returns the default solver.
If default solver is not configured, ``None`` returns the first available
solver in ``Client``'s class (QPU/software/base).
**filters (keyword arguments, optional):
Dictionary of filters over features this solver has to have. For a list of
feature names and values, see: :meth:`~dwave.cloud.client.Client.get_solvers`.
order_by (callable/str, default='id'):
Solver sorting key function (or :class:`Solver` attribute name).
By default, solvers are sorted by ID/name.
refresh (bool):
Return solver from cache (if cached with ``get_solvers()``),
unless set to ``True``.
Returns:
:class:`.Solver`
Examples:
This example creates two solvers for a client instantiated from
a local system's auto-detected default configuration file, which configures
a connection to a D-Wave resource that provides two solvers. The first
uses the default solver, the second explicitly selects another solver.
>>> from dwave.cloud import Client
>>> client = Client.from_config()
>>> client.get_solvers() # doctest: +SKIP
[Solver(id='2000Q_ONLINE_SOLVER1'), Solver(id='2000Q_ONLINE_SOLVER2')]
>>> solver1 = client.get_solver() # doctest: +SKIP
>>> solver2 = client.get_solver(name='2000Q_ONLINE_SOLVER2') # doctest: +SKIP
>>> solver1.id # doctest: +SKIP
'2000Q_ONLINE_SOLVER1'
>>> solver2.id # doctest: +SKIP
'2000Q_ONLINE_SOLVER2'
>>> # code that uses client
>>> client.close() # doctest: +SKIP
|
entailment
|
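A hedged sketch of feature-based selection with ``get_solver``; the filter values are illustrative and the fallback simply takes the default solver.

from dwave.cloud.exceptions import SolverNotFoundError

try:
    solver = client.get_solver(qpu=True, num_qubits__gte=2000)
except SolverNotFoundError:
    solver = client.get_solver()   # fall back to the default solver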
def _submit(self, body, future):
"""Enqueue a problem for submission to the server.
This method is thread safe.
"""
self._submission_queue.put(self._submit.Message(body, future))
|
Enqueue a problem for submission to the server.
This method is thread safe.
|
entailment
|
def _do_submit_problems(self):
"""Pull problems from the submission queue and submit them.
Note:
This method is always run inside of a daemon thread.
"""
try:
while True:
# Pull as many problems as we can, block on the first one,
# but once we have one problem, switch to non-blocking then
# submit without blocking again.
# `None` task is used to signal thread termination
item = self._submission_queue.get()
if item is None:
break
ready_problems = [item]
while len(ready_problems) < self._SUBMIT_BATCH_SIZE:
try:
ready_problems.append(self._submission_queue.get_nowait())
except queue.Empty:
break
# Submit the problems
_LOGGER.debug("Submitting %d problems", len(ready_problems))
body = '[' + ','.join(mess.body for mess in ready_problems) + ']'
try:
try:
response = self.session.post(posixpath.join(self.endpoint, 'problems/'), body)
localtime_of_response = epochnow()
except requests.exceptions.Timeout:
raise RequestTimeout
if response.status_code == 401:
raise SolverAuthenticationError()
response.raise_for_status()
message = response.json()
_LOGGER.debug("Finished submitting %d problems", len(ready_problems))
except BaseException as exception:
_LOGGER.debug("Submit failed for %d problems", len(ready_problems))
if not isinstance(exception, SolverAuthenticationError):
exception = IOError(exception)
for mess in ready_problems:
mess.future._set_error(exception, sys.exc_info())
self._submission_queue.task_done()
continue
# Pass on the information
for submission, res in zip(ready_problems, message):
submission.future._set_clock_diff(response, localtime_of_response)
self._handle_problem_status(res, submission.future)
self._submission_queue.task_done()
# this is equivalent to a yield to scheduler in other threading libraries
time.sleep(0)
except BaseException as err:
_LOGGER.exception(err)
|
Pull problems from the submission queue and submit them.
Note:
This method is always run inside of a daemon thread.
|
entailment
|
def _handle_problem_status(self, message, future):
"""Handle the results of a problem submission or results request.
This method checks the status of the problem and puts it in the correct queue.
Args:
message (dict): Update message from the SAPI server wrt. this problem.
future `Future`: future corresponding to the problem
Note:
This method is always run inside of a daemon thread.
"""
try:
_LOGGER.trace("Handling response: %r", message)
_LOGGER.debug("Handling response for %s with status %s", message.get('id'), message.get('status'))
# Handle errors in batch mode
if 'error_code' in message and 'error_msg' in message:
raise SolverFailureError(message['error_msg'])
if 'status' not in message:
raise InvalidAPIResponseError("'status' missing in problem description response")
if 'id' not in message:
raise InvalidAPIResponseError("'id' missing in problem description response")
future.id = message['id']
future.remote_status = status = message['status']
# The future may not have the ID set yet
with future._single_cancel_lock:
# This handles the case where cancel has been called on a future
# before that future received the problem id
if future._cancel_requested:
if not future._cancel_sent and status == self.STATUS_PENDING:
# The problem has been canceled but the status says its still in queue
# try to cancel it
self._cancel(message['id'], future)
# If a cancel request could meaningfully be sent it has been now
future._cancel_sent = True
if not future.time_received and message.get('submitted_on'):
future.time_received = parse_datetime(message['submitted_on'])
if not future.time_solved and message.get('solved_on'):
future.time_solved = parse_datetime(message['solved_on'])
if not future.eta_min and message.get('earliest_estimated_completion'):
future.eta_min = parse_datetime(message['earliest_estimated_completion'])
if not future.eta_max and message.get('latest_estimated_completion'):
future.eta_max = parse_datetime(message['latest_estimated_completion'])
if status == self.STATUS_COMPLETE:
# TODO: find a better way to differentiate between
# `completed-on-submit` and `completed-on-poll`.
# Loading should happen only once, not every time when response
# doesn't contain 'answer'.
# If the message is complete, forward it to the future object
if 'answer' in message:
future._set_message(message)
# If the problem is complete, but we don't have the result data
# put the problem in the queue for loading results.
else:
self._load(future)
elif status in self.ANY_STATUS_ONGOING:
# If the response is pending add it to the queue.
self._poll(future)
elif status == self.STATUS_CANCELLED:
# If canceled return error
raise CanceledFutureError()
else:
# Return an error to the future object
errmsg = message.get('error_message', 'An unknown error has occurred.')
if 'solver is offline' in errmsg.lower():
raise SolverOfflineError(errmsg)
else:
raise SolverFailureError(errmsg)
except Exception as error:
# If there were any unhandled errors we need to release the
# lock in the future, otherwise deadlock occurs.
future._set_error(error, sys.exc_info())
|
Handle the results of a problem submission or results request.
This method checks the status of the problem and puts it in the correct queue.
Args:
message (dict): Update message from the SAPI server wrt. this problem.
future `Future`: future corresponding to the problem
Note:
This method is always run inside of a daemon thread.
|
entailment
|
def _do_cancel_problems(self):
"""Pull ids from the cancel queue and submit them.
Note:
This method is always run inside of a daemon thread.
"""
try:
while True:
# Pull as many problems as we can, block when none are available.
# `None` task is used to signal thread termination
item = self._cancel_queue.get()
if item is None:
break
item_list = [item]
while True:
try:
item_list.append(self._cancel_queue.get_nowait())
except queue.Empty:
break
# Submit the problems, attach the ids as a json list in the
# body of the delete query.
try:
body = [item[0] for item in item_list]
try:
self.session.delete(posixpath.join(self.endpoint, 'problems/'), json=body)
except requests.exceptions.Timeout:
raise RequestTimeout
except Exception as err:
for _, future in item_list:
if future is not None:
future._set_error(err, sys.exc_info())
# Mark all the ids as processed regardless of success or failure.
[self._cancel_queue.task_done() for _ in item_list]
# this is equivalent to a yield to scheduler in other threading libraries
time.sleep(0)
except Exception as err:
_LOGGER.exception(err)
|
Pull ids from the cancel queue and submit them.
Note:
This method is always run inside of a daemon thread.
|
entailment
|
def _poll(self, future):
"""Enqueue a problem to poll the server for status."""
if future._poll_backoff is None:
# on first poll, start with minimal back-off
future._poll_backoff = self._POLL_BACKOFF_MIN
# if we have ETA of results, schedule the first poll for then
if future.eta_min and self._is_clock_diff_acceptable(future):
at = datetime_to_timestamp(future.eta_min)
_LOGGER.debug("Response ETA indicated and local clock reliable. "
"Scheduling first polling at +%.2f sec", at - epochnow())
else:
at = time.time() + future._poll_backoff
_LOGGER.debug("Response ETA not indicated, or local clock unreliable. "
"Scheduling first polling at +%.2f sec", at - epochnow())
else:
# update exponential poll back-off, clipped to a range
future._poll_backoff = \
max(self._POLL_BACKOFF_MIN,
min(future._poll_backoff * 2, self._POLL_BACKOFF_MAX))
# for poll priority we use timestamp of next scheduled poll
at = time.time() + future._poll_backoff
now = utcnow()
future_age = (now - future.time_created).total_seconds()
_LOGGER.debug("Polling scheduled at %.2f with %.2f sec new back-off for: %s (future's age: %.2f sec)",
at, future._poll_backoff, future.id, future_age)
# don't enqueue for next poll if polling_timeout is exceeded by then
future_age_on_next_poll = future_age + (at - datetime_to_timestamp(now))
if self.polling_timeout is not None and future_age_on_next_poll > self.polling_timeout:
_LOGGER.debug("Polling timeout exceeded before next poll: %.2f sec > %.2f sec, aborting polling!",
future_age_on_next_poll, self.polling_timeout)
raise PollingTimeout
self._poll_queue.put((at, future))
|
Enqueue a problem to poll the server for status.
|
entailment
|
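The clipped exponential back-off used above to space successive polls can be illustrated in isolation; the bound values below are assumptions for the sketch, not the client's actual constants.

POLL_BACKOFF_MIN = 0.05   # seconds, assumed for illustration
POLL_BACKOFF_MAX = 60.0   # seconds, assumed for illustration

def next_backoff(current):
    """Return the next poll delay: start minimal, then double, clipped to a range."""
    if current is None:
        return POLL_BACKOFF_MIN
    return max(POLL_BACKOFF_MIN, min(current * 2, POLL_BACKOFF_MAX))

delays, backoff = [], None
for _ in range(5):
    backoff = next_backoff(backoff)
    delays.append(backoff)
print(delays)   # [0.05, 0.1, 0.2, 0.4, 0.8]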
def _do_poll_problems(self):
"""Poll the server for the status of a set of problems.
Note:
This method is always run inside of a daemon thread.
"""
try:
# grouped futures (all scheduled within _POLL_GROUP_TIMEFRAME)
frame_futures = {}
def task_done():
self._poll_queue.task_done()
def add(future):
# add future to query frame_futures
# returns: worker lives on?
# `None` task signifies thread termination
if future is None:
task_done()
return False
if future.id not in frame_futures and not future.done():
frame_futures[future.id] = future
else:
task_done()
return True
while True:
frame_futures.clear()
# blocking add first scheduled
frame_earliest, future = self._poll_queue.get()
if not add(future):
return
# try grouping if scheduled within grouping timeframe
while len(frame_futures) < self._STATUS_QUERY_SIZE:
try:
task = self._poll_queue.get_nowait()
except queue.Empty:
break
at, future = task
if at - frame_earliest <= self._POLL_GROUP_TIMEFRAME:
if not add(future):
return
else:
task_done()
self._poll_queue.put(task)
break
# build a query string with ids of all futures in this frame
ids = [future.id for future in frame_futures.values()]
_LOGGER.debug("Polling for status of futures: %s", ids)
query_string = 'problems/?id=' + ','.join(ids)
# if futures were cancelled while `add`ing, skip empty frame
if not ids:
continue
# wait until `frame_earliest` before polling
delay = frame_earliest - time.time()
if delay > 0:
_LOGGER.debug("Pausing polling %.2f sec for futures: %s", delay, ids)
time.sleep(delay)
else:
_LOGGER.trace("Skipping non-positive delay of %.2f sec", delay)
try:
_LOGGER.trace("Executing poll API request")
try:
response = self.session.get(posixpath.join(self.endpoint, query_string))
except requests.exceptions.Timeout:
raise RequestTimeout
if response.status_code == 401:
raise SolverAuthenticationError()
response.raise_for_status()
statuses = response.json()
for status in statuses:
self._handle_problem_status(status, frame_futures[status['id']])
except BaseException as exception:
if not isinstance(exception, SolverAuthenticationError):
exception = IOError(exception)
for id_ in frame_futures.keys():
frame_futures[id_]._set_error(exception, sys.exc_info())
for id_ in frame_futures.keys():
task_done()
time.sleep(0)
except Exception as err:
_LOGGER.exception(err)
|
Poll the server for the status of a set of problems.
Note:
This method is always run inside of a daemon thread.
|
entailment
|
def _do_load_results(self):
"""Submit a query asking for the results for a particular problem.
To request the results of a problem: ``GET /problems/{problem_id}/``
Note:
This method is always run inside of a daemon thread.
"""
try:
while True:
# Select a problem
future = self._load_queue.get()
# `None` task signifies thread termination
if future is None:
break
_LOGGER.debug("Loading results of: %s", future.id)
# Submit the query
query_string = 'problems/{}/'.format(future.id)
try:
try:
response = self.session.get(posixpath.join(self.endpoint, query_string))
except requests.exceptions.Timeout:
raise RequestTimeout
if response.status_code == 401:
raise SolverAuthenticationError()
response.raise_for_status()
message = response.json()
except BaseException as exception:
if not isinstance(exception, SolverAuthenticationError):
exception = IOError(exception)
future._set_error(exception, sys.exc_info())
continue
# Dispatch the results, mark the task complete
self._handle_problem_status(message, future)
self._load_queue.task_done()
# this is equivalent to a yield to scheduler in other threading libraries
time.sleep(0)
except Exception as err:
_LOGGER.error('Load result error: ' + str(err))
|
Submit a query asking for the results for a particular problem.
To request the results of a problem: ``GET /problems/{problem_id}/``
Note:
This method is always run inside of a daemon thread.
|
entailment
|
def encode_bqm_as_qp(solver, linear, quadratic):
"""Encode the binary quadratic problem for submission to a given solver,
using the `qp` format for data.
Args:
solver (:class:`dwave.cloud.solver.Solver`):
The solver used.
linear (dict[variable, bias]/list[variable, bias]):
Linear terms of the model.
quadratic (dict[(variable, variable), bias]):
Quadratic terms of the model.
Returns:
encoded submission dictionary
"""
active = active_qubits(linear, quadratic)
# Encode linear terms. The coefficients of the linear terms of the objective
# are encoded as an array of little endian 64 bit doubles.
# This array is then base64 encoded into a string safe for json.
# The order of the terms is determined by the _encoding_qubits property
# specified by the server.
# Note: only active qubits are coded with double, inactive with NaN
nan = float('nan')
lin = [uniform_get(linear, qubit, 0 if qubit in active else nan)
for qubit in solver._encoding_qubits]
lin = base64.b64encode(struct.pack('<' + ('d' * len(lin)), *lin))
# Encode the coefficients of the quadratic terms of the objective
# in the same manner as the linear terms, in the order given by the
# _encoding_couplers property, discarding trailing zero couplings
quad = [quadratic.get((q1,q2), 0) + quadratic.get((q2,q1), 0)
for (q1,q2) in solver._encoding_couplers
if q1 in active and q2 in active]
quad = base64.b64encode(struct.pack('<' + ('d' * len(quad)), *quad))
# The name for this encoding is 'qp' and is explicitly included in the
# message for easier extension in the future.
return {
'format': 'qp',
'lin': lin.decode('utf-8'),
'quad': quad.decode('utf-8')
}
|
Encode the binary quadratic problem for submission to a given solver,
using the `qp` format for data.
Args:
solver (:class:`dwave.cloud.solver.Solver`):
The solver used.
linear (dict[variable, bias]/list[variable, bias]):
Linear terms of the model.
quadratic (dict[(variable, variable), bias]):
Quadratic terms of the model.
Returns:
encoded submission dictionary
|
entailment
|
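The packing step described above can be reproduced stand-alone: biases are packed as little-endian 64-bit doubles and the buffer is base64 encoded, with NaN marking an inactive qubit.

import base64
import struct

biases = [0.5, -1.0, float('nan')]   # NaN for an inactive qubit, as above
packed = struct.pack('<' + 'd' * len(biases), *biases)
encoded = base64.b64encode(packed).decode('utf-8')
print(encoded)   # json-safe string, ready for the 'lin' field of a 'qp' message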
def decode_qp(msg):
"""Decode SAPI response that uses `qp` format, without numpy.
The 'qp' format is the current encoding used for problems and samples.
In this encoding the reply is generally json, but the samples, energy,
and histogram data (the occurrence count of each solution), are all
base64 encoded arrays.
"""
# Decode the simple buffers
result = msg['answer']
result['active_variables'] = _decode_ints(result['active_variables'])
active_variables = result['active_variables']
if 'num_occurrences' in result:
result['num_occurrences'] = _decode_ints(result['num_occurrences'])
result['energies'] = _decode_doubles(result['energies'])
# Measure out the size of the binary solution data
num_solutions = len(result['energies'])
num_variables = len(result['active_variables'])
solution_bytes = -(-num_variables // 8) # equivalent to int(math.ceil(num_variables / 8.))
total_variables = result['num_variables']
# Figure out the null value for output
default = 3 if msg['type'] == 'qubo' else 0
# Decode the solutions, which will be byte aligned in binary format
binary = base64.b64decode(result['solutions'])
solutions = []
for solution_index in range(num_solutions):
# Grab the section of the buffer related to the current
buffer_index = solution_index * solution_bytes
solution_buffer = binary[buffer_index:buffer_index + solution_bytes]
bytes = struct.unpack('B' * solution_bytes, solution_buffer)
# Assume None values
solution = [default] * total_variables
index = 0
for byte in bytes:
# Parse each byte and read however many bits remain for this solution
values = _decode_byte(byte)
for _ in range(min(8, len(active_variables) - index)):
i = active_variables[index]
index += 1
solution[i] = values.pop()
# Switch to the right variable space
if msg['type'] == 'ising':
values = {0: -1, 1: 1}
solution = [values.get(v, default) for v in solution]
solutions.append(solution)
result['solutions'] = solutions
return result
|
Decode SAPI response that uses `qp` format, without numpy.
The 'qp' format is the current encoding used for problems and samples.
In this encoding the reply is generally json, but the samples, energy,
and histogram data (the occurrence count of each solution), are all
base64 encoded arrays.
|
entailment
|
def _decode_byte(byte):
"""Helper for decode_qp, turns a single byte into a list of bits.
Args:
byte: byte to be decoded
Returns:
list of bits corresponding to byte
"""
bits = []
for _ in range(8):
bits.append(byte & 1)
byte >>= 1
return bits
|
Helper for decode_qp, turns a single byte into a list of bits.
Args:
byte: byte to be decoded
Returns:
list of bits corresponding to byte
|
entailment
|
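A quick check of the bit order produced by ``_decode_byte`` (assumed in scope): the least significant bit comes first.

# LSB-first: 5 == 0b00000101 decodes to [1, 0, 1, 0, 0, 0, 0, 0]
print(_decode_byte(5))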
def _decode_ints(message):
"""Helper for decode_qp, decodes an int array.
The int array is stored as little endian 32 bit integers.
The array has then been base64 encoded. Since we are decoding we do these
steps in reverse.
"""
binary = base64.b64decode(message)
return struct.unpack('<' + ('i' * (len(binary) // 4)), binary)
|
Helper for decode_qp, decodes an int array.
The int array is stored as little endian 32 bit integers.
The array has then been base64 encoded. Since we are decoding we do these
steps in reverse.
|
entailment
|
def _decode_doubles(message):
"""Helper for decode_qp, decodes a double array.
The double array is stored as little endian 64 bit doubles.
The array has then been base64 encoded. Since we are decoding we do these
steps in reverse.
Args:
message: the double array
Returns:
decoded double array
"""
binary = base64.b64decode(message)
return struct.unpack('<' + ('d' * (len(binary) // 8)), binary)
|
Helper for decode_qp, decodes a double array.
The double array is stored as little endian 64 bit doubles.
The array has then been base64 encoded. Since we are decoding we do these
steps in reverse.
Args:
message: the double array
Returns:
decoded double array
|
entailment
|
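A round-trip sketch for the integer and double helpers above (both assumed in scope): pack little-endian values, base64 encode them, then decode them back.

import base64
import struct

ints = base64.b64encode(struct.pack('<3i', 7, -2, 0))
print(_decode_ints(ints))        # (7, -2, 0)

doubles = base64.b64encode(struct.pack('<3d', 1.5, -2.0, 0.25))
print(_decode_doubles(doubles))  # (1.5, -2.0, 0.25)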
def decode_qp_numpy(msg, return_matrix=True):
"""Decode SAPI response, results in a `qp` format, explicitly using numpy.
If numpy is not installed, the method will fail.
To use numpy for decoding, but return the results as lists (instead of
numpy matrices), set `return_matrix=False`.
"""
import numpy as np
result = msg['answer']
# Build some little endian type encodings
double_type = np.dtype(np.double)
double_type = double_type.newbyteorder('<')
int_type = np.dtype(np.int32)
int_type = int_type.newbyteorder('<')
# Decode the simple buffers
result['energies'] = np.frombuffer(base64.b64decode(result['energies']),
dtype=double_type)
if 'num_occurrences' in result:
result['num_occurrences'] = \
np.frombuffer(base64.b64decode(result['num_occurrences']),
dtype=int_type)
result['active_variables'] = \
np.frombuffer(base64.b64decode(result['active_variables']),
dtype=int_type)
# Measure out the binary data size
num_solutions = len(result['energies'])
active_variables = result['active_variables']
num_variables = len(active_variables)
total_variables = result['num_variables']
# Decode the solutions, which will be a continuous run of bits
byte_type = np.dtype(np.uint8)
byte_type = byte_type.newbyteorder('<')
bits = np.unpackbits(np.frombuffer(base64.b64decode(result['solutions']),
dtype=byte_type))
# Clip off the extra bits from encoding
if num_solutions:
bits = np.reshape(bits, (num_solutions, bits.size // num_solutions))
bits = np.delete(bits, range(num_variables, bits.shape[1]), 1)
# Switch from bits to spins
default = 3
if msg['type'] == 'ising':
bits = bits.astype(np.int8)
bits *= 2
bits -= 1
default = 0
# Fill in the missing variables
solutions = np.full((num_solutions, total_variables), default, dtype=np.int8)
solutions[:, active_variables] = bits
result['solutions'] = solutions
# If the final result shouldn't be numpy formats switch back to python objects
if not return_matrix:
result['energies'] = result['energies'].tolist()
if 'num_occurrences' in result:
result['num_occurrences'] = result['num_occurrences'].tolist()
result['active_variables'] = result['active_variables'].tolist()
result['solutions'] = result['solutions'].tolist()
return result
|
Decode SAPI response, results in a `qp` format, explicitly using numpy.
If numpy is not installed, the method will fail.
To use numpy for decoding, but return the results as lists (instead of
numpy matrices), set `return_matrix=False`.
|
entailment
|
def evaluate_ising(linear, quad, state):
"""Calculate the energy of a state given the Hamiltonian.
Args:
linear: Linear Hamiltonian terms.
quad: Quadratic Hamiltonian terms.
state: Vector of spins describing the system state.
Returns:
Energy of the state evaluated by the given energy function.
"""
# If we were given a numpy array cast to list
if _numpy and isinstance(state, np.ndarray):
return evaluate_ising(linear, quad, state.tolist())
# Accumulate the linear and quadratic values
energy = 0.0
for index, value in uniform_iterator(linear):
energy += state[index] * value
for (index_a, index_b), value in six.iteritems(quad):
energy += value * state[index_a] * state[index_b]
return energy
|
Calculate the energy of a state given the Hamiltonian.
Args:
linear: Linear Hamiltonian terms.
quad: Quadratic Hamiltonian terms.
state: Vector of spins describing the system state.
Returns:
Energy of the state evaluated by the given energy function.
|
entailment
|
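A worked example for ``evaluate_ising`` (assumed in scope), spelling out the energy sum E = sum_i h_i*s_i + sum_(i,j) J_ij*s_i*s_j:

h = {0: 1.0, 1: -0.5}
J = {(0, 1): 2.0}
state = [1, -1]
# 1.0*1 + (-0.5)*(-1) + 2.0*(1*-1) = 1.0 + 0.5 - 2.0 = -0.5
print(evaluate_ising(h, J, state))   # -0.5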
def active_qubits(linear, quadratic):
"""Calculate a set of all active qubits. Qubit is "active" if it has
bias or coupling attached.
Args:
linear (dict[variable, bias]/list[variable, bias]):
Linear terms of the model.
quadratic (dict[(variable, variable), bias]):
Quadratic terms of the model.
Returns:
set:
Active qubits' indices.
"""
active = {idx for idx,bias in uniform_iterator(linear)}
for edge, _ in six.iteritems(quadratic):
active.update(edge)
return active
|
Calculate a set of all active qubits. Qubit is "active" if it has
bias or coupling attached.
Args:
linear (dict[variable, bias]/list[variable, bias]):
Linear terms of the model.
quadratic (dict[(variable, variable), bias]):
Quadratic terms of the model.
Returns:
set:
Active qubits' indices.
|
entailment
|
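A small illustration of ``active_qubits`` (assumed in scope): a qubit counts as active whether it appears through a bias or only through a coupling.

lin = {7: 0.5}            # qubit 7 has a bias
quad = {(3, 7): -1.0}     # qubit 3 appears only via a coupling
print(active_qubits(lin, quad))   # {3, 7}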
def generate_random_ising_problem(solver, h_range=None, j_range=None):
"""Generates an Ising problem formulation valid for a particular solver,
using all qubits and all couplings and linear/quadratic biases sampled
uniformly from `h_range`/`j_range`.
"""
if h_range is None:
h_range = solver.properties.get('h_range', [-1, 1])
if j_range is None:
j_range = solver.properties.get('j_range', [-1, 1])
lin = {qubit: random.uniform(*h_range) for qubit in solver.nodes}
quad = {edge: random.uniform(*j_range) for edge in solver.undirected_edges}
return lin, quad
|
Generates an Ising problem formulation valid for a particular solver,
using all qubits and all couplings and linear/quadratic biases sampled
uniformly from `h_range`/`j_range`.
|
entailment
|
def uniform_iterator(sequence):
"""Uniform (key, value) iteration on a `dict`,
or (idx, value) on a `list`."""
if isinstance(sequence, abc.Mapping):
return six.iteritems(sequence)
else:
return enumerate(sequence)
|
Uniform (key, value) iteration on a `dict`,
or (idx, value) on a `list`.
|
entailment
|
def uniform_get(sequence, index, default=None):
"""Uniform `dict`/`list` item getter, where `index` is interpreted as a key
for maps and as numeric index for lists."""
if isinstance(sequence, abc.Mapping):
return sequence.get(index, default)
else:
return sequence[index] if index < len(sequence) else default
|
Uniform `dict`/`list` item getter, where `index` is interpreted as a key
for maps and as numeric index for lists.
|
entailment
|
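A quick illustration of the two uniform helpers above (assumed in scope): dicts are treated as key/value maps, lists as index/value sequences, and missing entries fall back to the default.

print(list(uniform_iterator({'a': 1})))        # [('a', 1)]
print(list(uniform_iterator([10, 20])))        # [(0, 10), (1, 20)]
print(uniform_get({'a': 1}, 'b', default=0))   # 0
print(uniform_get([10, 20], 5, default=0))     # 0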
def strip_head(sequence, values):
"""Strips elements of `values` from the beginning of `sequence`."""
values = set(values)
return list(itertools.dropwhile(lambda x: x in values, sequence))
|
Strips elements of `values` from the beginning of `sequence`.
|
entailment
|
def strip_tail(sequence, values):
"""Strip `values` from the end of `sequence`."""
return list(reversed(list(strip_head(reversed(sequence), values))))
|
Strip `values` from the end of `sequence`.
|
entailment
|
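The two strip helpers above only remove matching values from one end of the sequence, as this short example (names assumed in scope) shows:

print(strip_head([0, 0, 1, 2, 0], [0]))   # [1, 2, 0]
print(strip_tail([0, 0, 1, 2, 0], [0]))   # [0, 0, 1, 2]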
def click_info_switch(f):
"""Decorator to create eager Click info switch option, as described in:
http://click.pocoo.org/6/options/#callbacks-and-eager-options.
Takes a no-argument function and abstracts the boilerplate required by
Click (value checking, exit on done).
Example:
@click.option('--my-option', is_flag=True, callback=my_option,
expose_value=False, is_eager=True)
def test():
pass
@click_info_switch
def my_option():
click.echo('some info related to my switch')
"""
@wraps(f)
def wrapped(ctx, param, value):
if not value or ctx.resilient_parsing:
return
f()
ctx.exit()
return wrapped
|
Decorator to create eager Click info switch option, as described in:
http://click.pocoo.org/6/options/#callbacks-and-eager-options.
Takes a no-argument function and abstracts the boilerplate required by
Click (value checking, exit on done).
Example:
@click.option('--my-option', is_flag=True, callback=my_option,
expose_value=False, is_eager=True)
def test():
pass
@click_info_switch
def my_option():
click.echo('some info related to my switch')
|
entailment
|
def datetime_to_timestamp(dt):
"""Convert timezone-aware `datetime` to POSIX timestamp and
return seconds since UNIX epoch.
Note: similar to `datetime.timestamp()` in Python 3.3+.
"""
epoch = datetime.utcfromtimestamp(0).replace(tzinfo=UTC)
return (dt - epoch).total_seconds()
|
Convert timezone-aware `datetime` to POSIX timestamp and
return seconds since UNIX epoch.
Note: similar to `datetime.timestamp()` in Python 3.3+.
|
entailment
|
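A sanity check for ``datetime_to_timestamp`` (assumed in scope together with a ``UTC`` tzinfo object): the UNIX epoch itself maps to 0.0 seconds.

from datetime import datetime

epoch = datetime(1970, 1, 1, tzinfo=UTC)
print(datetime_to_timestamp(epoch))   # 0.0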
def user_agent(name, version):
"""Return User-Agent ~ "name/version language/version interpreter/version os/version"."""
def _interpreter():
name = platform.python_implementation()
version = platform.python_version()
bitness = platform.architecture()[0]
if name == 'PyPy':
version = '.'.join(map(str, sys.pypy_version_info[:3]))
full_version = [version]
if bitness:
full_version.append(bitness)
return name, "-".join(full_version)
tags = [
(name, version),
("python", platform.python_version()),
_interpreter(),
("machine", platform.machine() or 'unknown'),
("system", platform.system() or 'unknown'),
("platform", platform.platform() or 'unknown'),
]
return ' '.join("{}/{}".format(name, version) for name, version in tags)
|
Return User-Agent ~ "name/version language/version interpreter/version os/version".
|
entailment
|
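An illustrative call to ``user_agent`` (assumed in scope); the name and version below are placeholders, and the exact output depends on the interpreter and platform.

print(user_agent('my-client', '0.1.0'))
# e.g. "my-client/0.1.0 python/3.7.3 CPython/3.7.3-64bit machine/x86_64 system/Linux platform/..."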
def argshash(self, args, kwargs):
"Hash mutable arguments' containers with immutable keys and values."
a = repr(args)
b = repr(sorted((repr(k), repr(v)) for k, v in kwargs.items()))
return a + b
|
Hash mutable arguments' containers with immutable keys and values.
|
entailment
|
def get_context_data(self, **kwargs):
"""
Adds `next` to the context.
This makes sure that the `next` parameter doesn't get lost if the
form was submitted invalid.
"""
ctx = super(UserMediaImageViewMixin, self).get_context_data(**kwargs)
ctx.update({
'action': self.action,
'next': self.next,
})
return ctx
|
Adds `next` to the context.
This makes sure that the `next` parameter doesn't get lost if the
form was submitted invalid.
|
entailment
|
def get_success_url(self):
"""
Returns the success URL.
This is either the given `next` URL parameter or the content object's
`get_absolute_url` method's return value.
"""
if self.next:
return self.next
if self.object and self.object.content_object:
return self.object.content_object.get_absolute_url()
raise Exception(
'No content object given. Please provide ``next`` in your POST'
' data')
|
Returns the success URL.
This is either the given `next` URL parameter or the content object's
`get_absolute_url` method's return value.
|
entailment
|
def dispatch(self, request, *args, **kwargs):
"""Adds useful objects to the class and performs security checks."""
self._add_next_and_user(request)
self.content_object = None
self.content_type = None
self.object_id = kwargs.get('object_id', None)
if kwargs.get('content_type'):
# Check if the user forged the URL and posted a non existant
# content type
try:
self.content_type = ContentType.objects.get(
model=kwargs.get('content_type'))
except ContentType.DoesNotExist:
raise Http404
if self.content_type:
# Check if the user forged the URL and tries to append the image to
# an object that does not exist
try:
self.content_object = \
self.content_type.get_object_for_this_type(
pk=self.object_id)
except ObjectDoesNotExist:
raise Http404
if self.content_object and hasattr(self.content_object, 'user'):
# Check if the user forged the URL and tries to append the image to
# an object that does not belong to him
if not self.content_object.user == self.user:
raise Http404
return super(CreateImageView, self).dispatch(request, *args, **kwargs)
|
Adds useful objects to the class and performs security checks.
|
entailment
|
def dispatch(self, request, *args, **kwargs):
"""Adds useful objects to the class."""
self._add_next_and_user(request)
return super(DeleteImageView, self).dispatch(request, *args, **kwargs)
|
Adds useful objects to the class.
|
entailment
|
def get_queryset(self):
"""
Making sure that a user can only delete his own images, even when he forges the request URL.
"""
queryset = super(DeleteImageView, self).get_queryset()
queryset = queryset.filter(user=self.user)
return queryset
|
Making sure that a user can only delete his own images, even when he forges the request URL.
|
entailment
|
def dispatch(self, request, *args, **kwargs):
"""Adds useful objects to the class."""
self._add_next_and_user(request)
return super(UpdateImageView, self).dispatch(request, *args, **kwargs)
|
Adds useful objects to the class.
|
entailment
|
def get_queryset(self):
"""
Making sure that a user can only edit his own images, even when he forges the request URL.
"""
queryset = super(UpdateImageView, self).get_queryset()
queryset = queryset.filter(user=self.user)
return queryset
|
Making sure that a user can only edit his own images, even when he forges the request URL.
|
entailment
|
def _delete_images(self, instance):
"""Deletes all user media images of the given instance."""
UserMediaImage.objects.filter(
content_type=ContentType.objects.get_for_model(instance),
object_id=instance.pk,
user=instance.user,
).delete()
|
Deletes all user media images of the given instance.
|
entailment
|
def clean_image(self):
"""
It seems like in Django 1.5 something has changed.
When Django tries to validate the form, it checks if the generated
    filename fits into the max_length. But at this point, self.instance.user
is not yet set so our filename generation function cannot create
the new file path because it needs the user id. Setting
self.instance.user at this point seems to work as a workaround.
"""
self.instance.user = self.user
data = self.cleaned_data.get('image')
return data
|
It seems like in Django 1.5 something has changed.
When Django tries to validate the form, it checks if the generated
filename fits into the max_length. But at this point, self.instance.user
is not yet set so our filename generation function cannot create
the new file path because it needs the user id. Setting
self.instance.user at this point seems to work as a workaround.
|
entailment
|
def get_image_file_path(instance, filename):
"""Returns a unique filename for images."""
ext = filename.split('.')[-1]
filename = '%s.%s' % (uuid.uuid4(), ext)
return os.path.join(
'user_media', str(instance.user.pk), 'images', filename)
|
Returns a unique filename for images.
|
entailment
|
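A quick sketch of the resulting upload path, assuming `get_image_file_path` above is in scope; the `SimpleNamespace` stand-in and the primary key 42 are purely illustrative, not part of the original code.

from types import SimpleNamespace

# Stand-in for a Django model instance whose ``user`` has primary key 42.
fake_instance = SimpleNamespace(user=SimpleNamespace(pk=42))

path = get_image_file_path(fake_instance, 'portrait.jpg')
print(path)  # e.g. user_media/42/images/1c9a7c0e-9f3d-4d6e-b2a1-0a1b2c3d4e5f.jpg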
def image_post_delete_handler(sender, instance, **kwargs):
"""
    Makes sure that an image is also deleted from the media directory.
This should prevent a load of "dead" image files on disc.
"""
for f in glob.glob('{}/{}*'.format(instance.image.storage.location,
instance.image.name)):
if not os.path.isdir(f):
instance.image.storage.delete(f)
|
Makes sure that an image is also deleted from the media directory.
This should prevent a load of "dead" image files on disc.
|
entailment
|
def box_coordinates(self):
"""Returns a thumbnail's coordinates."""
if (
self.thumb_x is not None and
self.thumb_y is not None and
self.thumb_x2 is not None and
self.thumb_y2 is not None
):
return (
int(self.thumb_x),
int(self.thumb_y),
int(self.thumb_x2),
int(self.thumb_y2),
)
return False
|
Returns a thumbnail's coordinates.
|
entailment
|
def large_size(self, as_string=True):
"""Returns a thumbnail's large size."""
size = getattr(settings, 'USER_MEDIA_THUMB_SIZE_LARGE', (150, 150))
if as_string:
return u'{}x{}'.format(size[0], size[1])
return size
|
Returns a thumbnail's large size.
|
entailment
|
def crop_box(im, box=False, **kwargs):
"""Uses box coordinates to crop an image without resizing it first."""
if box:
im = im.crop(box)
return im
|
Uses box coordinates to crop an image without resizing it first.
|
entailment
|
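A small usage sketch with Pillow, assuming Pillow is installed and `crop_box` above is in scope; the blank test image and the box coordinates are made up for illustration.

from PIL import Image

# A blank 200x200 test image stands in for a real user upload.
im = Image.new('RGB', (200, 200), color='white')

# Box coordinates in the (x, y, x2, y2) form returned by box_coordinates().
cropped = crop_box(im, box=(10, 20, 110, 120))
print(cropped.size)  # (100, 100)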
def load_germanet(host = None, port = None, database_name = 'germanet'):
'''
Loads a GermaNet instance connected to the given MongoDB instance.
Arguments:
- `host`: the hostname of the MongoDB instance
- `port`: the port number of the MongoDB instance
- `database_name`: the name of the GermaNet database on the
MongoDB instance
'''
client = MongoClient(host, port)
germanet_db = client[database_name]
return GermaNet(germanet_db)
|
Loads a GermaNet instance connected to the given MongoDB instance.
Arguments:
- `host`: the hostname of the MongoDB instance
- `port`: the port number of the MongoDB instance
- `database_name`: the name of the GermaNet database on the
MongoDB instance
|
entailment
|
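A hedged usage sketch, assuming a local MongoDB instance that has already been populated by the import script shown further down; the lemma and part of speech are illustrative.

gn = load_germanet()  # defaults to mongodb://localhost:27017, database 'germanet'

# Look up synsets for a lemma, optionally restricted to a part of speech.
for synset in gn.synsets(u'Hund', pos='n'):
    print(synset)

# Look up a single synset by its string representation (from the doctest above).
print(gn.synset(u'funktionieren.v.2'))  # Synset(funktionieren.v.2)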
def cache_size(self, new_value):
'''
Set the cache size used to reduce the number of database
access operations.
'''
if type(new_value) == int and 0 < new_value:
if self._lemma_cache is not None:
self._lemma_cache = repoze.lru.LRUCache(new_value)
self._synset_cache = repoze.lru.LRUCache(new_value)
|
Set the cache size used to reduce the number of database
access operations.
|
entailment
|
def all_lemmas(self):
'''
A generator over all the lemmas in the GermaNet database.
'''
for lemma_dict in self._mongo_db.lexunits.find():
yield Lemma(self, lemma_dict)
|
A generator over all the lemmas in the GermaNet database.
|
entailment
|
def lemmas(self, lemma, pos = None):
'''
Looks up lemmas in the GermaNet database.
Arguments:
- `lemma`:
- `pos`:
'''
if pos is not None:
if pos not in SHORT_POS_TO_LONG:
return None
pos = SHORT_POS_TO_LONG[pos]
lemma_dicts = self._mongo_db.lexunits.find({'orthForm': lemma,
'category': pos})
else:
lemma_dicts = self._mongo_db.lexunits.find({'orthForm': lemma})
return sorted([Lemma(self, lemma_dict) for lemma_dict in lemma_dicts])
|
Looks up lemmas in the GermaNet database.
Arguments:
- `lemma`:
- `pos`:
|
entailment
|
def all_synsets(self):
'''
A generator over all the synsets in the GermaNet database.
'''
for synset_dict in self._mongo_db.synsets.find():
yield Synset(self, synset_dict)
|
A generator over all the synsets in the GermaNet database.
|
entailment
|
def synsets(self, lemma, pos = None):
'''
Looks up synsets in the GermaNet database.
Arguments:
- `lemma`:
- `pos`:
'''
return sorted(set(lemma_obj.synset
for lemma_obj in self.lemmas(lemma, pos)))
|
Looks up synsets in the GermaNet database.
Arguments:
- `lemma`:
- `pos`:
|
entailment
|
def synset(self, synset_repr):
'''
Looks up a synset in GermaNet using its string representation.
Arguments:
- `synset_repr`: a unicode string containing the lemma, part
of speech, and sense number of the first lemma of the synset
>>> gn.synset(u'funktionieren.v.2')
Synset(funktionieren.v.2)
'''
parts = synset_repr.split('.')
if len(parts) != 3:
return None
lemma, pos, sensenum = parts
if not sensenum.isdigit() or pos not in SHORT_POS_TO_LONG:
return None
sensenum = int(sensenum, 10)
pos = SHORT_POS_TO_LONG[pos]
lemma_dict = self._mongo_db.lexunits.find_one({'orthForm': lemma,
'category': pos,
'sense': sensenum})
if lemma_dict:
return Lemma(self, lemma_dict).synset
|
Looks up a synset in GermaNet using its string representation.
Arguments:
- `synset_repr`: a unicode string containing the lemma, part
of speech, and sense number of the first lemma of the synset
>>> gn.synset(u'funktionieren.v.2')
Synset(funktionieren.v.2)
|
entailment
|
def get_synset_by_id(self, mongo_id):
'''
Builds a Synset object from the database entry with the given
ObjectId.
Arguments:
- `mongo_id`: a bson.objectid.ObjectId object
'''
cache_hit = None
if self._synset_cache is not None:
cache_hit = self._synset_cache.get(mongo_id)
if cache_hit is not None:
return cache_hit
synset_dict = self._mongo_db.synsets.find_one({'_id': mongo_id})
if synset_dict is not None:
synset = Synset(self, synset_dict)
if self._synset_cache is not None:
self._synset_cache.put(mongo_id, synset)
return synset
|
Builds a Synset object from the database entry with the given
ObjectId.
Arguments:
- `mongo_id`: a bson.objectid.ObjectId object
|
entailment
|
def get_lemma_by_id(self, mongo_id):
'''
Builds a Lemma object from the database entry with the given
ObjectId.
Arguments:
- `mongo_id`: a bson.objectid.ObjectId object
'''
cache_hit = None
if self._lemma_cache is not None:
cache_hit = self._lemma_cache.get(mongo_id)
if cache_hit is not None:
return cache_hit
lemma_dict = self._mongo_db.lexunits.find_one({'_id': mongo_id})
if lemma_dict is not None:
lemma = Lemma(self, lemma_dict)
if self._lemma_cache is not None:
self._lemma_cache.put(mongo_id, lemma)
return lemma
|
Builds a Lemma object from the database entry with the given
ObjectId.
Arguments:
- `mongo_id`: a bson.objectid.ObjectId object
|
entailment
|
def lemmatise(self, word):
'''
Tries to find the base form (lemma) of the given word, using
the data provided by the Projekt deutscher Wortschatz. This
method returns a list of potential lemmas.
>>> gn.lemmatise(u'Männer')
[u'Mann']
>>> gn.lemmatise(u'XYZ123')
[u'XYZ123']
'''
lemmas = list(self._mongo_db.lemmatiser.find({'word': word}))
if lemmas:
return [lemma['lemma'] for lemma in lemmas]
else:
return [word]
|
Tries to find the base form (lemma) of the given word, using
the data provided by the Projekt deutscher Wortschatz. This
method returns a list of potential lemmas.
>>> gn.lemmatise(u'Männer')
[u'Mann']
>>> gn.lemmatise(u'XYZ123')
[u'XYZ123']
|
entailment
|
def find_germanet_xml_files(xml_path):
'''
Globs the XML files contained in the given directory and sorts
them into sections for import into the MongoDB database.
Arguments:
- `xml_path`: the path to the directory containing the GermaNet
XML files
'''
xml_files = sorted(glob.glob(os.path.join(xml_path, '*.xml')))
# sort out the lexical files
lex_files = [xml_file for xml_file in xml_files if
re.match(r'(adj|nomen|verben)\.',
os.path.basename(xml_file).lower())]
xml_files = sorted(set(xml_files) - set(lex_files))
if not lex_files:
print('ERROR: cannot find lexical information files')
# sort out the GermaNet relations file
gn_rels_file = [xml_file for xml_file in xml_files if
os.path.basename(xml_file).lower() == 'gn_relations.xml']
xml_files = sorted(set(xml_files) - set(gn_rels_file))
if not gn_rels_file:
print('ERROR: cannot find relations file gn_relations.xml')
gn_rels_file = None
else:
if 1 < len(gn_rels_file):
print ('WARNING: more than one relations file gn_relations.xml, '
'taking first match')
gn_rels_file = gn_rels_file[0]
# sort out the wiktionary paraphrase files
wiktionary_files = [xml_file for xml_file in xml_files if
re.match(r'wiktionaryparaphrases-',
os.path.basename(xml_file).lower())]
xml_files = sorted(set(xml_files) - set(wiktionary_files))
if not wiktionary_files:
print('WARNING: cannot find wiktionary paraphrase files')
# sort out the interlingual index file
ili_files = [xml_file for xml_file in xml_files if
os.path.basename(xml_file).lower().startswith(
'interlingualindex')]
xml_files = sorted(set(xml_files) - set(ili_files))
if not ili_files:
print('WARNING: cannot find interlingual index file')
if xml_files:
print('WARNING: unrecognised xml files:', xml_files)
return lex_files, gn_rels_file, wiktionary_files, ili_files
|
Globs the XML files contained in the given directory and sorts
them into sections for import into the MongoDB database.
Arguments:
- `xml_path`: the path to the directory containing the GermaNet
XML files
|
entailment
|
def warn_attribs(loc,
node,
recognised_attribs,
reqd_attribs=None):
'''
Error checking of XML input: check that the given node has certain
required attributes, and does not have any unrecognised
attributes.
Arguments:
- `loc`: a string with some information about the location of the
error in the XML file
- `node`: the node to check
- `recognised_attribs`: a set of node attributes which we know how
to handle
- `reqd_attribs`: a set of node attributes which we require to be
present; if this argument is None, it will take the same value
as `recognised_attribs`
'''
if reqd_attribs is None:
reqd_attribs = recognised_attribs
found_attribs = set(node.keys())
if reqd_attribs - found_attribs:
print(loc, 'missing <{0}> attributes'.format(node.tag),
reqd_attribs - found_attribs)
if found_attribs - recognised_attribs:
print(loc, 'unrecognised <{0}> properties'.format(node.tag),
found_attribs - recognised_attribs)
|
Error checking of XML input: check that the given node has certain
required attributes, and does not have any unrecognised
attributes.
Arguments:
- `loc`: a string with some information about the location of the
error in the XML file
- `node`: the node to check
- `recognised_attribs`: a set of node attributes which we know how
to handle
- `reqd_attribs`: a set of node attributes which we require to be
present; if this argument is None, it will take the same value
as `recognised_attribs`
|
entailment
|
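A self-contained illustration of the attribute checks, using the standard-library ElementTree (the importer itself may use a different parser) and assuming `warn_attribs` above is in scope; the element and attribute sets are made up.

import xml.etree.ElementTree as ET

node = ET.fromstring('<lexUnit id="l123" sense="1" unknownAttr="x"/>')

warn_attribs('example.xml lexUnit l123,', node,
             recognised_attribs={'id', 'sense', 'source'})
# prints something like:
#   example.xml lexUnit l123, missing <lexUnit> attributes {'source'}
#   example.xml lexUnit l123, unrecognised <lexUnit> properties {'unknownAttr'}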
def read_lexical_file(filename):
'''
Reads in a GermaNet lexical information file and returns its
contents as a list of dictionary structures.
Arguments:
- `filename`: the name of the XML file to read
'''
with open(filename, 'rb') as input_file:
doc = etree.parse(input_file)
synsets = []
assert doc.getroot().tag == 'synsets'
for synset in doc.getroot():
if synset.tag != 'synset':
print('unrecognised child of <synsets>', synset)
continue
synset_dict = dict(synset.items())
synloc = '{0} synset {1},'.format(filename,
synset_dict.get('id', '???'))
warn_attribs(synloc, synset, SYNSET_ATTRIBS)
synset_dict['lexunits'] = []
synsets.append(synset_dict)
for child in synset:
if child.tag == 'lexUnit':
lexunit = child
lexunit_dict = dict(lexunit.items())
lexloc = synloc + ' lexUnit {0},'.format(
lexunit_dict.get('id', '???'))
warn_attribs(lexloc, lexunit, LEXUNIT_ATTRIBS)
# convert some properties to booleans
for key in ['styleMarking', 'artificial', 'namedEntity']:
if key in lexunit_dict:
if lexunit_dict[key] not in MAP_YESNO_TO_BOOL:
print(lexloc, ('lexunit property {0} has '
'non-boolean value').format(key),
lexunit_dict[key])
continue
lexunit_dict[key] = MAP_YESNO_TO_BOOL[lexunit_dict[key]]
# convert sense to integer number
if 'sense' in lexunit_dict:
if lexunit_dict['sense'].isdigit():
lexunit_dict['sense'] = int(lexunit_dict['sense'], 10)
else:
print(lexloc,
'lexunit property sense has non-numeric value',
lexunit_dict['sense'])
synset_dict['lexunits'].append(lexunit_dict)
lexunit_dict['examples'] = []
lexunit_dict['frames'] = []
for child in lexunit:
if child.tag in ['orthForm',
'orthVar',
'oldOrthForm',
'oldOrthVar']:
warn_attribs(lexloc, child, set())
if not child.text:
print(lexloc, '{0} with no text'.format(child.tag))
continue
if child.tag in lexunit_dict:
print(lexloc, 'more than one {0}'.format(child.tag))
lexunit_dict[child.tag] = str(child.text)
elif child.tag == 'example':
example = child
text = [child for child in example
if child.tag == 'text']
if len(text) != 1 or not text[0].text:
print(lexloc, '<example> tag without text')
example_dict = {'text': str(text[0].text)}
for child in example:
if child.tag == 'text':
continue
elif child.tag == 'exframe':
if 'exframe' in example_dict:
print(lexloc,
'more than one <exframe> '
'for <example>')
warn_attribs(lexloc, child, set())
if not child.text:
print(lexloc, '<exframe> with no text')
continue
example_dict['exframe'] = str(child.text)
else:
print(lexloc,
'unrecognised child of <example>',
child)
lexunit_dict['examples'].append(example_dict)
elif child.tag == 'frame':
frame = child
warn_attribs(lexloc, frame, set())
if 0 < len(frame):
print(lexloc, 'unrecognised <frame> children',
list(frame))
if not frame.text:
print(lexloc, '<frame> without text')
continue
lexunit_dict['frames'].append(str(frame.text))
elif child.tag == 'compound':
compound = child
warn_attribs(lexloc, compound, set())
compound_dict = {}
for child in compound:
if child.tag == 'modifier':
modifier_dict = dict(child.items())
warn_attribs(lexloc, child,
MODIFIER_ATTRIBS, set())
if not child.text:
print(lexloc, 'modifier without text')
continue
modifier_dict['text'] = str(child.text)
if 'modifier' not in compound_dict:
compound_dict['modifier'] = []
compound_dict['modifier'].append(modifier_dict)
elif child.tag == 'head':
head_dict = dict(child.items())
warn_attribs(lexloc, child, HEAD_ATTRIBS, set())
if not child.text:
print(lexloc, '<head> without text')
continue
head_dict['text'] = str(child.text)
if 'head' in compound_dict:
print(lexloc,
'more than one head in <compound>')
compound_dict['head'] = head_dict
else:
print(lexloc,
'unrecognised child of <compound>',
child)
continue
else:
print(lexloc, 'unrecognised child of <lexUnit>', child)
continue
elif child.tag == 'paraphrase':
paraphrase = child
warn_attribs(synloc, paraphrase, set())
paraphrase_text = str(paraphrase.text)
if not paraphrase_text:
print(synloc, 'WARNING: <paraphrase> tag with no text')
else:
print(synloc, 'unrecognised child of <synset>', child)
continue
return synsets
|
Reads in a GermaNet lexical information file and returns its
contents as a list of dictionary structures.
Arguments:
- `filename`: the name of the XML file to read
|
entailment
|
def read_relation_file(filename):
'''
Reads the GermaNet relation file ``gn_relations.xml`` which lists
all the relations holding between lexical units and synsets.
Arguments:
- `filename`:
'''
with open(filename, 'rb') as input_file:
doc = etree.parse(input_file)
lex_rels = []
con_rels = []
assert doc.getroot().tag == 'relations'
for child in doc.getroot():
if child.tag == 'lex_rel':
if 0 < len(child):
print('<lex_rel> has unexpected child node')
child_dict = dict(child.items())
warn_attribs('', child, RELATION_ATTRIBS, RELATION_ATTRIBS_REQD)
if child_dict['dir'] not in LEX_REL_DIRS:
print('unrecognized <lex_rel> dir', child_dict['dir'])
if child_dict['dir'] == 'both' and 'inv' not in child_dict:
print('<lex_rel> has dir=both but does not specify inv')
lex_rels.append(child_dict)
elif child.tag == 'con_rel':
if 0 < len(child):
print('<con_rel> has unexpected child node')
child_dict = dict(child.items())
warn_attribs('', child, RELATION_ATTRIBS, RELATION_ATTRIBS_REQD)
if child_dict['dir'] not in CON_REL_DIRS:
print('unrecognised <con_rel> dir', child_dict['dir'])
if (child_dict['dir'] in ['both', 'revert'] and
'inv' not in child_dict):
print('<con_rel> has dir={0} but does not specify inv'.format(
child_dict['dir']))
con_rels.append(child_dict)
else:
print('unrecognised child of <relations>', child)
continue
return lex_rels, con_rels
|
Reads the GermaNet relation file ``gn_relations.xml`` which lists
all the relations holding between lexical units and synsets.
Arguments:
- `filename`:
|
entailment
|
def read_paraphrase_file(filename):
'''
Reads in a GermaNet wiktionary paraphrase file and returns its
contents as a list of dictionary structures.
Arguments:
- `filename`:
'''
with open(filename, 'rb') as input_file:
doc = etree.parse(input_file)
assert doc.getroot().tag == 'wiktionaryParaphrases'
paraphrases = []
for child in doc.getroot():
if child.tag == 'wiktionaryParaphrase':
paraphrase = child
warn_attribs('', paraphrase, PARAPHRASE_ATTRIBS)
if 0 < len(paraphrase):
print('unrecognised child of <wiktionaryParaphrase>',
list(paraphrase))
paraphrase_dict = dict(paraphrase.items())
if paraphrase_dict['edited'] not in MAP_YESNO_TO_BOOL:
print('<paraphrase> attribute "edited" has unexpected value',
paraphrase_dict['edited'])
else:
paraphrase_dict['edited'] = MAP_YESNO_TO_BOOL[
paraphrase_dict['edited']]
if not paraphrase_dict['wiktionarySenseId'].isdigit():
print('<paraphrase> attribute "wiktionarySenseId" has '
                      'non-integer value',
                      paraphrase_dict['wiktionarySenseId'])
else:
paraphrase_dict['wiktionarySenseId'] = \
int(paraphrase_dict['wiktionarySenseId'], 10)
paraphrases.append(paraphrase_dict)
else:
print('unknown child of <wiktionaryParaphrases>', child)
return paraphrases
|
Reads in a GermaNet wiktionary paraphrase file and returns its
contents as a list of dictionary structures.
Arguments:
- `filename`:
|
entailment
|
def insert_lexical_information(germanet_db, lex_files):
'''
Reads in the given lexical information files and inserts their
contents into the given MongoDB database.
Arguments:
- `germanet_db`: a pymongo.database.Database object
    - `lex_files`: a list of paths to XML files containing lexical
information
'''
# drop the database collections if they already exist
germanet_db.lexunits.drop()
germanet_db.synsets.drop()
# inject data from XML files into the database
for lex_file in lex_files:
synsets = read_lexical_file(lex_file)
for synset in synsets:
synset = dict((SYNSET_KEY_REWRITES.get(key, key), value)
for (key, value) in synset.items())
lexunits = synset['lexunits']
synset['lexunits'] = germanet_db.lexunits.insert(lexunits)
synset_id = germanet_db.synsets.insert(synset)
for lexunit in lexunits:
lexunit['synset'] = synset_id
lexunit['category'] = synset['category']
germanet_db.lexunits.save(lexunit)
# index the two collections by id
germanet_db.synsets.create_index('id')
germanet_db.lexunits.create_index('id')
# also index lexunits by lemma, lemma-pos, and lemma-pos-sensenum
germanet_db.lexunits.create_index([('orthForm', DESCENDING)])
germanet_db.lexunits.create_index([('orthForm', DESCENDING),
('category', DESCENDING)])
germanet_db.lexunits.create_index([('orthForm', DESCENDING),
('category', DESCENDING),
('sense', DESCENDING)])
print('Inserted {0} synsets, {1} lexical units.'.format(
germanet_db.synsets.count(),
germanet_db.lexunits.count()))
|
Reads in the given lexical information files and inserts their
contents into the given MongoDB database.
Arguments:
- `germanet_db`: a pymongo.database.Database object
- `lex_files`: a list of paths to XML files containing lexical
information
|
entailment
|
def insert_relation_information(germanet_db, gn_rels_file):
'''
Reads in the given GermaNet relation file and inserts its contents
into the given MongoDB database.
Arguments:
- `germanet_db`: a pymongo.database.Database object
- `gn_rels_file`:
'''
lex_rels, con_rels = read_relation_file(gn_rels_file)
# cache the lexunits while we work on them
lexunits = {}
for lex_rel in lex_rels:
if lex_rel['from'] not in lexunits:
lexunits[lex_rel['from']] = germanet_db.lexunits.find_one(
{'id': lex_rel['from']})
from_lexunit = lexunits[lex_rel['from']]
if lex_rel['to'] not in lexunits:
lexunits[lex_rel['to']] = germanet_db.lexunits.find_one(
{'id': lex_rel['to']})
to_lexunit = lexunits[lex_rel['to']]
if 'rels' not in from_lexunit:
from_lexunit['rels'] = set()
from_lexunit['rels'].add((lex_rel['name'], to_lexunit['_id']))
if lex_rel['dir'] == 'both':
if 'rels' not in to_lexunit:
to_lexunit['rels'] = set()
to_lexunit['rels'].add((lex_rel['inv'], from_lexunit['_id']))
for lexunit in lexunits.values():
if 'rels' in lexunit:
lexunit['rels'] = sorted(lexunit['rels'])
germanet_db.lexunits.save(lexunit)
# cache the synsets while we work on them
synsets = {}
for con_rel in con_rels:
if con_rel['from'] not in synsets:
synsets[con_rel['from']] = germanet_db.synsets.find_one(
{'id': con_rel['from']})
from_synset = synsets[con_rel['from']]
if con_rel['to'] not in synsets:
synsets[con_rel['to']] = germanet_db.synsets.find_one(
{'id': con_rel['to']})
to_synset = synsets[con_rel['to']]
if 'rels' not in from_synset:
from_synset['rels'] = set()
from_synset['rels'].add((con_rel['name'], to_synset['_id']))
if con_rel['dir'] in ['both', 'revert']:
if 'rels' not in to_synset:
to_synset['rels'] = set()
to_synset['rels'].add((con_rel['inv'], from_synset['_id']))
for synset in synsets.values():
if 'rels' in synset:
synset['rels'] = sorted(synset['rels'])
germanet_db.synsets.save(synset)
print('Inserted {0} lexical relations, {1} synset relations.'.format(
len(lex_rels), len(con_rels)))
|
Reads in the given GermaNet relation file and inserts its contents
into the given MongoDB database.
Arguments:
- `germanet_db`: a pymongo.database.Database object
- `gn_rels_file`:
|
entailment
|
def insert_paraphrase_information(germanet_db, wiktionary_files):
'''
    Reads in the given GermaNet wiktionary paraphrase files and inserts
    their contents into the given MongoDB database.
Arguments:
- `germanet_db`: a pymongo.database.Database object
- `wiktionary_files`:
'''
num_paraphrases = 0
# cache the lexunits while we work on them
lexunits = {}
for filename in wiktionary_files:
paraphrases = read_paraphrase_file(filename)
num_paraphrases += len(paraphrases)
for paraphrase in paraphrases:
if paraphrase['lexUnitId'] not in lexunits:
lexunits[paraphrase['lexUnitId']] = \
germanet_db.lexunits.find_one(
{'id': paraphrase['lexUnitId']})
lexunit = lexunits[paraphrase['lexUnitId']]
if 'paraphrases' not in lexunit:
lexunit['paraphrases'] = []
lexunit['paraphrases'].append(paraphrase)
for lexunit in lexunits.values():
germanet_db.lexunits.save(lexunit)
print('Inserted {0} wiktionary paraphrases.'.format(num_paraphrases))
|
Reads in the given GermaNet wiktionary paraphrase files and inserts
their contents into the given MongoDB database.
Arguments:
- `germanet_db`: a pymongo.database.Database object
- `wiktionary_files`:
|
entailment
|
def insert_lemmatisation_data(germanet_db):
'''
Creates the lemmatiser collection in the given MongoDB instance
using the data derived from the Projekt deutscher Wortschatz.
Arguments:
- `germanet_db`: a pymongo.database.Database object
'''
# drop the database collection if it already exists
germanet_db.lemmatiser.drop()
num_lemmas = 0
input_file = gzip.open(os.path.join(os.path.dirname(__file__),
LEMMATISATION_FILE))
for line in input_file:
line = line.decode('iso-8859-1').strip().split('\t')
assert len(line) == 2
germanet_db.lemmatiser.insert(dict(list(zip(('word', 'lemma'), line))))
num_lemmas += 1
input_file.close()
# index the collection on 'word'
germanet_db.lemmatiser.create_index('word')
print('Inserted {0} lemmatiser entries.'.format(num_lemmas))
|
Creates the lemmatiser collection in the given MongoDB instance
using the data derived from the Projekt deutscher Wortschatz.
Arguments:
- `germanet_db`: a pymongo.database.Database object
|
entailment
|
def insert_infocontent_data(germanet_db):
'''
For every synset in GermaNet, inserts count information derived
from SDEWAC.
Arguments:
- `germanet_db`: a pymongo.database.Database object
'''
gnet = germanet.GermaNet(germanet_db)
# use add one smoothing
gn_counts = defaultdict(lambda: 1.)
total_count = 1
input_file = gzip.open(os.path.join(os.path.dirname(__file__),
WORD_COUNT_FILE))
num_lines_read = 0
num_lines = 0
for line in input_file:
line = line.decode('utf-8').strip().split('\t')
num_lines += 1
if len(line) != 3:
continue
count, pos, word = line
num_lines_read += 1
count = int(count)
synsets = set(gnet.synsets(word, pos))
if not synsets:
continue
# Although Resnik (1995) suggests dividing count by the number
# of synsets, Patwardhan et al (2003) argue against doing
# this.
count = float(count) / len(synsets)
for synset in synsets:
total_count += count
paths = synset.hypernym_paths
scount = float(count) / len(paths)
for path in paths:
for ss in path:
gn_counts[ss._id] += scount
print('Read {0} of {1} lines from count file.'.format(num_lines_read,
num_lines))
print('Recorded counts for {0} synsets.'.format(len(gn_counts)))
print('Total count is {0}'.format(total_count))
input_file.close()
# update all the synset records in GermaNet
num_updates = 0
for synset in germanet_db.synsets.find():
synset['infocont'] = gn_counts[synset['_id']] / total_count
germanet_db.synsets.save(synset)
num_updates += 1
print('Updated {0} synsets.'.format(num_updates))
|
For every synset in GermaNet, inserts count information derived
from SDEWAC.
Arguments:
- `germanet_db`: a pymongo.database.Database object
|
entailment
|
def compute_max_min_depth(germanet_db):
'''
For every part of speech in GermaNet, computes the maximum
min_depth in that hierarchy.
Arguments:
- `germanet_db`: a pymongo.database.Database object
'''
gnet = germanet.GermaNet(germanet_db)
max_min_depths = defaultdict(lambda: -1)
for synset in gnet.all_synsets():
min_depth = synset.min_depth
if max_min_depths[synset.category] < min_depth:
max_min_depths[synset.category] = min_depth
if germanet_db.metainfo.count() == 0:
germanet_db.metainfo.insert({})
metainfo = germanet_db.metainfo.find_one()
metainfo['max_min_depths'] = max_min_depths
germanet_db.metainfo.save(metainfo)
print('Computed maximum min_depth for all parts of speech:')
print(u', '.join(u'{0}: {1}'.format(k, v) for (k, v) in
sorted(max_min_depths.items())).encode('utf-8'))
|
For every part of speech in GermaNet, computes the maximum
min_depth in that hierarchy.
Arguments:
- `germanet_db`: a pymongo.database.Database object
|
entailment
|
def main():
'''Main function.'''
usage = ('\n\n %prog [options] XML_PATH\n\nArguments:\n\n '
'XML_PATH the directory containing the '
'GermaNet .xml files')
parser = optparse.OptionParser(usage=usage)
parser.add_option('--host', default=None,
help='hostname or IP address of the MongoDB instance '
'where the GermaNet database will be inserted '
'(default: %default)')
parser.add_option('--port', type='int', default=None,
help='port number of the MongoDB instance where the '
'GermaNet database will be inserted (default: %default)')
parser.add_option('--database', dest='database_name', default='germanet',
help='the name of the database on the MongoDB instance '
'where GermaNet will be stored (default: %default)')
(options, args) = parser.parse_args()
if len(args) != 1:
parser.error("incorrect number of arguments")
sys.exit(1)
xml_path = args[0]
client = MongoClient(options.host, options.port)
germanet_db = client[options.database_name]
lex_files, gn_rels_file, wiktionary_files, ili_files = \
find_germanet_xml_files(xml_path)
insert_lexical_information(germanet_db, lex_files)
insert_relation_information(germanet_db, gn_rels_file)
insert_paraphrase_information(germanet_db, wiktionary_files)
insert_lemmatisation_data(germanet_db)
insert_infocontent_data(germanet_db)
compute_max_min_depth(germanet_db)
client.close()
|
Main function.
|
entailment
|
def handle_In(self, node):
'''in'''
try:
elts = node.elts
except AttributeError:
raise ParseError('Invalid value type for `in` operator: {0}'.format(node.__class__.__name__),
col_offset=node.col_offset)
return {'$in': list(map(self.field.handle, elts))}
|
in
|
entailment
|
def AssignVar(self, value):
"""Assign a value to this Value."""
self.value = value
# Call OnAssignVar on options.
[option.OnAssignVar() for option in self.options]
|
Assign a value to this Value.
|
entailment
|
def Parse(self, value):
"""Parse a 'Value' declaration.
Args:
value: String line from a template file, must begin with 'Value '.
Raises:
TextFSMTemplateError: Value declaration contains an error.
"""
value_line = value.split(' ')
if len(value_line) < 3:
raise TextFSMTemplateError('Expect at least 3 tokens on line.')
if not value_line[2].startswith('('):
# Options are present
options = value_line[1]
for option in options.split(','):
self._AddOption(option)
# Call option OnCreateOptions callbacks
[option.OnCreateOptions() for option in self.options]
self.name = value_line[2]
self.regex = ' '.join(value_line[3:])
else:
# There were no valid options, so there are no options.
# Treat this argument as the name.
self.name = value_line[1]
self.regex = ' '.join(value_line[2:])
if len(self.name) > self.max_name_len:
raise TextFSMTemplateError(
"Invalid Value name '%s' or name too long." % self.name)
if (not re.match(r'^\(.*\)$', self.regex) or
self.regex.count('(') != self.regex.count(')')):
raise TextFSMTemplateError(
"Value '%s' must be contained within a '()' pair." % self.regex)
self.template = re.sub(r'^\(', '(?P<%s>' % self.name, self.regex)
|
Parse a 'Value' declaration.
Args:
value: String line from a template file, must begin with 'Value '.
Raises:
TextFSMTemplateError: Value declaration contains an error.
|
entailment
|
def _CheckLine(self, line):
"""Passes the line through each rule until a match is made.
Args:
line: A string, the current input line.
"""
for rule in self._cur_state:
matched = self._CheckRule(rule, line)
if matched:
for value in matched.groupdict():
self._AssignVar(matched, value)
if self._Operations(rule):
# Not a Continue so check for state transition.
if rule.new_state:
if rule.new_state not in ('End', 'EOF'):
self._cur_state = self.states[rule.new_state]
self._cur_state_name = rule.new_state
break
|
Passes the line through each rule until a match is made.
Args:
line: A string, the current input line.
|
entailment
|
def _make_graphite_api_points_list(influxdb_data):
"""Make graphite-api data points dictionary from Influxdb ResultSet data"""
_data = {}
for key in influxdb_data.keys():
_data[key[0]] = [(datetime.datetime.fromtimestamp(float(d['time'])),
d['value']) for d in influxdb_data.get_points(key[0])]
return _data
|
Make graphite-api data points dictionary from Influxdb ResultSet data
|
entailment
|
def _setup_logger(self, level, log_file):
"""Setup log level and log file if set"""
if logger.handlers:
return
level = getattr(logging, level.upper())
logger.setLevel(level)
formatter = logging.Formatter(
'[%(levelname)s] %(asctime)s - %(module)s.%(funcName)s() - %(message)s')
handler = logging.StreamHandler()
logger.addHandler(handler)
handler.setFormatter(formatter)
if not log_file:
return
try:
handler = TimedRotatingFileHandler(log_file)
except IOError:
logger.error("Could not write to %s, falling back to stdout",
log_file)
else:
logger.addHandler(handler)
handler.setFormatter(formatter)
|
Setup log level and log file if set
|
entailment
|
def compile_regex(self, fmt, query):
"""Turn glob (graphite) queries into compiled regex
        * becomes [^\.]* (a glob star does not cross dots)
        . becomes \.
        {a,b} becomes (a|b)
        fmt argument is so that caller can control anchoring (must contain exactly one {0})"""
return re.compile(fmt.format(
query.pattern.replace('.', '\.').replace('*', '[^\.]*').replace(
'{', '(').replace(',', '|').replace('}', ')')
))
|
Turn glob (graphite) queries into compiled regex
* becomes [^\.]* (a glob star does not cross dots)
. becomes \.
{a,b} becomes (a|b)
fmt argument is so that caller can control anchoring (must contain exactly one {0})
|
entailment
|
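A standalone sketch of the same glob-to-regex translation; it mirrors the replacement chain in the method above rather than calling it, since the method lives on a finder class and expects a query object.

import re

def glob_to_regex(pattern, fmt='^{0}$'):
    # Same replacement chain as compile_regex above, with anchoring via fmt.
    return re.compile(fmt.format(
        pattern.replace('.', r'\.').replace('*', r'[^\.]*').replace(
            '{', '(').replace(',', '|').replace('}', ')')))

rx = glob_to_regex('servers.*.cpu.{user,system}')
print(bool(rx.match('servers.web01.cpu.user')))       # True
print(bool(rx.match('servers.web01.load.longterm')))  # False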
def find(expression, schema=None):
'''
Gets an <expression> and optional <schema>.
<expression> should be a string of python code.
<schema> should be a dictionary mapping field names to types.
'''
parser = SchemaFreeParser() if schema is None else SchemaAwareParser(schema)
return parser.parse(expression)
|
Gets an <expression> and optional <schema>.
<expression> should be a string of python code.
<schema> should be a dictionary mapping field names to types.
|
entailment
|
def sort(fields):
'''
Gets a list of <fields> to sort by.
Also supports getting a single string for sorting by one field.
Reverse sort is supported by appending '-' to the field name.
Example: sort(['age', '-height']) will sort by ascending age and descending height.
'''
from pymongo import ASCENDING, DESCENDING
from bson import SON
if isinstance(fields, str):
fields = [fields]
if not hasattr(fields, '__iter__'):
raise ValueError("expected a list of strings or a string. not a {}".format(type(fields)))
sort = []
for field in fields:
if field.startswith('-'):
field = field[1:]
sort.append((field, DESCENDING))
continue
elif field.startswith('+'):
field = field[1:]
sort.append((field, ASCENDING))
return {'$sort': SON(sort)}
|
Gets a list of <fields> to sort by.
Also supports getting a single string for sorting by one field.
Reverse sort is supported by appending '-' to the field name.
Example: sort(['age', '-height']) will sort by ascending age and descending height.
|
entailment
|
def ordered(obj):
"""
Return sorted version of nested dicts/lists for comparing.
Modified from:
http://stackoverflow.com/a/25851972
"""
if isinstance(obj, collections.abc.Mapping):
return sorted((k, ordered(v)) for k, v in obj.items())
# Special case str since it's a collections.abc.Iterable
elif isinstance(obj, str):
return obj
elif isinstance(obj, collections.abc.Iterable):
return sorted(ordered(x) for x in obj)
else:
return obj
|
Return sorted version of nested dicts/lists for comparing.
Modified from:
http://stackoverflow.com/a/25851972
|
entailment
|
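A quick usage check, assuming `ordered` above is in scope: two documents that differ only in key and list order compare equal once normalised.

a = {'tags': ['x', 'y'], 'meta': {'id': 1, 'name': 'foo'}}
b = {'meta': {'name': 'foo', 'id': 1}, 'tags': ['y', 'x']}

print(a == b)                    # False - the lists differ in order
print(ordered(a) == ordered(b))  # True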
def walk_subclasses(root):
"""Does not yield the input class"""
classes = [root]
visited = set()
while classes:
cls = classes.pop()
if cls is type or cls in visited:
continue
classes.extend(cls.__subclasses__())
visited.add(cls)
if cls is not root:
yield cls
|
Does not yield the input class
|
entailment
|
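A tiny demonstration, assuming `walk_subclasses` above is in scope; note that the root class itself is not yielded.

class Base:
    pass

class Child(Base):
    pass

class GrandChild(Child):
    pass

print(sorted(cls.__name__ for cls in walk_subclasses(Base)))
# ['Child', 'GrandChild']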
def dump_key(engine, obj):
"""dump the hash (and range, if there is one) key(s) of an object into
a dynamo-friendly format.
returns {dynamo_name: {type: value} for dynamo_name in hash/range keys}
"""
key = {}
for key_column in obj.Meta.keys:
key_value = getattr(obj, key_column.name, missing)
if key_value is missing:
raise MissingKey("{!r} is missing {}: {!r}".format(
obj, "hash_key" if key_column.hash_key else "range_key",
key_column.name
))
# noinspection PyProtectedMember
key_value = engine._dump(key_column.typedef, key_value)
key[key_column.dynamo_name] = key_value
return key
|
dump the hash (and range, if there is one) key(s) of an object into
a dynamo-friendly format.
returns {dynamo_name: {type: value} for dynamo_name in hash/range keys}
|
entailment
|
def new_expiry(days=DEFAULT_PASTE_LIFETIME_DAYS):
"""Return an expiration `days` in the future"""
now = delorean.Delorean()
return now + datetime.timedelta(days=days)
|
Return an expiration `days` in the future
|
entailment
|
def sync(obj, engine):
"""Mark the object as having been persisted at least once.
Store the latest snapshot of all marked values."""
snapshot = Condition()
# Only expect values (or lack of a value) for columns that have been explicitly set
for column in sorted(_obj_tracking[obj]["marked"], key=lambda col: col.dynamo_name):
value = getattr(obj, column.name, None)
value = engine._dump(column.typedef, value)
condition = column == value
# The renderer shouldn't try to dump the value again.
# We're dumping immediately in case the value is mutable,
# such as a set or (many) custom data types.
condition.dumped = True
snapshot &= condition
_obj_tracking[obj]["snapshot"] = snapshot
|
Mark the object as having been persisted at least once.
Store the latest snapshot of all marked values.
|
entailment
|
def printable_name(column, path=None):
"""Provided for debug output when rendering conditions.
User.name[3]["foo"][0]["bar"] -> name[3].foo[0].bar
"""
pieces = [column.name]
path = path or path_of(column)
for segment in path:
if isinstance(segment, str):
pieces.append(segment)
else:
pieces[-1] += "[{}]".format(segment)
return ".".join(pieces)
|
Provided for debug output when rendering conditions.
User.name[3]["foo"][0]["bar"] -> name[3].foo[0].bar
|
entailment
|
def iter_conditions(condition):
"""Yield all conditions within the given condition.
If the root condition is and/or/not, it is not yielded (unless a cyclic reference to it is found)."""
conditions = list()
visited = set()
# Has to be split out, since we don't want to visit the root (for cyclic conditions)
# but we don't want to yield it (if it's non-cyclic) because this only yields inner conditions
if condition.operation in {"and", "or"}:
conditions.extend(reversed(condition.values))
elif condition.operation == "not":
conditions.append(condition.values[0])
else:
conditions.append(condition)
while conditions:
condition = conditions.pop()
if condition in visited:
continue
visited.add(condition)
yield condition
if condition.operation in {"and", "or", "not"}:
conditions.extend(reversed(condition.values))
|
Yield all conditions within the given condition.
If the root condition is and/or/not, it is not yielded (unless a cyclic reference to it is found).
|
entailment
|
def iter_columns(condition):
"""
Yield all columns in the condition or its inner conditions.
Unwraps proxies when the condition's column (or any of its values) include paths.
"""
# Like iter_conditions, this can't live in each condition without going possibly infinite on the
# recursion, or passing the visited set through every call. That makes the signature ugly, so we
# take care of it here. Luckily, it's pretty easy to leverage iter_conditions and just unpack the
# actual columns.
visited = set()
for condition in iter_conditions(condition):
if condition.operation in ("and", "or", "not"):
continue
# Non-meta conditions always have a column, and each of values has the potential to be a column.
# Comparison will only have a list of len 1, but it's simpler to just iterate values and check each
# unwrap proxies created for paths
column = proxied(condition.column)
# special case for None
# this could also have skipped on isinstance(condition, Condition)
# but this is slightly more flexible for users to create their own None-sentinel Conditions
if column is None:
continue
if column not in visited:
visited.add(column)
yield column
for value in condition.values:
if isinstance(value, ComparisonMixin):
if value not in visited:
visited.add(value)
yield value
|
Yield all columns in the condition or its inner conditions.
Unwraps proxies when the condition's column (or any of its values) include paths.
|
entailment
|