Dataset columns (type and observed value-size range):

repository_name             string   7 – 55 chars
func_path_in_repository     string   4 – 223 chars
func_name                   string   1 – 134 chars
whole_func_string           string   75 – 104k chars
language                    class    1 value
func_code_string            string   75 – 104k chars
func_code_tokens            list     19 – 28.4k items
func_documentation_string   string   1 – 46.9k chars
func_documentation_tokens   list     1 – 1.97k items
split_name                  class    1 value
func_code_url               string   87 – 315 chars
chrisspen/dtree
dtree.py
Forest.best_oob_mae_weight
def best_oob_mae_weight(trees):
    """
    Returns weights so that the tree with the smallest out-of-bag mean
    absolute error receives all of the weight.
    """
    best = (+1e999999, None)
    for tree in trees:
        oob_mae = tree.out_of_bag_mae
        if oob_mae is None or oob_mae.mean is None:
            continue
        best = min(best, (oob_mae.mean, tree))
    best_mae, best_tree = best
    if best_tree is None:
        return
    return [(1.0, best_tree)]
python
[ "def", "best_oob_mae_weight", "(", "trees", ")", ":", "best", "=", "(", "+", "1e999999", ",", "None", ")", "for", "tree", "in", "trees", ":", "oob_mae", "=", "tree", ".", "out_of_bag_mae", "if", "oob_mae", "is", "None", "or", "oob_mae", ".", "mean", "is", "None", ":", "continue", "best", "=", "min", "(", "best", ",", "(", "oob_mae", ".", "mean", ",", "tree", ")", ")", "best_mae", ",", "best_tree", "=", "best", "if", "best_tree", "is", "None", ":", "return", "return", "[", "(", "1.0", ",", "best_tree", ")", "]" ]
Returns weights so that the tree with the smallest out-of-bag mean absolute error receives all of the weight.
[ "Returns", "weights", "so", "that", "the", "tree", "with", "smallest", "out", "-", "of", "-", "bag", "mean", "absolute", "error" ]
train
https://github.com/chrisspen/dtree/blob/9e9c9992b22ad9a7e296af7e6837666b05db43ef/dtree.py#L1492-L1505
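A minimal usage sketch for the record above. The stub classes are hypothetical stand-ins for dtree's tree and CDist objects, and the function is assumed callable at module level (in the source it is a member of Forest):

    from collections import namedtuple

    CDistStub = namedtuple('CDistStub', ['mean'])    # stand-in for dtree's CDist
    TreeStub = namedtuple('TreeStub', ['name', 'out_of_bag_mae'])

    trees = [
        TreeStub('a', CDistStub(mean=0.8)),
        TreeStub('b', CDistStub(mean=0.3)),   # smallest OOB MAE: takes all weight
        TreeStub('c', None),                  # no OOB estimate yet: skipped
    ]

    assert best_oob_mae_weight(trees) == [(1.0, trees[1])]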
chrisspen/dtree
dtree.py
Forest.mean_oob_mae_weight
def mean_oob_mae_weight(trees):
    """
    Returns weights proportional to the out-of-bag mean absolute error
    for each tree.
    """
    weights = []
    active_trees = []
    for tree in trees:
        oob_mae = tree.out_of_bag_mae
        if oob_mae is None or oob_mae.mean is None:
            continue
        weights.append(oob_mae.mean)
        active_trees.append(tree)
    if not active_trees:
        return
    weights = normalize(weights)
    return zip(weights, active_trees)
python
[ "def", "mean_oob_mae_weight", "(", "trees", ")", ":", "weights", "=", "[", "]", "active_trees", "=", "[", "]", "for", "tree", "in", "trees", ":", "oob_mae", "=", "tree", ".", "out_of_bag_mae", "if", "oob_mae", "is", "None", "or", "oob_mae", ".", "mean", "is", "None", ":", "continue", "weights", ".", "append", "(", "oob_mae", ".", "mean", ")", "active_trees", ".", "append", "(", "tree", ")", "if", "not", "active_trees", ":", "return", "weights", "=", "normalize", "(", "weights", ")", "return", "zip", "(", "weights", ",", "active_trees", ")" ]
Returns weights proportional to the out-of-bag mean absolute error for each tree.
[ "Returns", "weights", "proportional", "to", "the", "out", "-", "of", "-", "bag", "mean", "absolute", "error", "for", "each", "tree", "." ]
train
https://github.com/chrisspen/dtree/blob/9e9c9992b22ad9a7e296af7e6837666b05db43ef/dtree.py#L1508-L1523
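The ``normalize`` helper is defined elsewhere in dtree.py and is not shown in this record. A plausible stand-in scales the values to sum to 1; note that, taken literally, error-proportional weights favour the highest-error trees, and whether the real helper inverts them is not visible here:

    def normalize(seq):
        # Plausible stand-in: scale non-negative values so they sum to 1.0.
        total = float(sum(seq))
        return [value / total for value in seq]

    print(normalize([1.0, 3.0]))   # [0.25, 0.75]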
chrisspen/dtree
dtree.py
Forest._grow_trees
def _grow_trees(self):
    """
    Adds new trees to the forest according to the specified growth method.
    """
    if self.grow_method == GROW_AUTO_INCREMENTAL:
        self.tree_kwargs['auto_grow'] = True

    while len(self.trees) < self.size:
        self.trees.append(Tree(data=self.data, **self.tree_kwargs))
python
[ "def", "_grow_trees", "(", "self", ")", ":", "if", "self", ".", "grow_method", "==", "GROW_AUTO_INCREMENTAL", ":", "self", ".", "tree_kwargs", "[", "'auto_grow'", "]", "=", "True", "while", "len", "(", "self", ".", "trees", ")", "<", "self", ".", "size", ":", "self", ".", "trees", ".", "append", "(", "Tree", "(", "data", "=", "self", ".", "data", ",", "*", "*", "self", ".", "tree_kwargs", ")", ")" ]
Adds new trees to the forest according to the specified growth method.
[ "Adds", "new", "trees", "to", "the", "forest", "according", "to", "the", "specified", "growth", "method", "." ]
train
https://github.com/chrisspen/dtree/blob/9e9c9992b22ad9a7e296af7e6837666b05db43ef/dtree.py#L1525-L1533
chrisspen/dtree
dtree.py
Forest.predict
def predict(self, record):
    """
    Attempts to predict the value of the class attribute by aggregating
    the predictions of each tree.

    Tree contributions are combined using ``self.weighting_method``, a
    callable that takes a list of trees and returns a list of
    (weight, tree) pairs.
    """
    # Get raw predictions.
    # {tree: raw prediction}
    predictions = {}
    for tree in self.trees:
        _p = tree.predict(record)
        if _p is None:
            continue
        if isinstance(_p, CDist):
            if _p.mean is None:
                continue
        elif isinstance(_p, DDist):
            if not _p.count:
                continue
        predictions[tree] = _p
    if not predictions:
        return

    # Normalize weights and aggregate final prediction.
    weights = self.weighting_method(predictions.keys())
    if not weights:
        return
    # assert sum(weights) == 1.0, "Sum of weights must equal 1."

    if self.data.is_continuous_class:
        # Merge continuous class predictions.
        total = sum(w * predictions[tree].mean for w, tree in weights)
    else:
        # Merge discrete class predictions.
        total = DDist()
        for weight, tree in weights:
            prediction = predictions[tree]
            for cls_value, cls_prob in prediction.probs:
                total.add(cls_value, cls_prob * weight)
    return total
python
[ "def", "predict", "(", "self", ",", "record", ")", ":", "# Get raw predictions.", "# {tree:raw prediction}", "predictions", "=", "{", "}", "for", "tree", "in", "self", ".", "trees", ":", "_p", "=", "tree", ".", "predict", "(", "record", ")", "if", "_p", "is", "None", ":", "continue", "if", "isinstance", "(", "_p", ",", "CDist", ")", ":", "if", "_p", ".", "mean", "is", "None", ":", "continue", "elif", "isinstance", "(", "_p", ",", "DDist", ")", ":", "if", "not", "_p", ".", "count", ":", "continue", "predictions", "[", "tree", "]", "=", "_p", "if", "not", "predictions", ":", "return", "# Normalize weights and aggregate final prediction.", "weights", "=", "self", ".", "weighting_method", "(", "predictions", ".", "keys", "(", ")", ")", "if", "not", "weights", ":", "return", "# assert sum(weights) == 1.0, \"Sum of weights must equal 1.\"", "if", "self", ".", "data", ".", "is_continuous_class", ":", "# Merge continuous class predictions.", "total", "=", "sum", "(", "w", "*", "predictions", "[", "tree", "]", ".", "mean", "for", "w", ",", "tree", "in", "weights", ")", "else", ":", "# Merge discrete class predictions.", "total", "=", "DDist", "(", ")", "for", "weight", ",", "tree", "in", "weights", ":", "prediction", "=", "predictions", "[", "tree", "]", "for", "cls_value", ",", "cls_prob", "in", "prediction", ".", "probs", ":", "total", ".", "add", "(", "cls_value", ",", "cls_prob", "*", "weight", ")", "return", "total" ]
Attempts to predict the value of the class attribute by aggregating the predictions of each tree. Tree contributions are combined using ``self.weighting_method``, a callable that takes a list of trees and returns a list of (weight, tree) pairs.
[ "Attempts", "to", "predict", "the", "value", "of", "the", "class", "attribute", "by", "aggregating", "the", "predictions", "of", "each", "tree", ".", "Parameters", ":", "weighting_formula", ":", "=", "a", "callable", "that", "takes", "a", "list", "of", "trees", "and", "returns", "a", "list", "of", "weights", "." ]
train
https://github.com/chrisspen/dtree/blob/9e9c9992b22ad9a7e296af7e6837666b05db43ef/dtree.py#L1539-L1582
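The discrete-class branch above forms a weighted mixture of per-tree class distributions. A standalone sketch of that merge step, with plain dicts standing in for dtree's DDist (all names illustrative):

    def merge_discrete(weights, predictions):
        # weights: [(weight, tree)]; predictions: {tree: {class: probability}}
        total = {}
        for weight, tree in weights:
            for cls_value, cls_prob in predictions[tree].items():
                total[cls_value] = total.get(cls_value, 0.0) + cls_prob * weight
        return total

    weights = [(0.7, 't1'), (0.3, 't2')]
    predictions = {'t1': {'yes': 0.9, 'no': 0.1}, 't2': {'yes': 0.4, 'no': 0.6}}
    print(merge_discrete(weights, predictions))   # {'yes': 0.75, 'no': 0.25}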
chrisspen/dtree
dtree.py
Forest.train
def train(self, record):
    """
    Updates the trees with the given training record.
    """
    self._fell_trees()
    self._grow_trees()
    for tree in self.trees:
        if random.random() < self.sample_ratio:
            tree.train(record)
        else:
            tree.out_of_bag_samples.append(record)
            while len(tree.out_of_bag_samples) > self.max_out_of_bag_samples:
                tree.out_of_bag_samples.pop(0)
python
[ "def", "train", "(", "self", ",", "record", ")", ":", "self", ".", "_fell_trees", "(", ")", "self", ".", "_grow_trees", "(", ")", "for", "tree", "in", "self", ".", "trees", ":", "if", "random", ".", "random", "(", ")", "<", "self", ".", "sample_ratio", ":", "tree", ".", "train", "(", "record", ")", "else", ":", "tree", ".", "out_of_bag_samples", ".", "append", "(", "record", ")", "while", "len", "(", "tree", ".", "out_of_bag_samples", ")", ">", "self", ".", "max_out_of_bag_samples", ":", "tree", ".", "out_of_bag_samples", ".", "pop", "(", "0", ")" ]
Updates the trees with the given training record.
[ "Updates", "the", "trees", "with", "the", "given", "training", "record", "." ]
train
https://github.com/chrisspen/dtree/blob/9e9c9992b22ad9a7e296af7e6837666b05db43ef/dtree.py#L1613-L1625
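Each record either trains a tree (with probability sample_ratio) or lands in that tree's bounded out-of-bag buffer. A sketch of that routing, using collections.deque(maxlen=...) as an illustrative alternative to the list-and-pop(0) loop above (pop(0) on a list is O(n); a bounded deque drops the oldest entry automatically):

    import random
    from collections import deque

    sample_ratio = 0.8                # probability a record is in-bag
    oob_buffer = deque(maxlen=100)    # bounded out-of-bag sample buffer

    def route(record, train_fn):
        if random.random() < sample_ratio:
            train_fn(record)          # in-bag: update the tree
        else:
            oob_buffer.append(record) # out-of-bag: keep for error estimates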
dwavesystems/dwave-cloud-client
dwave/cloud/config.py
get_configfile_paths
def get_configfile_paths(system=True, user=True, local=True, only_existing=True):
    """Return a list of local configuration file paths.

    Search paths for configuration files on the local system
    are based on homebase_ and depend on operating system; for example,
    for Linux systems these might include ``dwave.conf`` in the current
    working directory (CWD), user-local ``.config/dwave/``, and
    system-wide ``/etc/dwave/``.

    .. _homebase: https://github.com/dwavesystems/homebase

    Args:
        system (boolean, default=True):
            Search for system-wide configuration files.
        user (boolean, default=True):
            Search for user-local configuration files.
        local (boolean, default=True):
            Search for local configuration files (in CWD).
        only_existing (boolean, default=True):
            Return only paths for files that exist on the local system.

    Returns:
        list[str]:
            List of configuration file paths.

    Examples:
        This example displays all paths to configuration files on a Windows
        system running Python 2.7 and then finds the single existing
        configuration file.

        >>> import dwave.cloud as dc
        >>> # Display paths
        >>> dc.config.get_configfile_paths(only_existing=False)   # doctest: +SKIP
        [u'C:\\ProgramData\\dwavesystem\\dwave\\dwave.conf',
         u'C:\\Users\\jane\\AppData\\Local\\dwavesystem\\dwave\\dwave.conf',
         '.\\dwave.conf']
        >>> # Find existing files
        >>> dc.config.get_configfile_paths()   # doctest: +SKIP
        [u'C:\\Users\\jane\\AppData\\Local\\dwavesystem\\dwave\\dwave.conf']

    """
    candidates = []

    # system-wide has the lowest priority, `/etc/dwave/dwave.conf`
    if system:
        candidates.extend(homebase.site_config_dir_list(
            app_author=CONF_AUTHOR, app_name=CONF_APP,
            use_virtualenv=False, create=False))

    # user-local will override it, `~/.config/dwave/dwave.conf`
    if user:
        candidates.append(homebase.user_config_dir(
            app_author=CONF_AUTHOR, app_name=CONF_APP, roaming=False,
            use_virtualenv=False, create=False))

    # highest priority (overrides all): `./dwave.conf`
    if local:
        candidates.append(".")

    paths = [os.path.join(base, CONF_FILENAME) for base in candidates]

    if only_existing:
        paths = list(filter(os.path.exists, paths))

    return paths
python
[ "def", "get_configfile_paths", "(", "system", "=", "True", ",", "user", "=", "True", ",", "local", "=", "True", ",", "only_existing", "=", "True", ")", ":", "candidates", "=", "[", "]", "# system-wide has the lowest priority, `/etc/dwave/dwave.conf`", "if", "system", ":", "candidates", ".", "extend", "(", "homebase", ".", "site_config_dir_list", "(", "app_author", "=", "CONF_AUTHOR", ",", "app_name", "=", "CONF_APP", ",", "use_virtualenv", "=", "False", ",", "create", "=", "False", ")", ")", "# user-local will override it, `~/.config/dwave/dwave.conf`", "if", "user", ":", "candidates", ".", "append", "(", "homebase", ".", "user_config_dir", "(", "app_author", "=", "CONF_AUTHOR", ",", "app_name", "=", "CONF_APP", ",", "roaming", "=", "False", ",", "use_virtualenv", "=", "False", ",", "create", "=", "False", ")", ")", "# highest priority (overrides all): `./dwave.conf`", "if", "local", ":", "candidates", ".", "append", "(", "\".\"", ")", "paths", "=", "[", "os", ".", "path", ".", "join", "(", "base", ",", "CONF_FILENAME", ")", "for", "base", "in", "candidates", "]", "if", "only_existing", ":", "paths", "=", "list", "(", "filter", "(", "os", ".", "path", ".", "exists", ",", "paths", ")", ")", "return", "paths" ]
Return a list of local configuration file paths.

Search paths for configuration files on the local system are based on
homebase_ and depend on operating system; for example, for Linux systems
these might include ``dwave.conf`` in the current working directory (CWD),
user-local ``.config/dwave/``, and system-wide ``/etc/dwave/``.

.. _homebase: https://github.com/dwavesystems/homebase

Args:
    system (boolean, default=True):
        Search for system-wide configuration files.
    user (boolean, default=True):
        Search for user-local configuration files.
    local (boolean, default=True):
        Search for local configuration files (in CWD).
    only_existing (boolean, default=True):
        Return only paths for files that exist on the local system.

Returns:
    list[str]:
        List of configuration file paths.

Examples:
    This example displays all paths to configuration files on a Windows
    system running Python 2.7 and then finds the single existing
    configuration file.

    >>> import dwave.cloud as dc
    >>> # Display paths
    >>> dc.config.get_configfile_paths(only_existing=False)   # doctest: +SKIP
    [u'C:\\ProgramData\\dwavesystem\\dwave\\dwave.conf',
     u'C:\\Users\\jane\\AppData\\Local\\dwavesystem\\dwave\\dwave.conf',
     '.\\dwave.conf']
    >>> # Find existing files
    >>> dc.config.get_configfile_paths()   # doctest: +SKIP
    [u'C:\\Users\\jane\\AppData\\Local\\dwavesystem\\dwave\\dwave.conf']
[ "Return", "a", "list", "of", "local", "configuration", "file", "paths", "." ]
train
https://github.com/dwavesystems/dwave-cloud-client/blob/df3221a8385dc0c04d7b4d84f740bf3ad6706230/dwave/cloud/config.py#L239-L304
dwavesystems/dwave-cloud-client
dwave/cloud/config.py
get_default_configfile_path
def get_default_configfile_path():
    """Return the default configuration-file path.

    Typically returns a user-local configuration file; e.g:
    ``~/.config/dwave/dwave.conf``.

    Returns:
        str:
            Configuration file path.

    Examples:
        This example displays the default configuration file on an Ubuntu Unix
        system running IPython 2.7.

        >>> import dwave.cloud as dc
        >>> # Display paths
        >>> dc.config.get_configfile_paths(only_existing=False)   # doctest: +SKIP
        ['/etc/xdg/xdg-ubuntu/dwave/dwave.conf',
         '/usr/share/upstart/xdg/dwave/dwave.conf',
         '/etc/xdg/dwave/dwave.conf',
         '/home/mary/.config/dwave/dwave.conf',
         './dwave.conf']
        >>> # Find default configuration path
        >>> dc.config.get_default_configfile_path()   # doctest: +SKIP
        '/home/mary/.config/dwave/dwave.conf'

    """
    base = homebase.user_config_dir(
        app_author=CONF_AUTHOR, app_name=CONF_APP, roaming=False,
        use_virtualenv=False, create=False)
    path = os.path.join(base, CONF_FILENAME)
    return path
python
[ "def", "get_default_configfile_path", "(", ")", ":", "base", "=", "homebase", ".", "user_config_dir", "(", "app_author", "=", "CONF_AUTHOR", ",", "app_name", "=", "CONF_APP", ",", "roaming", "=", "False", ",", "use_virtualenv", "=", "False", ",", "create", "=", "False", ")", "path", "=", "os", ".", "path", ".", "join", "(", "base", ",", "CONF_FILENAME", ")", "return", "path" ]
Return the default configuration-file path.

Typically returns a user-local configuration file; e.g:
``~/.config/dwave/dwave.conf``.

Returns:
    str:
        Configuration file path.

Examples:
    This example displays the default configuration file on an Ubuntu Unix
    system running IPython 2.7.

    >>> import dwave.cloud as dc
    >>> # Display paths
    >>> dc.config.get_configfile_paths(only_existing=False)   # doctest: +SKIP
    ['/etc/xdg/xdg-ubuntu/dwave/dwave.conf',
     '/usr/share/upstart/xdg/dwave/dwave.conf',
     '/etc/xdg/dwave/dwave.conf',
     '/home/mary/.config/dwave/dwave.conf',
     './dwave.conf']
    >>> # Find default configuration path
    >>> dc.config.get_default_configfile_path()   # doctest: +SKIP
    '/home/mary/.config/dwave/dwave.conf'
[ "Return", "the", "default", "configuration", "-", "file", "path", "." ]
train
https://github.com/dwavesystems/dwave-cloud-client/blob/df3221a8385dc0c04d7b4d84f740bf3ad6706230/dwave/cloud/config.py#L336-L367
dwavesystems/dwave-cloud-client
dwave/cloud/config.py
load_config_from_files
def load_config_from_files(filenames=None):
    """Load D-Wave Cloud Client configuration from a list of files.

    .. note:: This method is not standardly used to set up D-Wave Cloud
        Client configuration. It is recommended you use
        :meth:`.Client.from_config` or :meth:`.config.load_config` instead.

    Configuration files comply with standard Windows INI-like format,
    parsable with Python's :mod:`configparser`. A section called
    ``defaults`` contains default values inherited by other sections.

    Each filename in the list (each configuration file loaded) progressively
    upgrades the final configuration, on a key by key basis, per each section.

    Args:
        filenames (list[str], default=None):
            D-Wave Cloud Client configuration files (paths and names).

            If ``None``, searches for a configuration file named ``dwave.conf``
            in all system-wide configuration directories, in the user-local
            configuration directory, and in the current working directory,
            following the user/system configuration paths of
            :func:`get_configfile_paths`.

    Returns:
        :obj:`~configparser.ConfigParser`:
            :class:`dict`-like mapping of configuration sections (profiles) to
            mapping of per-profile keys holding values.

    Raises:
        :exc:`~dwave.cloud.exceptions.ConfigFileReadError`:
            Config file specified or detected could not be opened or read.

        :exc:`~dwave.cloud.exceptions.ConfigFileParseError`:
            Config file parse failed.

    Examples:
        This example loads configurations from two files. One contains a
        default section with key/values that are overwritten by any profile
        section that contains that key/value; for example, profile dw2000b in
        file dwave_b.conf overwrites the default URL and client type, which
        profile dw2000a inherits from the defaults section, while profile
        dw2000a overwrites the API token that profile dw2000b inherits.

        The files, which are located in the current working directory, are
        (1) dwave_a.conf::

            [defaults]
            endpoint = https://url.of.some.dwavesystem.com/sapi
            client = qpu
            token = ABC-123456789123456789123456789

            [dw2000a]
            solver = EXAMPLE_2000Q_SYSTEM
            token = DEF-987654321987654321987654321

        and (2) dwave_b.conf::

            [dw2000b]
            endpoint = https://url.of.some.other.dwavesystem.com/sapi
            client = sw
            solver = EXAMPLE_2000Q_SYSTEM

        The following example code loads configuration from both these files,
        with the defined overrides and inheritance.

        .. code:: python

            >>> import dwave.cloud as dc
            >>> import sys
            >>> configuration = dc.config.load_config_from_files(["./dwave_a.conf", "./dwave_b.conf"])   # doctest: +SKIP
            >>> configuration.write(sys.stdout)    # doctest: +SKIP
            [defaults]
            endpoint = https://url.of.some.dwavesystem.com/sapi
            client = qpu
            token = ABC-123456789123456789123456789

            [dw2000a]
            solver = EXAMPLE_2000Q_SYSTEM
            token = DEF-987654321987654321987654321

            [dw2000b]
            endpoint = https://url.of.some.other.dwavesystem.com/sapi
            client = sw
            solver = EXAMPLE_2000Q_SYSTEM

    """
    if filenames is None:
        filenames = get_configfile_paths()

    config = configparser.ConfigParser(default_section="defaults")
    for filename in filenames:
        try:
            with open(filename, 'r') as f:
                config.read_file(f, filename)
        except (IOError, OSError):
            raise ConfigFileReadError("Failed to read {!r}".format(filename))
        except configparser.Error:
            raise ConfigFileParseError("Failed to parse {!r}".format(filename))
    return config
python
[ "def", "load_config_from_files", "(", "filenames", "=", "None", ")", ":", "if", "filenames", "is", "None", ":", "filenames", "=", "get_configfile_paths", "(", ")", "config", "=", "configparser", ".", "ConfigParser", "(", "default_section", "=", "\"defaults\"", ")", "for", "filename", "in", "filenames", ":", "try", ":", "with", "open", "(", "filename", ",", "'r'", ")", "as", "f", ":", "config", ".", "read_file", "(", "f", ",", "filename", ")", "except", "(", "IOError", ",", "OSError", ")", ":", "raise", "ConfigFileReadError", "(", "\"Failed to read {!r}\"", ".", "format", "(", "filename", ")", ")", "except", "configparser", ".", "Error", ":", "raise", "ConfigFileParseError", "(", "\"Failed to parse {!r}\"", ".", "format", "(", "filename", ")", ")", "return", "config" ]
Load D-Wave Cloud Client configuration from a list of files.

.. note:: This method is not standardly used to set up D-Wave Cloud Client
    configuration. It is recommended you use :meth:`.Client.from_config` or
    :meth:`.config.load_config` instead.

Configuration files comply with standard Windows INI-like format, parsable
with Python's :mod:`configparser`. A section called ``defaults`` contains
default values inherited by other sections.

Each filename in the list (each configuration file loaded) progressively
upgrades the final configuration, on a key by key basis, per each section.

Args:
    filenames (list[str], default=None):
        D-Wave Cloud Client configuration files (paths and names).

        If ``None``, searches for a configuration file named ``dwave.conf``
        in all system-wide configuration directories, in the user-local
        configuration directory, and in the current working directory,
        following the user/system configuration paths of
        :func:`get_configfile_paths`.

Returns:
    :obj:`~configparser.ConfigParser`:
        :class:`dict`-like mapping of configuration sections (profiles) to
        mapping of per-profile keys holding values.

Raises:
    :exc:`~dwave.cloud.exceptions.ConfigFileReadError`:
        Config file specified or detected could not be opened or read.

    :exc:`~dwave.cloud.exceptions.ConfigFileParseError`:
        Config file parse failed.

Examples:
    This example loads configurations from two files. One contains a default
    section with key/values that are overwritten by any profile section that
    contains that key/value; for example, profile dw2000b in file dwave_b.conf
    overwrites the default URL and client type, which profile dw2000a inherits
    from the defaults section, while profile dw2000a overwrites the API token
    that profile dw2000b inherits.

    The files, which are located in the current working directory, are
    (1) dwave_a.conf::

        [defaults]
        endpoint = https://url.of.some.dwavesystem.com/sapi
        client = qpu
        token = ABC-123456789123456789123456789

        [dw2000a]
        solver = EXAMPLE_2000Q_SYSTEM
        token = DEF-987654321987654321987654321

    and (2) dwave_b.conf::

        [dw2000b]
        endpoint = https://url.of.some.other.dwavesystem.com/sapi
        client = sw
        solver = EXAMPLE_2000Q_SYSTEM

    The following example code loads configuration from both these files, with
    the defined overrides and inheritance.

    .. code:: python

        >>> import dwave.cloud as dc
        >>> import sys
        >>> configuration = dc.config.load_config_from_files(["./dwave_a.conf", "./dwave_b.conf"])   # doctest: +SKIP
        >>> configuration.write(sys.stdout)    # doctest: +SKIP
        [defaults]
        endpoint = https://url.of.some.dwavesystem.com/sapi
        client = qpu
        token = ABC-123456789123456789123456789

        [dw2000a]
        solver = EXAMPLE_2000Q_SYSTEM
        token = DEF-987654321987654321987654321

        [dw2000b]
        endpoint = https://url.of.some.other.dwavesystem.com/sapi
        client = sw
        solver = EXAMPLE_2000Q_SYSTEM
[ "Load", "D", "-", "Wave", "Cloud", "Client", "configuration", "from", "a", "list", "of", "files", "." ]
train
https://github.com/dwavesystems/dwave-cloud-client/blob/df3221a8385dc0c04d7b4d84f740bf3ad6706230/dwave/cloud/config.py#L370-L468
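A standalone demonstration of the per-key override behaviour described above, using configparser directly; each read_string call stands in for one file in the list:

    import configparser

    config = configparser.ConfigParser(default_section="defaults")
    config.read_string("[defaults]\ntoken = AAA\n\n[prof]\nsolver = S1\n")   # first "file"
    config.read_string("[prof]\nsolver = S2\n")                              # second "file"

    print(config['prof']['solver'])   # S2: the later file wins, key by key
    print(config['prof']['token'])    # AAA: inherited from [defaults]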
dwavesystems/dwave-cloud-client
dwave/cloud/config.py
load_profile_from_files
def load_profile_from_files(filenames=None, profile=None):
    """Load a profile from a list of D-Wave Cloud Client configuration files.

    .. note:: This method is not standardly used to set up D-Wave Cloud
        Client configuration. It is recommended you use
        :meth:`.Client.from_config` or :meth:`.config.load_config` instead.

    Configuration files comply with standard Windows INI-like format,
    parsable with Python's :mod:`configparser`.

    Each file in the list is progressively searched until the first profile
    is found. This function does not input profile information from
    environment variables.

    Args:
        filenames (list[str], default=None):
            D-Wave cloud client configuration files (path and name). If
            ``None``, searches for existing configuration files in the
            standard directories of :func:`get_configfile_paths`.

        profile (str, default=None):
            Name of profile to return from reading the configuration from the
            specified configuration file(s). If ``None``, progressively falls
            back in the following order:

            (1) ``profile`` key following ``[defaults]`` section.
            (2) First non-``[defaults]`` section.
            (3) ``[defaults]`` section.

    Returns:
        dict:
            Mapping of configuration keys to values. If no valid
            config/profile is found, returns an empty dict.

    Raises:
        :exc:`~dwave.cloud.exceptions.ConfigFileReadError`:
            Config file specified or detected could not be opened or read.

        :exc:`~dwave.cloud.exceptions.ConfigFileParseError`:
            Config file parse failed.

        :exc:`ValueError`:
            Profile name not found.

    Examples:
        This example loads a profile based on configurations from two files.
        It finds the first profile, dw2000a, in the first file, dwave_a.conf,
        and adds to the values of the defaults section, overwriting the
        existing client value, while ignoring the profile in the second file,
        dwave_b.conf.

        The files, which are located in the current working directory, are
        (1) dwave_a.conf::

            [defaults]
            endpoint = https://url.of.some.dwavesystem.com/sapi
            client = qpu
            token = ABC-123456789123456789123456789

            [dw2000a]
            client = sw
            solver = EXAMPLE_2000Q_SYSTEM_A
            token = DEF-987654321987654321987654321

        and (2) dwave_b.conf::

            [dw2000b]
            endpoint = https://url.of.some.other.dwavesystem.com/sapi
            client = qpu
            solver = EXAMPLE_2000Q_SYSTEM_B

        The following example code loads profile values from parsing both
        these files, by default loading the first profile encountered or an
        explicitly specified profile.

        >>> import dwave.cloud as dc
        >>> dc.config.load_profile_from_files(["./dwave_a.conf", "./dwave_b.conf"])   # doctest: +SKIP
        {'client': u'sw',
         'endpoint': u'https://url.of.some.dwavesystem.com/sapi',
         'solver': u'EXAMPLE_2000Q_SYSTEM_A',
         'token': u'DEF-987654321987654321987654321'}
        >>> dc.config.load_profile_from_files(["./dwave_a.conf", "./dwave_b.conf"],
        ...                                   profile='dw2000b')   # doctest: +SKIP
        {'client': u'qpu',
         'endpoint': u'https://url.of.some.other.dwavesystem.com/sapi',
         'solver': u'EXAMPLE_2000Q_SYSTEM_B',
         'token': u'ABC-123456789123456789123456789'}

    """
    # progressively build config from a file, or a list of auto-detected files
    # raises ConfigFileReadError/ConfigFileParseError on error
    config = load_config_from_files(filenames)

    # determine profile name fallback:
    #  (1) profile key under [defaults],
    #  (2) first non-[defaults] section
    #  (3) [defaults] section
    first_section = next(iter(config.sections() + [None]))
    config_defaults = config.defaults()
    if not profile:
        profile = config_defaults.get('profile', first_section)

    if profile:
        try:
            section = dict(config[profile])
        except KeyError:
            raise ValueError("Config profile {!r} not found".format(profile))
    else:
        # as the very last resort (unspecified profile name and
        # no profiles defined in config), try to use [defaults]
        if config_defaults:
            section = config_defaults
        else:
            section = {}

    return section
python
[ "def", "load_profile_from_files", "(", "filenames", "=", "None", ",", "profile", "=", "None", ")", ":", "# progressively build config from a file, or a list of auto-detected files", "# raises ConfigFileReadError/ConfigFileParseError on error", "config", "=", "load_config_from_files", "(", "filenames", ")", "# determine profile name fallback:", "# (1) profile key under [defaults],", "# (2) first non-[defaults] section", "# (3) [defaults] section", "first_section", "=", "next", "(", "iter", "(", "config", ".", "sections", "(", ")", "+", "[", "None", "]", ")", ")", "config_defaults", "=", "config", ".", "defaults", "(", ")", "if", "not", "profile", ":", "profile", "=", "config_defaults", ".", "get", "(", "'profile'", ",", "first_section", ")", "if", "profile", ":", "try", ":", "section", "=", "dict", "(", "config", "[", "profile", "]", ")", "except", "KeyError", ":", "raise", "ValueError", "(", "\"Config profile {!r} not found\"", ".", "format", "(", "profile", ")", ")", "else", ":", "# as the very last resort (unspecified profile name and", "# no profiles defined in config), try to use [defaults]", "if", "config_defaults", ":", "section", "=", "config_defaults", "else", ":", "section", "=", "{", "}", "return", "section" ]
Load a profile from a list of D-Wave Cloud Client configuration files.

.. note:: This method is not standardly used to set up D-Wave Cloud Client
    configuration. It is recommended you use :meth:`.Client.from_config` or
    :meth:`.config.load_config` instead.

Configuration files comply with standard Windows INI-like format, parsable
with Python's :mod:`configparser`.

Each file in the list is progressively searched until the first profile is
found. This function does not input profile information from environment
variables.

Args:
    filenames (list[str], default=None):
        D-Wave cloud client configuration files (path and name). If ``None``,
        searches for existing configuration files in the standard directories
        of :func:`get_configfile_paths`.

    profile (str, default=None):
        Name of profile to return from reading the configuration from the
        specified configuration file(s). If ``None``, progressively falls
        back in the following order:

        (1) ``profile`` key following ``[defaults]`` section.
        (2) First non-``[defaults]`` section.
        (3) ``[defaults]`` section.

Returns:
    dict:
        Mapping of configuration keys to values. If no valid config/profile
        is found, returns an empty dict.

Raises:
    :exc:`~dwave.cloud.exceptions.ConfigFileReadError`:
        Config file specified or detected could not be opened or read.

    :exc:`~dwave.cloud.exceptions.ConfigFileParseError`:
        Config file parse failed.

    :exc:`ValueError`:
        Profile name not found.

Examples:
    This example loads a profile based on configurations from two files. It
    finds the first profile, dw2000a, in the first file, dwave_a.conf, and
    adds to the values of the defaults section, overwriting the existing
    client value, while ignoring the profile in the second file,
    dwave_b.conf.

    The files, which are located in the current working directory, are
    (1) dwave_a.conf::

        [defaults]
        endpoint = https://url.of.some.dwavesystem.com/sapi
        client = qpu
        token = ABC-123456789123456789123456789

        [dw2000a]
        client = sw
        solver = EXAMPLE_2000Q_SYSTEM_A
        token = DEF-987654321987654321987654321

    and (2) dwave_b.conf::

        [dw2000b]
        endpoint = https://url.of.some.other.dwavesystem.com/sapi
        client = qpu
        solver = EXAMPLE_2000Q_SYSTEM_B

    The following example code loads profile values from parsing both these
    files, by default loading the first profile encountered or an explicitly
    specified profile.

    >>> import dwave.cloud as dc
    >>> dc.config.load_profile_from_files(["./dwave_a.conf", "./dwave_b.conf"])   # doctest: +SKIP
    {'client': u'sw',
     'endpoint': u'https://url.of.some.dwavesystem.com/sapi',
     'solver': u'EXAMPLE_2000Q_SYSTEM_A',
     'token': u'DEF-987654321987654321987654321'}
    >>> dc.config.load_profile_from_files(["./dwave_a.conf", "./dwave_b.conf"],
    ...                                   profile='dw2000b')   # doctest: +SKIP
    {'client': u'qpu',
     'endpoint': u'https://url.of.some.other.dwavesystem.com/sapi',
     'solver': u'EXAMPLE_2000Q_SYSTEM_B',
     'token': u'ABC-123456789123456789123456789'}
[ "Load", "a", "profile", "from", "a", "list", "of", "D", "-", "Wave", "Cloud", "Client", "configuration", "files", "." ]
train
https://github.com/dwavesystems/dwave-cloud-client/blob/df3221a8385dc0c04d7b4d84f740bf3ad6706230/dwave/cloud/config.py#L471-L584
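A small demonstration of the profile-fallback order implemented above, mirroring the first_section/config_defaults logic (section names here are illustrative):

    import configparser

    config = configparser.ConfigParser(default_section="defaults")
    config.read_string("[defaults]\nprofile = beta\n\n[alpha]\nx = 1\n\n[beta]\ny = 2\n")

    first_section = next(iter(config.sections() + [None]))   # 'alpha'
    profile = config.defaults().get('profile', first_section)
    print(profile)                # 'beta': the [defaults] profile key wins
    print(dict(config[profile]))  # beta's keys plus those inherited from [defaults]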
dwavesystems/dwave-cloud-client
dwave/cloud/config.py
load_config
def load_config(config_file=None, profile=None, client=None,
                endpoint=None, token=None, solver=None, proxy=None):
    """Load D-Wave Cloud Client configuration based on a configuration file.

    Configuration values can be specified in multiple ways, ranked in the
    following order (with 1 the highest ranked):

    1. Values specified as keyword arguments in :func:`load_config()`. These
       values replace values read from a configuration file, and therefore
       must be **strings**, including float values for timeouts, boolean
       flags (tested for "truthiness"), and solver feature constraints (a
       dictionary encoded as JSON).
    2. Values specified as environment variables.
    3. Values specified in the configuration file.

    Configuration-file format is described in :mod:`dwave.cloud.config`.

    If the location of the configuration file is not specified, auto-detection
    searches for existing configuration files in the standard directories of
    :func:`get_configfile_paths`.

    If a configuration file explicitly specified, via an argument or
    environment variable, does not exist or is unreadable, loading fails with
    :exc:`~dwave.cloud.exceptions.ConfigFileReadError`. Loading fails with
    :exc:`~dwave.cloud.exceptions.ConfigFileParseError` if the file is
    readable but invalid as a configuration file.

    Similarly, if a profile explicitly specified, via an argument or
    environment variable, is not present in the loaded configuration, loading
    fails with :exc:`ValueError`. Explicit profile selection also fails if the
    configuration file is not explicitly specified, detected on the system, or
    defined via an environment variable.

    Environment variables: ``DWAVE_CONFIG_FILE``, ``DWAVE_PROFILE``,
    ``DWAVE_API_CLIENT``, ``DWAVE_API_ENDPOINT``, ``DWAVE_API_TOKEN``,
    ``DWAVE_API_SOLVER``, ``DWAVE_API_PROXY``.

    Environment variables are described in :mod:`dwave.cloud.config`.

    Args:
        config_file (str/[str]/None/False/True, default=None):
            Path to configuration file(s).

            If `None`, the value is taken from `DWAVE_CONFIG_FILE` environment
            variable if defined. If the environment variable is undefined or
            empty, auto-detection searches for existing configuration files
            in the standard directories of :func:`get_configfile_paths`.

            If `False`, loading from file(s) is skipped; if `True`, forces
            auto-detection (regardless of the `DWAVE_CONFIG_FILE` environment
            variable).

        profile (str, default=None):
            Profile name (name of the profile section in the configuration
            file).

            If undefined, inferred from `DWAVE_PROFILE` environment variable
            if defined. If the environment variable is undefined or empty,
            a profile is selected in the following order:

            1. From the default section if it includes a profile key.
            2. The first section (after the default section).
            3. If no other section is defined besides `[defaults]`, the
               defaults section is promoted and selected.

        client (str, default=None):
            Client type used for accessing the API. Supported values are
            `qpu` for :class:`dwave.cloud.qpu.Client` and `sw` for
            :class:`dwave.cloud.sw.Client`.

        endpoint (str, default=None):
            API endpoint URL.

        token (str, default=None):
            API authorization token.

        solver (str, default=None):
            :term:`solver` features, as a JSON-encoded dictionary of feature
            constraints, the client should use. See
            :meth:`~dwave.cloud.client.Client.get_solvers` for semantics of
            supported feature constraints.

            If undefined, the client uses a solver definition from environment
            variables, a configuration file, or falls back to the first
            available online solver.

            For backward compatibility, solver name in string format is
            accepted and converted to ``{"name": <solver name>}``.

        proxy (str, default=None):
            URL for proxy to use in connections to D-Wave API. Can include
            username/password, port, scheme, etc. If undefined, client uses
            the system-level proxy, if defined, or connects directly to the
            API.

    Returns:
        dict:
            Mapping of configuration keys to values for the profile (section),
            as read from the configuration file and optionally overridden by
            environment values and specified keyword arguments. Always
            contains the `client`, `endpoint`, `token`, `solver`, and `proxy`
            keys.

    Raises:
        :exc:`ValueError`:
            Invalid (non-existing) profile name.

        :exc:`~dwave.cloud.exceptions.ConfigFileReadError`:
            Config file specified or detected could not be opened or read.

        :exc:`~dwave.cloud.exceptions.ConfigFileParseError`:
            Config file parse failed.

    Examples:
        This example loads the configuration from an auto-detected
        configuration file in the home directory of a Windows system user.

        >>> import dwave.cloud as dc
        >>> dc.config.load_config()
        {'client': u'qpu',
         'endpoint': u'https://url.of.some.dwavesystem.com/sapi',
         'proxy': None,
         'solver': u'EXAMPLE_2000Q_SYSTEM_A',
         'token': u'DEF-987654321987654321987654321'}
        >>> # See which configuration file was loaded
        >>> dc.config.get_configfile_paths()
        [u'C:\\Users\\jane\\AppData\\Local\\dwavesystem\\dwave\\dwave.conf']

        Additional examples are given in :mod:`dwave.cloud.config`.

    """
    if profile is None:
        profile = os.getenv("DWAVE_PROFILE")

    if config_file == False:
        # skip loading from file altogether
        section = {}
    elif config_file == True:
        # force auto-detection, disregarding DWAVE_CONFIG_FILE
        section = load_profile_from_files(None, profile)
    else:
        # auto-detect if not specified with arg or env
        if config_file is None:
            # note: both empty and undefined DWAVE_CONFIG_FILE treated as None
            config_file = os.getenv("DWAVE_CONFIG_FILE")

        # handle ''/None/str/[str] for `config_file` (after env)
        filenames = None
        if config_file:
            if isinstance(config_file, six.string_types):
                filenames = [config_file]
            else:
                filenames = config_file

        section = load_profile_from_files(filenames, profile)

    # override a selected subset of values via env or kwargs,
    # pass-through the rest unmodified
    section['client'] = client or os.getenv("DWAVE_API_CLIENT", section.get('client'))
    section['endpoint'] = endpoint or os.getenv("DWAVE_API_ENDPOINT", section.get('endpoint'))
    section['token'] = token or os.getenv("DWAVE_API_TOKEN", section.get('token'))
    section['solver'] = solver or os.getenv("DWAVE_API_SOLVER", section.get('solver'))
    section['proxy'] = proxy or os.getenv("DWAVE_API_PROXY", section.get('proxy'))

    return section
python
[ "def", "load_config", "(", "config_file", "=", "None", ",", "profile", "=", "None", ",", "client", "=", "None", ",", "endpoint", "=", "None", ",", "token", "=", "None", ",", "solver", "=", "None", ",", "proxy", "=", "None", ")", ":", "if", "profile", "is", "None", ":", "profile", "=", "os", ".", "getenv", "(", "\"DWAVE_PROFILE\"", ")", "if", "config_file", "==", "False", ":", "# skip loading from file altogether", "section", "=", "{", "}", "elif", "config_file", "==", "True", ":", "# force auto-detection, disregarding DWAVE_CONFIG_FILE", "section", "=", "load_profile_from_files", "(", "None", ",", "profile", ")", "else", ":", "# auto-detect if not specified with arg or env", "if", "config_file", "is", "None", ":", "# note: both empty and undefined DWAVE_CONFIG_FILE treated as None", "config_file", "=", "os", ".", "getenv", "(", "\"DWAVE_CONFIG_FILE\"", ")", "# handle ''/None/str/[str] for `config_file` (after env)", "filenames", "=", "None", "if", "config_file", ":", "if", "isinstance", "(", "config_file", ",", "six", ".", "string_types", ")", ":", "filenames", "=", "[", "config_file", "]", "else", ":", "filenames", "=", "config_file", "section", "=", "load_profile_from_files", "(", "filenames", ",", "profile", ")", "# override a selected subset of values via env or kwargs,", "# pass-through the rest unmodified", "section", "[", "'client'", "]", "=", "client", "or", "os", ".", "getenv", "(", "\"DWAVE_API_CLIENT\"", ",", "section", ".", "get", "(", "'client'", ")", ")", "section", "[", "'endpoint'", "]", "=", "endpoint", "or", "os", ".", "getenv", "(", "\"DWAVE_API_ENDPOINT\"", ",", "section", ".", "get", "(", "'endpoint'", ")", ")", "section", "[", "'token'", "]", "=", "token", "or", "os", ".", "getenv", "(", "\"DWAVE_API_TOKEN\"", ",", "section", ".", "get", "(", "'token'", ")", ")", "section", "[", "'solver'", "]", "=", "solver", "or", "os", ".", "getenv", "(", "\"DWAVE_API_SOLVER\"", ",", "section", ".", "get", "(", "'solver'", ")", ")", "section", "[", "'proxy'", "]", "=", "proxy", "or", "os", ".", "getenv", "(", "\"DWAVE_API_PROXY\"", ",", "section", ".", "get", "(", "'proxy'", ")", ")", "return", "section" ]
Load D-Wave Cloud Client configuration based on a configuration file. Configuration values can be specified in multiple ways, ranked in the following order (with 1 the highest ranked): 1. Values specified as keyword arguments in :func:`load_config()`. These values replace values read from a configuration file, and therefore must be **strings**, including float values for timeouts, boolean flags (tested for "truthiness"), and solver feature constraints (a dictionary encoded as JSON). 2. Values specified as environment variables. 3. Values specified in the configuration file. Configuration-file format is described in :mod:`dwave.cloud.config`. If the location of the configuration file is not specified, auto-detection searches for existing configuration files in the standard directories of :func:`get_configfile_paths`. If a configuration file explicitly specified, via an argument or environment variable, does not exist or is unreadable, loading fails with :exc:`~dwave.cloud.exceptions.ConfigFileReadError`. Loading fails with :exc:`~dwave.cloud.exceptions.ConfigFileParseError` if the file is readable but invalid as a configuration file. Similarly, if a profile explicitly specified, via an argument or environment variable, is not present in the loaded configuration, loading fails with :exc:`ValueError`. Explicit profile selection also fails if the configuration file is not explicitly specified, detected on the system, or defined via an environment variable. Environment variables: ``DWAVE_CONFIG_FILE``, ``DWAVE_PROFILE``, ``DWAVE_API_CLIENT``, ``DWAVE_API_ENDPOINT``, ``DWAVE_API_TOKEN``, ``DWAVE_API_SOLVER``, ``DWAVE_API_PROXY``. Environment variables are described in :mod:`dwave.cloud.config`. Args: config_file (str/[str]/None/False/True, default=None): Path to configuration file(s). If `None`, the value is taken from `DWAVE_CONFIG_FILE` environment variable if defined. If the environment variable is undefined or empty, auto-detection searches for existing configuration files in the standard directories of :func:`get_configfile_paths`. If `False`, loading from file(s) is skipped; if `True`, forces auto-detection (regardless of the `DWAVE_CONFIG_FILE` environment variable). profile (str, default=None): Profile name (name of the profile section in the configuration file). If undefined, inferred from `DWAVE_PROFILE` environment variable if defined. If the environment variable is undefined or empty, a profile is selected in the following order: 1. From the default section if it includes a profile key. 2. The first section (after the default section). 3. If no other section is defined besides `[defaults]`, the defaults section is promoted and selected. client (str, default=None): Client type used for accessing the API. Supported values are `qpu` for :class:`dwave.cloud.qpu.Client` and `sw` for :class:`dwave.cloud.sw.Client`. endpoint (str, default=None): API endpoint URL. token (str, default=None): API authorization token. solver (str, default=None): :term:`solver` features, as a JSON-encoded dictionary of feature constraints, the client should use. See :meth:`~dwave.cloud.client.Client.get_solvers` for semantics of supported feature constraints. If undefined, the client uses a solver definition from environment variables, a configuration file, or falls back to the first available online solver. For backward compatibility, solver name in string format is accepted and converted to ``{"name": <solver name>}``. proxy (str, default=None): URL for proxy to use in connections to D-Wave API. 
Can include username/password, port, scheme, etc. If undefined, client uses the system-level proxy, if defined, or connects directly to the API. Returns: dict: Mapping of configuration keys to values for the profile (section), as read from the configuration file and optionally overridden by environment values and specified keyword arguments. Always contains the `client`, `endpoint`, `token`, `solver`, and `proxy` keys. Raises: :exc:`ValueError`: Invalid (non-existing) profile name. :exc:`~dwave.cloud.exceptions.ConfigFileReadError`: Config file specified or detected could not be opened or read. :exc:`~dwave.cloud.exceptions.ConfigFileParseError`: Config file parse failed. Examples: This example loads the configuration from an auto-detected configuration file in the home directory of a Windows system user. >>> import dwave.cloud as dc >>> dc.config.load_config() {'client': u'qpu', 'endpoint': u'https://url.of.some.dwavesystem.com/sapi', 'proxy': None, 'solver': u'EXAMPLE_2000Q_SYSTEM_A', 'token': u'DEF-987654321987654321987654321'} >>> # See which configuration file was loaded >>> dc.config.get_configfile_paths() [u'C:\\Users\\jane\\AppData\\Local\\dwavesystem\\dwave\\dwave.conf'] Additional examples are given in :mod:`dwave.cloud.config`.
[ "Load", "D", "-", "Wave", "Cloud", "Client", "configuration", "based", "on", "a", "configuration", "file", "." ]
train
https://github.com/dwavesystems/dwave-cloud-client/blob/df3221a8385dc0c04d7b4d84f740bf3ad6706230/dwave/cloud/config.py#L619-L778
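A minimal sketch of the precedence rule implemented by the override block above: a keyword argument outranks its environment variable, which outranks the file value. Passing config_file=False skips file loading entirely, so only the env/kwarg layers are exercised here; the token strings are placeholders.

import os
from dwave.cloud.config import load_config

os.environ['DWAVE_API_TOKEN'] = 'env-token'            # placeholder value

# The keyword argument outranks the environment variable.
config = load_config(config_file=False, token='kwarg-token')
assert config['token'] == 'kwarg-token'

# With no keyword override, the environment variable fills the gap.
config = load_config(config_file=False)
assert config['token'] == 'env-token'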
dwavesystems/dwave-cloud-client
dwave/cloud/config.py
legacy_load_config
def legacy_load_config(profile=None, endpoint=None, token=None, solver=None, proxy=None, **kwargs): """Load configured URLs and token for the SAPI server. .. warning:: Included only for backward compatibility. Please use :func:`load_config` or the client factory :meth:`~dwave.cloud.client.Client.from_config` instead. This method tries to load a legacy configuration file from ``~/.dwrc``, select a specified `profile` (or, if not specified, the first profile), and override individual keys with values read from environment variables or specified explicitly as key values in the function. Configuration values can be specified in multiple ways, ranked in the following order (with 1 the highest ranked): 1. Values specified as keyword arguments in :func:`legacy_load_config()` 2. Values specified as environment variables 3. Values specified in the legacy ``~/.dwrc`` configuration file Environment variables searched for are: - ``DW_INTERNAL__HTTPLINK`` - ``DW_INTERNAL__TOKEN`` - ``DW_INTERNAL__HTTPPROXY`` - ``DW_INTERNAL__SOLVER`` Legacy configuration file format is a modified CSV where the first comma is replaced with a bar character (``|``). Each line encodes a single profile. Its columns are:: profile_name|endpoint_url,authentication_token,proxy_url,default_solver_name All its fields after ``authentication_token`` are optional. When there are multiple connections in a file, the first one is the default. Any commas in the URLs must be percent-encoded. Args: profile (str): Profile name in the legacy configuration file. endpoint (str, default=None): API endpoint URL. token (str, default=None): API authorization token. solver (str, default=None): Default solver to use in :meth:`~dwave.cloud.client.Client.get_solver`. If undefined, all calls to :meth:`~dwave.cloud.client.Client.get_solver` must explicitly specify the solver name/id. proxy (str, default=None): URL for proxy to use in connections to D-Wave API. Can include username/password, port, scheme, etc. If undefined, client uses a system-level proxy, if defined, or connects directly to the API. Returns: Dictionary with keys: endpoint, token, solver, and proxy. Examples: This example creates a client using the :meth:`~dwave.cloud.client.Client.from_config` method, which falls back on the legacy file by default when it fails to find a D-Wave Cloud Client configuration file (setting its `legacy_config_fallback` parameter to False precludes this fall-back operation). For this example, no D-Wave Cloud Client configuration file is present on the local system; instead the following ``.dwrc`` legacy configuration file is present in the user's home directory:: profile-a|https://one.com,token-one profile-b|https://two.com,token-two The following example code creates a client without explicitly specifying key values, therefore auto-detection searches for existing (non-legacy) configuration files in the standard directories of :func:`get_configfile_paths` and, failing to find one, falls back on the existing legacy configuration file above. >>> import dwave.cloud >>> client = dwave.cloud.Client.from_config() # doctest: +SKIP >>> client.endpoint # doctest: +SKIP 'https://one.com' >>> client.token # doctest: +SKIP 'token-one' The following examples specify a profile and/or token. >>> # Explicitly specify a profile >>> client = dwave.cloud.Client.from_config(profile='profile-b') >>> # Will try to connect with the url `https://two.com` and the token `token-two`.
>>> client = dwave.cloud.Client.from_config(profile='profile-b', token='new-token') >>> # Will try to connect with the url `https://two.com` and the token `new-token`. """ def _parse_config(fp, filename): fields = ('endpoint', 'token', 'proxy', 'solver') config = OrderedDict() for line in fp: # strip whitespace, skip blank and comment lines line = line.strip() if not line or line.startswith('#'): continue # parse each record, store in dict with label as key try: label, data = line.split('|', 1) values = [v.strip() or None for v in data.split(',')] config[label] = dict(zip(fields, values)) except: raise ConfigFileParseError( "Failed to parse {!r}, line {!r}".format(filename, line)) return config def _read_config(filename): try: with open(filename, 'r') as f: return _parse_config(f, filename) except (IOError, OSError): raise ConfigFileReadError("Failed to read {!r}".format(filename)) config = {} filename = os.path.expanduser('~/.dwrc') if os.path.exists(filename): config = _read_config(filename) # load profile if specified, or first one in file if profile: try: section = config[profile] except KeyError: raise ValueError("Config profile {!r} not found".format(profile)) else: try: _, section = next(iter(config.items())) except StopIteration: section = {} # override config variables (if any) with environment and then with arguments section['endpoint'] = endpoint or os.getenv("DW_INTERNAL__HTTPLINK", section.get('endpoint')) section['token'] = token or os.getenv("DW_INTERNAL__TOKEN", section.get('token')) section['proxy'] = proxy or os.getenv("DW_INTERNAL__HTTPPROXY", section.get('proxy')) section['solver'] = solver or os.getenv("DW_INTERNAL__SOLVER", section.get('solver')) section.update(kwargs) return section
python
def legacy_load_config(profile=None, endpoint=None, token=None, solver=None, proxy=None, **kwargs): """Load configured URLs and token for the SAPI server. .. warning:: Included only for backward compatibility. Please use :func:`load_config` or the client factory :meth:`~dwave.cloud.client.Client.from_config` instead. This method tries to load a legacy configuration file from ``~/.dwrc``, select a specified `profile` (or, if not specified, the first profile), and override individual keys with values read from environment variables or specified explicitly as key values in the function. Configuration values can be specified in multiple ways, ranked in the following order (with 1 the highest ranked): 1. Values specified as keyword arguments in :func:`legacy_load_config()` 2. Values specified as environment variables 3. Values specified in the legacy ``~/.dwrc`` configuration file Environment variables searched for are: - ``DW_INTERNAL__HTTPLINK`` - ``DW_INTERNAL__TOKEN`` - ``DW_INTERNAL__HTTPPROXY`` - ``DW_INTERNAL__SOLVER`` Legacy configuration file format is a modified CSV where the first comma is replaced with a bar character (``|``). Each line encodes a single profile. Its columns are:: profile_name|endpoint_url,authentication_token,proxy_url,default_solver_name All its fields after ``authentication_token`` are optional. When there are multiple connections in a file, the first one is the default. Any commas in the URLs must be percent-encoded. Args: profile (str): Profile name in the legacy configuration file. endpoint (str, default=None): API endpoint URL. token (str, default=None): API authorization token. solver (str, default=None): Default solver to use in :meth:`~dwave.cloud.client.Client.get_solver`. If undefined, all calls to :meth:`~dwave.cloud.client.Client.get_solver` must explicitly specify the solver name/id. proxy (str, default=None): URL for proxy to use in connections to D-Wave API. Can include username/password, port, scheme, etc. If undefined, client uses a system-level proxy, if defined, or connects directly to the API. Returns: Dictionary with keys: endpoint, token, solver, and proxy. Examples: This example creates a client using the :meth:`~dwave.cloud.client.Client.from_config` method, which falls back on the legacy file by default when it fails to find a D-Wave Cloud Client configuration file (setting its `legacy_config_fallback` parameter to False precludes this fall-back operation). For this example, no D-Wave Cloud Client configuration file is present on the local system; instead the following ``.dwrc`` legacy configuration file is present in the user's home directory:: profile-a|https://one.com,token-one profile-b|https://two.com,token-two The following example code creates a client without explicitly specifying key values, therefore auto-detection searches for existing (non-legacy) configuration files in the standard directories of :func:`get_configfile_paths` and, failing to find one, falls back on the existing legacy configuration file above. >>> import dwave.cloud >>> client = dwave.cloud.Client.from_config() # doctest: +SKIP >>> client.endpoint # doctest: +SKIP 'https://one.com' >>> client.token # doctest: +SKIP 'token-one' The following examples specify a profile and/or token. >>> # Explicitly specify a profile >>> client = dwave.cloud.Client.from_config(profile='profile-b') >>> # Will try to connect with the url `https://two.com` and the token `token-two`.
>>> client = dwave.cloud.Client.from_config(profile='profile-b', token='new-token') >>> # Will try to connect with the url `https://two.com` and the token `new-token`. """ def _parse_config(fp, filename): fields = ('endpoint', 'token', 'proxy', 'solver') config = OrderedDict() for line in fp: # strip whitespace, skip blank and comment lines line = line.strip() if not line or line.startswith('#'): continue # parse each record, store in dict with label as key try: label, data = line.split('|', 1) values = [v.strip() or None for v in data.split(',')] config[label] = dict(zip(fields, values)) except: raise ConfigFileParseError( "Failed to parse {!r}, line {!r}".format(filename, line)) return config def _read_config(filename): try: with open(filename, 'r') as f: return _parse_config(f, filename) except (IOError, OSError): raise ConfigFileReadError("Failed to read {!r}".format(filename)) config = {} filename = os.path.expanduser('~/.dwrc') if os.path.exists(filename): config = _read_config(filename) # load profile if specified, or first one in file if profile: try: section = config[profile] except KeyError: raise ValueError("Config profile {!r} not found".format(profile)) else: try: _, section = next(iter(config.items())) except StopIteration: section = {} # override config variables (if any) with environment and then with arguments section['endpoint'] = endpoint or os.getenv("DW_INTERNAL__HTTPLINK", section.get('endpoint')) section['token'] = token or os.getenv("DW_INTERNAL__TOKEN", section.get('token')) section['proxy'] = proxy or os.getenv("DW_INTERNAL__HTTPPROXY", section.get('proxy')) section['solver'] = solver or os.getenv("DW_INTERNAL__SOLVER", section.get('solver')) section.update(kwargs) return section
[ "def", "legacy_load_config", "(", "profile", "=", "None", ",", "endpoint", "=", "None", ",", "token", "=", "None", ",", "solver", "=", "None", ",", "proxy", "=", "None", ",", "*", "*", "kwargs", ")", ":", "def", "_parse_config", "(", "fp", ",", "filename", ")", ":", "fields", "=", "(", "'endpoint'", ",", "'token'", ",", "'proxy'", ",", "'solver'", ")", "config", "=", "OrderedDict", "(", ")", "for", "line", "in", "fp", ":", "# strip whitespace, skip blank and comment lines", "line", "=", "line", ".", "strip", "(", ")", "if", "not", "line", "or", "line", ".", "startswith", "(", "'#'", ")", ":", "continue", "# parse each record, store in dict with label as key", "try", ":", "label", ",", "data", "=", "line", ".", "split", "(", "'|'", ",", "1", ")", "values", "=", "[", "v", ".", "strip", "(", ")", "or", "None", "for", "v", "in", "data", ".", "split", "(", "','", ")", "]", "config", "[", "label", "]", "=", "dict", "(", "zip", "(", "fields", ",", "values", ")", ")", "except", ":", "raise", "ConfigFileParseError", "(", "\"Failed to parse {!r}, line {!r}\"", ".", "format", "(", "filename", ",", "line", ")", ")", "return", "config", "def", "_read_config", "(", "filename", ")", ":", "try", ":", "with", "open", "(", "filename", ",", "'r'", ")", "as", "f", ":", "return", "_parse_config", "(", "f", ",", "filename", ")", "except", "(", "IOError", ",", "OSError", ")", ":", "raise", "ConfigFileReadError", "(", "\"Failed to read {!r}\"", ".", "format", "(", "filename", ")", ")", "config", "=", "{", "}", "filename", "=", "os", ".", "path", ".", "expanduser", "(", "'~/.dwrc'", ")", "if", "os", ".", "path", ".", "exists", "(", "filename", ")", ":", "config", "=", "_read_config", "(", "filename", ")", "# load profile if specified, or first one in file", "if", "profile", ":", "try", ":", "section", "=", "config", "[", "profile", "]", "except", "KeyError", ":", "raise", "ValueError", "(", "\"Config profile {!r} not found\"", ".", "format", "(", "profile", ")", ")", "else", ":", "try", ":", "_", ",", "section", "=", "next", "(", "iter", "(", "config", ".", "items", "(", ")", ")", ")", "except", "StopIteration", ":", "section", "=", "{", "}", "# override config variables (if any) with environment and then with arguments", "section", "[", "'endpoint'", "]", "=", "endpoint", "or", "os", ".", "getenv", "(", "\"DW_INTERNAL__HTTPLINK\"", ",", "section", ".", "get", "(", "'endpoint'", ")", ")", "section", "[", "'token'", "]", "=", "token", "or", "os", ".", "getenv", "(", "\"DW_INTERNAL__TOKEN\"", ",", "section", ".", "get", "(", "'token'", ")", ")", "section", "[", "'proxy'", "]", "=", "proxy", "or", "os", ".", "getenv", "(", "\"DW_INTERNAL__HTTPPROXY\"", ",", "section", ".", "get", "(", "'proxy'", ")", ")", "section", "[", "'solver'", "]", "=", "solver", "or", "os", ".", "getenv", "(", "\"DW_INTERNAL__SOLVER\"", ",", "section", ".", "get", "(", "'solver'", ")", ")", "section", ".", "update", "(", "kwargs", ")", "return", "section" ]
Load configured URLs and token for the SAPI server. .. warning:: Included only for backward compatibility. Please use :func:`load_config` or the client factory :meth:`~dwave.cloud.client.Client.from_config` instead. This method tries to load a legacy configuration file from ``~/.dwrc``, select a specified `profile` (or, if not specified, the first profile), and override individual keys with values read from environment variables or specified explicitly as key values in the function. Configuration values can be specified in multiple ways, ranked in the following order (with 1 the highest ranked): 1. Values specified as keyword arguments in :func:`legacy_load_config()` 2. Values specified as environment variables 3. Values specified in the legacy ``~/.dwrc`` configuration file Environment variables searched for are: - ``DW_INTERNAL__HTTPLINK`` - ``DW_INTERNAL__TOKEN`` - ``DW_INTERNAL__HTTPPROXY`` - ``DW_INTERNAL__SOLVER`` Legacy configuration file format is a modified CSV where the first comma is replaced with a bar character (``|``). Each line encodes a single profile. Its columns are:: profile_name|endpoint_url,authentication_token,proxy_url,default_solver_name All its fields after ``authentication_token`` are optional. When there are multiple connections in a file, the first one is the default. Any commas in the URLs must be percent-encoded. Args: profile (str): Profile name in the legacy configuration file. endpoint (str, default=None): API endpoint URL. token (str, default=None): API authorization token. solver (str, default=None): Default solver to use in :meth:`~dwave.cloud.client.Client.get_solver`. If undefined, all calls to :meth:`~dwave.cloud.client.Client.get_solver` must explicitly specify the solver name/id. proxy (str, default=None): URL for proxy to use in connections to D-Wave API. Can include username/password, port, scheme, etc. If undefined, client uses a system-level proxy, if defined, or connects directly to the API. Returns: Dictionary with keys: endpoint, token, solver, and proxy. Examples: This example creates a client using the :meth:`~dwave.cloud.client.Client.from_config` method, which falls back on the legacy file by default when it fails to find a D-Wave Cloud Client configuration file (setting its `legacy_config_fallback` parameter to False precludes this fall-back operation). For this example, no D-Wave Cloud Client configuration file is present on the local system; instead the following ``.dwrc`` legacy configuration file is present in the user's home directory:: profile-a|https://one.com,token-one profile-b|https://two.com,token-two The following example code creates a client without explicitly specifying key values, therefore auto-detection searches for existing (non-legacy) configuration files in the standard directories of :func:`get_configfile_paths` and, failing to find one, falls back on the existing legacy configuration file above. >>> import dwave.cloud >>> client = dwave.cloud.Client.from_config() # doctest: +SKIP >>> client.endpoint # doctest: +SKIP 'https://one.com' >>> client.token # doctest: +SKIP 'token-one' The following examples specify a profile and/or token. >>> # Explicitly specify a profile >>> client = dwave.cloud.Client.from_config(profile='profile-b') >>> # Will try to connect with the url `https://two.com` and the token `token-two`. >>> client = dwave.cloud.Client.from_config(profile='profile-b', token='new-token') >>> # Will try to connect with the url `https://two.com` and the token `new-token`.
[ "Load", "configured", "URLs", "and", "token", "for", "the", "SAPI", "server", "." ]
train
https://github.com/dwavesystems/dwave-cloud-client/blob/df3221a8385dc0c04d7b4d84f740bf3ad6706230/dwave/cloud/config.py#L781-L925
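A hedged sketch of the legacy loader in use, assuming ~/.dwrc exists with the two illustrative profiles from the docstring (the URLs and tokens are placeholders):

from dwave.cloud.config import legacy_load_config

# Assumed ~/.dwrc contents, as in the docstring example:
#   profile-a|https://one.com,token-one
#   profile-b|https://two.com,token-two
section = legacy_load_config(profile='profile-b')
# Fields map positionally to (endpoint, token, proxy, solver), so:
# {'endpoint': 'https://two.com', 'token': 'token-two',
#  'proxy': None, 'solver': None}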
tuxu/python-samplerate
samplerate/lowlevel.py
_check_data
def _check_data(data): """Check whether `data` is a valid input/output for libsamplerate. Returns ------- num_frames Number of frames in `data`. channels Number of channels in `data`. Raises ------ ValueError: If invalid data is supplied. """ if not (data.dtype == _np.float32 and data.flags.c_contiguous): raise ValueError('supplied data must be float32 and C contiguous') if data.ndim == 2: num_frames, channels = data.shape elif data.ndim == 1: num_frames, channels = data.size, 1 else: raise ValueError('rank > 2 not supported') return num_frames, channels
python
def _check_data(data): """Check whether `data` is a valid input/output for libsamplerate. Returns ------- num_frames Number of frames in `data`. channels Number of channels in `data`. Raises ------ ValueError: If invalid data is supplied. """ if not (data.dtype == _np.float32 and data.flags.c_contiguous): raise ValueError('supplied data must be float32 and C contiguous') if data.ndim == 2: num_frames, channels = data.shape elif data.ndim == 1: num_frames, channels = data.size, 1 else: raise ValueError('rank > 2 not supported') return num_frames, channels
[ "def", "_check_data", "(", "data", ")", ":", "if", "not", "(", "data", ".", "dtype", "==", "_np", ".", "float32", "and", "data", ".", "flags", ".", "c_contiguous", ")", ":", "raise", "ValueError", "(", "'supplied data must be float32 and C contiguous'", ")", "if", "data", ".", "ndim", "==", "2", ":", "num_frames", ",", "channels", "=", "data", ".", "shape", "elif", "data", ".", "ndim", "==", "1", ":", "num_frames", ",", "channels", "=", "data", ".", "size", ",", "1", "else", ":", "raise", "ValueError", "(", "'rank > 2 not supported'", ")", "return", "num_frames", ",", "channels" ]
Check whether `data` is a valid input/output for libsamplerate. Returns ------- num_frames Number of frames in `data`. channels Number of channels in `data`. Raises ------ ValueError: If invalid data is supplied.
[ "Check", "whether", "data", "is", "a", "valid", "input", "/", "output", "for", "libsamplerate", "." ]
train
https://github.com/tuxu/python-samplerate/blob/ed73d7a39e61bfb34b03dade14ffab59aa27922a/samplerate/lowlevel.py#L41-L63
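Calling the private helper directly, purely to illustrate the contract it enforces on libsamplerate buffers (C-contiguous float32, rank 1 or 2):

import numpy as np
from samplerate.lowlevel import _check_data

stereo = np.zeros((100, 2), dtype=np.float32)   # C-contiguous float32
print(_check_data(stereo))                      # (100, 2)

mono = np.zeros(100, dtype=np.float32)
print(_check_data(mono))                        # (100, 1)

try:
    _check_data(np.zeros(10))                   # float64 by default -> rejected
except ValueError as exc:
    print(exc)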
tuxu/python-samplerate
samplerate/lowlevel.py
src_simple
def src_simple(input_data, output_data, ratio, converter_type, channels): """Perform a single conversion from an input buffer to an output buffer. Simple interface for performing a single conversion from input buffer to output buffer at a fixed conversion ratio. Simple interface does not require initialisation as it can only operate on a single buffer worth of audio. """ input_frames, _ = _check_data(input_data) output_frames, _ = _check_data(output_data) data = ffi.new('SRC_DATA*') data.input_frames = input_frames data.output_frames = output_frames data.src_ratio = ratio data.data_in = ffi.cast('float*', ffi.from_buffer(input_data)) data.data_out = ffi.cast('float*', ffi.from_buffer(output_data)) error = _lib.src_simple(data, converter_type, channels) return error, data.input_frames_used, data.output_frames_gen
python
def src_simple(input_data, output_data, ratio, converter_type, channels): """Perform a single conversion from an input buffer to an output buffer. Simple interface for performing a single conversion from input buffer to output buffer at a fixed conversion ratio. Simple interface does not require initialisation as it can only operate on a single buffer worth of audio. """ input_frames, _ = _check_data(input_data) output_frames, _ = _check_data(output_data) data = ffi.new('SRC_DATA*') data.input_frames = input_frames data.output_frames = output_frames data.src_ratio = ratio data.data_in = ffi.cast('float*', ffi.from_buffer(input_data)) data.data_out = ffi.cast('float*', ffi.from_buffer(output_data)) error = _lib.src_simple(data, converter_type, channels) return error, data.input_frames_used, data.output_frames_gen
[ "def", "src_simple", "(", "input_data", ",", "output_data", ",", "ratio", ",", "converter_type", ",", "channels", ")", ":", "input_frames", ",", "_", "=", "_check_data", "(", "input_data", ")", "output_frames", ",", "_", "=", "_check_data", "(", "output_data", ")", "data", "=", "ffi", ".", "new", "(", "'SRC_DATA*'", ")", "data", ".", "input_frames", "=", "input_frames", "data", ".", "output_frames", "=", "output_frames", "data", ".", "src_ratio", "=", "ratio", "data", ".", "data_in", "=", "ffi", ".", "cast", "(", "'float*'", ",", "ffi", ".", "from_buffer", "(", "input_data", ")", ")", "data", ".", "data_out", "=", "ffi", ".", "cast", "(", "'float*'", ",", "ffi", ".", "from_buffer", "(", "output_data", ")", ")", "error", "=", "_lib", ".", "src_simple", "(", "data", ",", "converter_type", ",", "channels", ")", "return", "error", ",", "data", ".", "input_frames_used", ",", "data", ".", "output_frames_gen" ]
Perform a single conversion from an input buffer to an output buffer. Simple interface for performing a single conversion from input buffer to output buffer at a fixed conversion ratio. Simple interface does not require initialisation as it can only operate on a single buffer worth of audio.
[ "Perform", "a", "single", "conversion", "from", "an", "input", "buffer", "to", "an", "output", "buffer", "." ]
train
https://github.com/tuxu/python-samplerate/blob/ed73d7a39e61bfb34b03dade14ffab59aa27922a/samplerate/lowlevel.py#L86-L102
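A one-shot resampling sketch with src_simple; the converter-type value 2 is assumed to map to SRC_SINC_FASTEST, following the underlying C API's enumeration:

import numpy as np
from samplerate.lowlevel import src_simple

ratio = 48000.0 / 44100.0
signal = np.random.randn(44100).astype(np.float32)   # 1 s of mono audio
out = np.zeros(int(np.ceil(len(signal) * ratio)), dtype=np.float32)

error, frames_used, frames_gen = src_simple(signal, out, ratio, 2, 1)
assert error == 0                                    # 0 means success
resampled = out[:frames_gen]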
tuxu/python-samplerate
samplerate/lowlevel.py
src_new
def src_new(converter_type, channels): """Initialise a new sample rate converter. Parameters ---------- converter_type : int Converter to be used. channels : int Number of channels. Returns ------- state An anonymous pointer to the internal state of the converter. error : int Error code. """ error = ffi.new('int*') state = _lib.src_new(converter_type, channels, error) return state, error[0]
python
def src_new(converter_type, channels): """Initialise a new sample rate converter. Parameters ---------- converter_type : int Converter to be used. channels : int Number of channels. Returns ------- state An anonymous pointer to the internal state of the converter. error : int Error code. """ error = ffi.new('int*') state = _lib.src_new(converter_type, channels, error) return state, error[0]
[ "def", "src_new", "(", "converter_type", ",", "channels", ")", ":", "error", "=", "ffi", ".", "new", "(", "'int*'", ")", "state", "=", "_lib", ".", "src_new", "(", "converter_type", ",", "channels", ",", "error", ")", "return", "state", ",", "error", "[", "0", "]" ]
Initialise a new sample rate converter. Parameters ---------- converter_type : int Converter to be used. channels : int Number of channels. Returns ------- state An anonymous pointer to the internal state of the converter. error : int Error code.
[ "Initialise", "a", "new", "sample", "rate", "converter", "." ]
train
https://github.com/tuxu/python-samplerate/blob/ed73d7a39e61bfb34b03dade14ffab59aa27922a/samplerate/lowlevel.py#L105-L124
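A minimal creation sketch; converter type 0 is assumed to be SRC_SINC_BEST_QUALITY per the C API, and a zero error code signals success:

from samplerate.lowlevel import src_new

state, error = src_new(0, 2)    # assumed SRC_SINC_BEST_QUALITY, stereo
assert error == 0               # 0 means success in libsamplerate
# `state` is the opaque converter pointer consumed by src_process()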
tuxu/python-samplerate
samplerate/lowlevel.py
src_process
def src_process(state, input_data, output_data, ratio, end_of_input=0): """Standard processing function. Returns non zero on error. """ input_frames, _ = _check_data(input_data) output_frames, _ = _check_data(output_data) data = ffi.new('SRC_DATA*') data.input_frames = input_frames data.output_frames = output_frames data.src_ratio = ratio data.data_in = ffi.cast('float*', ffi.from_buffer(input_data)) data.data_out = ffi.cast('float*', ffi.from_buffer(output_data)) data.end_of_input = end_of_input error = _lib.src_process(state, data) return error, data.input_frames_used, data.output_frames_gen
python
def src_process(state, input_data, output_data, ratio, end_of_input=0): """Standard processing function. Returns non zero on error. """ input_frames, _ = _check_data(input_data) output_frames, _ = _check_data(output_data) data = ffi.new('SRC_DATA*') data.input_frames = input_frames data.output_frames = output_frames data.src_ratio = ratio data.data_in = ffi.cast('float*', ffi.from_buffer(input_data)) data.data_out = ffi.cast('float*', ffi.from_buffer(output_data)) data.end_of_input = end_of_input error = _lib.src_process(state, data) return error, data.input_frames_used, data.output_frames_gen
[ "def", "src_process", "(", "state", ",", "input_data", ",", "output_data", ",", "ratio", ",", "end_of_input", "=", "0", ")", ":", "input_frames", ",", "_", "=", "_check_data", "(", "input_data", ")", "output_frames", ",", "_", "=", "_check_data", "(", "output_data", ")", "data", "=", "ffi", ".", "new", "(", "'SRC_DATA*'", ")", "data", ".", "input_frames", "=", "input_frames", "data", ".", "output_frames", "=", "output_frames", "data", ".", "src_ratio", "=", "ratio", "data", ".", "data_in", "=", "ffi", ".", "cast", "(", "'float*'", ",", "ffi", ".", "from_buffer", "(", "input_data", ")", ")", "data", ".", "data_out", "=", "ffi", ".", "cast", "(", "'float*'", ",", "ffi", ".", "from_buffer", "(", "output_data", ")", ")", "data", ".", "end_of_input", "=", "end_of_input", "error", "=", "_lib", ".", "src_process", "(", "state", ",", "data", ")", "return", "error", ",", "data", ".", "input_frames_used", ",", "data", ".", "output_frames_gen" ]
Standard processing function. Returns non zero on error.
[ "Standard", "processing", "function", "." ]
train
https://github.com/tuxu/python-samplerate/blob/ed73d7a39e61bfb34b03dade14ffab59aa27922a/samplerate/lowlevel.py#L135-L150
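A chunked-processing sketch combining src_new and src_process; setting end_of_input=1 on the last chunk lets the converter flush. Converter type 4 (SRC_LINEAR in the C API) is an assumption, and a production loop would also re-feed chunk[used:] whenever used < len(chunk):

import numpy as np
from samplerate.lowlevel import src_new, src_process

state, error = src_new(4, 1)    # assumed SRC_LINEAR, mono
assert error == 0

ratio = 0.5                     # downsample by a factor of two
chunks = [np.random.randn(1024).astype(np.float32) for _ in range(4)]
out = np.zeros(1024, dtype=np.float32)

pieces = []
for i, chunk in enumerate(chunks):
    last = int(i == len(chunks) - 1)
    error, used, gen = src_process(state, chunk, out, ratio, end_of_input=last)
    assert error == 0
    pieces.append(out[:gen].copy())
resampled = np.concatenate(pieces)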
tuxu/python-samplerate
samplerate/lowlevel.py
_src_input_callback
def _src_input_callback(cb_data, data): """Internal callback function to be used with the callback API. Pulls the Python callback function from the handle contained in `cb_data` and calls it to fetch frames. Frames are converted to the format required by the API (float, interleaved channels). A reference to these data is kept internally. Returns ------- frames : int The number of frames supplied. """ cb_data = ffi.from_handle(cb_data) ret = cb_data['callback']() if ret is None: cb_data['last_input'] = None return 0 # No frames supplied input_data = _np.require(ret, requirements='C', dtype=_np.float32) input_frames, channels = _check_data(input_data) # Check whether the correct number of channels is supplied by user. if cb_data['channels'] != channels: raise ValueError('Invalid number of channels in callback.') # Store a reference of the input data to ensure it is still alive when # accessed by libsamplerate. cb_data['last_input'] = input_data data[0] = ffi.cast('float*', ffi.from_buffer(input_data)) return input_frames
python
def _src_input_callback(cb_data, data): """Internal callback function to be used with the callback API. Pulls the Python callback function from the handle contained in `cb_data` and calls it to fetch frames. Frames are converted to the format required by the API (float, interleaved channels). A reference to these data is kept internally. Returns ------- frames : int The number of frames supplied. """ cb_data = ffi.from_handle(cb_data) ret = cb_data['callback']() if ret is None: cb_data['last_input'] = None return 0 # No frames supplied input_data = _np.require(ret, requirements='C', dtype=_np.float32) input_frames, channels = _check_data(input_data) # Check whether the correct number of channels is supplied by user. if cb_data['channels'] != channels: raise ValueError('Invalid number of channels in callback.') # Store a reference of the input data to ensure it is still alive when # accessed by libsamplerate. cb_data['last_input'] = input_data data[0] = ffi.cast('float*', ffi.from_buffer(input_data)) return input_frames
[ "def", "_src_input_callback", "(", "cb_data", ",", "data", ")", ":", "cb_data", "=", "ffi", ".", "from_handle", "(", "cb_data", ")", "ret", "=", "cb_data", "[", "'callback'", "]", "(", ")", "if", "ret", "is", "None", ":", "cb_data", "[", "'last_input'", "]", "=", "None", "return", "0", "# No frames supplied", "input_data", "=", "_np", ".", "require", "(", "ret", ",", "requirements", "=", "'C'", ",", "dtype", "=", "_np", ".", "float32", ")", "input_frames", ",", "channels", "=", "_check_data", "(", "input_data", ")", "# Check whether the correct number of channels is supplied by user.", "if", "cb_data", "[", "'channels'", "]", "!=", "channels", ":", "raise", "ValueError", "(", "'Invalid number of channels in callback.'", ")", "# Store a reference of the input data to ensure it is still alive when", "# accessed by libsamplerate.", "cb_data", "[", "'last_input'", "]", "=", "input_data", "data", "[", "0", "]", "=", "ffi", ".", "cast", "(", "'float*'", ",", "ffi", ".", "from_buffer", "(", "input_data", ")", ")", "return", "input_frames" ]
Internal callback function to be used with the callback API. Pulls the Python callback function from the handle contained in `cb_data` and calls it to fetch frames. Frames are converted to the format required by the API (float, interleaved channels). A reference to these data is kept internally. Returns ------- frames : int The number of frames supplied.
[ "Internal", "callback", "function", "to", "be", "used", "with", "the", "callback", "API", "." ]
train
https://github.com/tuxu/python-samplerate/blob/ed73d7a39e61bfb34b03dade14ffab59aa27922a/samplerate/lowlevel.py#L184-L214
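Sketched from the checks above, the contract a user-supplied callback must satisfy: return frames convertible to a C-contiguous float32 array with the agreed channel count, or None to end the stream.

import numpy as np

def producer():
    # Either a (frames, channels) float32 block ...
    return np.zeros((256, 2), dtype=np.float32)

def finished():
    # ... or None, which _src_input_callback reports as 0 frames supplied.
    return None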
tuxu/python-samplerate
samplerate/lowlevel.py
src_callback_new
def src_callback_new(callback, converter_type, channels): """Initialisation for the callback based API. Parameters ---------- callback : function Called whenever new frames are to be read. Must return a NumPy array of shape (num_frames, channels). converter_type : int Converter to be used. channels : int Number of channels. Returns ------- state An anonymous pointer to the internal state of the converter. handle A CFFI handle to the callback data. error : int Error code. """ cb_data = {'callback': callback, 'channels': channels} handle = ffi.new_handle(cb_data) error = ffi.new('int*') state = _lib.src_callback_new(_src_input_callback, converter_type, channels, error, handle) if state == ffi.NULL: return None, handle, error[0] return state, handle, error[0]
python
def src_callback_new(callback, converter_type, channels): """Initialisation for the callback based API. Parameters ---------- callback : function Called whenever new frames are to be read. Must return a NumPy array of shape (num_frames, channels). converter_type : int Converter to be used. channels : int Number of channels. Returns ------- state An anonymous pointer to the internal state of the converter. handle A CFFI handle to the callback data. error : int Error code. """ cb_data = {'callback': callback, 'channels': channels} handle = ffi.new_handle(cb_data) error = ffi.new('int*') state = _lib.src_callback_new(_src_input_callback, converter_type, channels, error, handle) if state == ffi.NULL: return None, handle, error[0] return state, handle, error[0]
[ "def", "src_callback_new", "(", "callback", ",", "converter_type", ",", "channels", ")", ":", "cb_data", "=", "{", "'callback'", ":", "callback", ",", "'channels'", ":", "channels", "}", "handle", "=", "ffi", ".", "new_handle", "(", "cb_data", ")", "error", "=", "ffi", ".", "new", "(", "'int*'", ")", "state", "=", "_lib", ".", "src_callback_new", "(", "_src_input_callback", ",", "converter_type", ",", "channels", ",", "error", ",", "handle", ")", "if", "state", "==", "ffi", ".", "NULL", ":", "return", "None", ",", "handle", ",", "error", "[", "0", "]", "return", "state", ",", "handle", ",", "error", "[", "0", "]" ]
Initialisation for the callback based API. Parameters ---------- callback : function Called whenever new frames are to be read. Must return a NumPy array of shape (num_frames, channels). converter_type : int Converter to be used. channels : int Number of channels. Returns ------- state An anonymous pointer to the internal state of the converter. handle A CFFI handle to the callback data. error : int Error code.
[ "Initialisation", "for", "the", "callback", "based", "API", "." ]
train
https://github.com/tuxu/python-samplerate/blob/ed73d7a39e61bfb34b03dade14ffab59aa27922a/samplerate/lowlevel.py#L217-L247
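A creation sketch for the callback API; the returned handle should stay referenced for as long as the converter is used, since it is the only Python-side anchor for the CFFI handle passed to libsamplerate. Converter type 4 (SRC_LINEAR) is again an assumption:

import numpy as np
from samplerate.lowlevel import src_callback_new

def producer():
    return np.zeros((512, 1), dtype=np.float32)    # always 512 mono frames

state, handle, error = src_callback_new(producer, 4, 1)
assert state is not None and error == 0
# keep `handle` alive while `state` is in use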
tuxu/python-samplerate
samplerate/lowlevel.py
src_callback_read
def src_callback_read(state, ratio, frames, data): """Read up to `frames` worth of data using the callback API. Returns ------- frames : int Number of frames read or -1 on error. """ data_ptr = ffi.cast('float*', ffi.from_buffer(data)) return _lib.src_callback_read(state, ratio, frames, data_ptr)
python
def src_callback_read(state, ratio, frames, data): """Read up to `frames` worth of data using the callback API. Returns ------- frames : int Number of frames read or -1 on error. """ data_ptr = ffi.cast('float*', ffi.from_buffer(data)) return _lib.src_callback_read(state, ratio, frames, data_ptr)
[ "def", "src_callback_read", "(", "state", ",", "ratio", ",", "frames", ",", "data", ")", ":", "data_ptr", "=", "ffi", ".", "cast", "(", "'float*f'", ",", "ffi", ".", "from_buffer", "(", "data", ")", ")", "return", "_lib", ".", "src_callback_read", "(", "state", ",", "ratio", ",", "frames", ",", "data_ptr", ")" ]
Read up to `frames` worth of data using the callback API. Returns ------- frames : int Number of frames read or -1 on error.
[ "Read", "up", "to", "frames", "worth", "of", "data", "using", "the", "callback", "API", "." ]
train
https://github.com/tuxu/python-samplerate/blob/ed73d7a39e61bfb34b03dade14ffab59aa27922a/samplerate/lowlevel.py#L250-L259
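A drain-loop sketch pulling converted audio through the callback API; it assumes src_callback_read returns 0 once the producer is exhausted (the docstring documents -1 on error). Converter type 4 is again the assumed SRC_LINEAR:

import numpy as np
from samplerate.lowlevel import src_callback_new, src_callback_read

blocks = iter([np.random.randn(512).astype(np.float32), None])   # None ends the stream
state, handle, error = src_callback_new(lambda: next(blocks), 4, 1)
assert error == 0

out = np.zeros(2048, dtype=np.float32)
pieces = []
while True:
    n = src_callback_read(state, 2.0, len(out), out)             # upsample by two
    if n <= 0:
        break
    pieces.append(out[:n].copy())
result = np.concatenate(pieces) if pieces else out[:0]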
dwavesystems/dwave-cloud-client
dwave/cloud/client.py
Client.from_config
def from_config(cls, config_file=None, profile=None, client=None, endpoint=None, token=None, solver=None, proxy=None, legacy_config_fallback=False, **kwargs): """Client factory method to instantiate a client instance from configuration. Configuration values can be specified in multiple ways, ranked in the following order (with 1 the highest ranked): 1. Values specified as keyword arguments in :func:`from_config()` 2. Values specified as environment variables 3. Values specified in the configuration file Configuration-file format is described in :mod:`dwave.cloud.config`. If the location of the configuration file is not specified, auto-detection searches for existing configuration files in the standard directories of :func:`get_configfile_paths`. If a configuration file explicitly specified, via an argument or environment variable, does not exist or is unreadable, loading fails with :exc:`~dwave.cloud.exceptions.ConfigFileReadError`. Loading fails with :exc:`~dwave.cloud.exceptions.ConfigFileParseError` if the file is readable but invalid as a configuration file. Similarly, if a profile explicitly specified, via an argument or environment variable, is not present in the loaded configuration, loading fails with :exc:`ValueError`. Explicit profile selection also fails if the configuration file is not explicitly specified, detected on the system, or defined via an environment variable. Environment variables: ``DWAVE_CONFIG_FILE``, ``DWAVE_PROFILE``, ``DWAVE_API_CLIENT``, ``DWAVE_API_ENDPOINT``, ``DWAVE_API_TOKEN``, ``DWAVE_API_SOLVER``, ``DWAVE_API_PROXY``. Environment variables are described in :mod:`dwave.cloud.config`. Args: config_file (str/[str]/None/False/True, default=None): Path to configuration file. If ``None``, the value is taken from ``DWAVE_CONFIG_FILE`` environment variable if defined. If the environment variable is undefined or empty, auto-detection searches for existing configuration files in the standard directories of :func:`get_configfile_paths`. If ``False``, loading from file is skipped; if ``True``, forces auto-detection (regardless of the ``DWAVE_CONFIG_FILE`` environment variable). profile (str, default=None): Profile name (name of the profile section in the configuration file). If undefined, inferred from ``DWAVE_PROFILE`` environment variable if defined. If the environment variable is undefined or empty, a profile is selected in the following order: 1. From the default section if it includes a profile key. 2. The first section (after the default section). 3. If no other section is defined besides ``[defaults]``, the defaults section is promoted and selected. client (str, default=None): Client type used for accessing the API. Supported values are ``qpu`` for :class:`dwave.cloud.qpu.Client` and ``sw`` for :class:`dwave.cloud.sw.Client`. endpoint (str, default=None): API endpoint URL. token (str, default=None): API authorization token. solver (dict/str, default=None): Default :term:`solver` features to use in :meth:`~dwave.cloud.client.Client.get_solver`. Defined via dictionary of solver feature constraints (see :meth:`~dwave.cloud.client.Client.get_solvers`). For backward compatibility, a solver name, as a string, is also accepted and converted to ``{"name": <solver name>}``. If undefined, :meth:`~dwave.cloud.client.Client.get_solver` uses a solver definition from environment variables, a configuration file, or falls back to the first available online solver. proxy (str, default=None): URL for proxy to use in connections to D-Wave API. 
Can include username/password, port, scheme, etc. If undefined, client uses the system-level proxy, if defined, or connects directly to the API. legacy_config_fallback (bool, default=False): If True and loading from a standard D-Wave Cloud Client configuration file (``dwave.conf``) fails, tries loading a legacy configuration file (``~/.dwrc``). Other Parameters: Unrecognized keys (str): All unrecognized keys are passed through to the appropriate client class constructor as string keyword arguments. An explicit key value overrides an identical user-defined key value loaded from a configuration file. Returns: :class:`~dwave.cloud.client.Client` (:class:`dwave.cloud.qpu.Client` or :class:`dwave.cloud.sw.Client`, default=:class:`dwave.cloud.qpu.Client`): Appropriate instance of a QPU or software client. Raises: :exc:`~dwave.cloud.exceptions.ConfigFileReadError`: Config file specified or detected could not be opened or read. :exc:`~dwave.cloud.exceptions.ConfigFileParseError`: Config file parse failed. Examples: A variety of examples are given in :mod:`dwave.cloud.config`. This example initializes :class:`~dwave.cloud.client.Client` from an explicitly specified configuration file, "~/jane/my_path_to_config/my_cloud_conf.conf":: >>> from dwave.cloud import Client >>> client = Client.from_config(config_file='~/jane/my_path_to_config/my_cloud_conf.conf') # doctest: +SKIP >>> # code that uses client >>> client.close() """ # try loading configuration from a preferred new config subsystem # (`./dwave.conf`, `~/.config/dwave/dwave.conf`, etc) config = load_config( config_file=config_file, profile=profile, client=client, endpoint=endpoint, token=token, solver=solver, proxy=proxy) _LOGGER.debug("Config loaded: %r", config) # fallback to legacy `.dwrc` if key variables missing if legacy_config_fallback: warnings.warn("'legacy_config_fallback' is deprecated, please convert " "your legacy .dwrc file to the new config format.", DeprecationWarning) if not config.get('token'): config = legacy_load_config( profile=profile, client=client, endpoint=endpoint, token=token, solver=solver, proxy=proxy) _LOGGER.debug("Legacy config loaded: %r", config) # manual override of other (client-custom) arguments config.update(kwargs) from dwave.cloud import qpu, sw _clients = {'qpu': qpu.Client, 'sw': sw.Client, 'base': cls} _client = config.pop('client', None) or 'base' _LOGGER.debug("Final config used for %s.Client(): %r", _client, config) return _clients[_client](**config)
python
def from_config(cls, config_file=None, profile=None, client=None, endpoint=None, token=None, solver=None, proxy=None, legacy_config_fallback=False, **kwargs): """Client factory method to instantiate a client instance from configuration. Configuration values can be specified in multiple ways, ranked in the following order (with 1 the highest ranked): 1. Values specified as keyword arguments in :func:`from_config()` 2. Values specified as environment variables 3. Values specified in the configuration file Configuration-file format is described in :mod:`dwave.cloud.config`. If the location of the configuration file is not specified, auto-detection searches for existing configuration files in the standard directories of :func:`get_configfile_paths`. If a configuration file explicitly specified, via an argument or environment variable, does not exist or is unreadable, loading fails with :exc:`~dwave.cloud.exceptions.ConfigFileReadError`. Loading fails with :exc:`~dwave.cloud.exceptions.ConfigFileParseError` if the file is readable but invalid as a configuration file. Similarly, if a profile explicitly specified, via an argument or environment variable, is not present in the loaded configuration, loading fails with :exc:`ValueError`. Explicit profile selection also fails if the configuration file is not explicitly specified, detected on the system, or defined via an environment variable. Environment variables: ``DWAVE_CONFIG_FILE``, ``DWAVE_PROFILE``, ``DWAVE_API_CLIENT``, ``DWAVE_API_ENDPOINT``, ``DWAVE_API_TOKEN``, ``DWAVE_API_SOLVER``, ``DWAVE_API_PROXY``. Environment variables are described in :mod:`dwave.cloud.config`. Args: config_file (str/[str]/None/False/True, default=None): Path to configuration file. If ``None``, the value is taken from ``DWAVE_CONFIG_FILE`` environment variable if defined. If the environment variable is undefined or empty, auto-detection searches for existing configuration files in the standard directories of :func:`get_configfile_paths`. If ``False``, loading from file is skipped; if ``True``, forces auto-detection (regardless of the ``DWAVE_CONFIG_FILE`` environment variable). profile (str, default=None): Profile name (name of the profile section in the configuration file). If undefined, inferred from ``DWAVE_PROFILE`` environment variable if defined. If the environment variable is undefined or empty, a profile is selected in the following order: 1. From the default section if it includes a profile key. 2. The first section (after the default section). 3. If no other section is defined besides ``[defaults]``, the defaults section is promoted and selected. client (str, default=None): Client type used for accessing the API. Supported values are ``qpu`` for :class:`dwave.cloud.qpu.Client` and ``sw`` for :class:`dwave.cloud.sw.Client`. endpoint (str, default=None): API endpoint URL. token (str, default=None): API authorization token. solver (dict/str, default=None): Default :term:`solver` features to use in :meth:`~dwave.cloud.client.Client.get_solver`. Defined via dictionary of solver feature constraints (see :meth:`~dwave.cloud.client.Client.get_solvers`). For backward compatibility, a solver name, as a string, is also accepted and converted to ``{"name": <solver name>}``. If undefined, :meth:`~dwave.cloud.client.Client.get_solver` uses a solver definition from environment variables, a configuration file, or falls back to the first available online solver. proxy (str, default=None): URL for proxy to use in connections to D-Wave API. 
Can include username/password, port, scheme, etc. If undefined, client uses the system-level proxy, if defined, or connects directly to the API. legacy_config_fallback (bool, default=False): If True and loading from a standard D-Wave Cloud Client configuration file (``dwave.conf``) fails, tries loading a legacy configuration file (``~/.dwrc``). Other Parameters: Unrecognized keys (str): All unrecognized keys are passed through to the appropriate client class constructor as string keyword arguments. An explicit key value overrides an identical user-defined key value loaded from a configuration file. Returns: :class:`~dwave.cloud.client.Client` (:class:`dwave.cloud.qpu.Client` or :class:`dwave.cloud.sw.Client`, default=:class:`dwave.cloud.qpu.Client`): Appropriate instance of a QPU or software client. Raises: :exc:`~dwave.cloud.exceptions.ConfigFileReadError`: Config file specified or detected could not be opened or read. :exc:`~dwave.cloud.exceptions.ConfigFileParseError`: Config file parse failed. Examples: A variety of examples are given in :mod:`dwave.cloud.config`. This example initializes :class:`~dwave.cloud.client.Client` from an explicitly specified configuration file, "~/jane/my_path_to_config/my_cloud_conf.conf":: >>> from dwave.cloud import Client >>> client = Client.from_config(config_file='~/jane/my_path_to_config/my_cloud_conf.conf') # doctest: +SKIP >>> # code that uses client >>> client.close() """ # try loading configuration from a preferred new config subsystem # (`./dwave.conf`, `~/.config/dwave/dwave.conf`, etc) config = load_config( config_file=config_file, profile=profile, client=client, endpoint=endpoint, token=token, solver=solver, proxy=proxy) _LOGGER.debug("Config loaded: %r", config) # fallback to legacy `.dwrc` if key variables missing if legacy_config_fallback: warnings.warn("'legacy_config_fallback' is deprecated, please convert " "your legacy .dwrc file to the new config format.", DeprecationWarning) if not config.get('token'): config = legacy_load_config( profile=profile, client=client, endpoint=endpoint, token=token, solver=solver, proxy=proxy) _LOGGER.debug("Legacy config loaded: %r", config) # manual override of other (client-custom) arguments config.update(kwargs) from dwave.cloud import qpu, sw _clients = {'qpu': qpu.Client, 'sw': sw.Client, 'base': cls} _client = config.pop('client', None) or 'base' _LOGGER.debug("Final config used for %s.Client(): %r", _client, config) return _clients[_client](**config)
[ "def", "from_config", "(", "cls", ",", "config_file", "=", "None", ",", "profile", "=", "None", ",", "client", "=", "None", ",", "endpoint", "=", "None", ",", "token", "=", "None", ",", "solver", "=", "None", ",", "proxy", "=", "None", ",", "legacy_config_fallback", "=", "False", ",", "*", "*", "kwargs", ")", ":", "# try loading configuration from a preferred new config subsystem", "# (`./dwave.conf`, `~/.config/dwave/dwave.conf`, etc)", "config", "=", "load_config", "(", "config_file", "=", "config_file", ",", "profile", "=", "profile", ",", "client", "=", "client", ",", "endpoint", "=", "endpoint", ",", "token", "=", "token", ",", "solver", "=", "solver", ",", "proxy", "=", "proxy", ")", "_LOGGER", ".", "debug", "(", "\"Config loaded: %r\"", ",", "config", ")", "# fallback to legacy `.dwrc` if key variables missing", "if", "legacy_config_fallback", ":", "warnings", ".", "warn", "(", "\"'legacy_config_fallback' is deprecated, please convert \"", "\"your legacy .dwrc file to the new config format.\"", ",", "DeprecationWarning", ")", "if", "not", "config", ".", "get", "(", "'token'", ")", ":", "config", "=", "legacy_load_config", "(", "profile", "=", "profile", ",", "client", "=", "client", ",", "endpoint", "=", "endpoint", ",", "token", "=", "token", ",", "solver", "=", "solver", ",", "proxy", "=", "proxy", ")", "_LOGGER", ".", "debug", "(", "\"Legacy config loaded: %r\"", ",", "config", ")", "# manual override of other (client-custom) arguments", "config", ".", "update", "(", "kwargs", ")", "from", "dwave", ".", "cloud", "import", "qpu", ",", "sw", "_clients", "=", "{", "'qpu'", ":", "qpu", ".", "Client", ",", "'sw'", ":", "sw", ".", "Client", ",", "'base'", ":", "cls", "}", "_client", "=", "config", ".", "pop", "(", "'client'", ",", "None", ")", "or", "'base'", "_LOGGER", ".", "debug", "(", "\"Final config used for %s.Client(): %r\"", ",", "_client", ",", "config", ")", "return", "_clients", "[", "_client", "]", "(", "*", "*", "config", ")" ]
Client factory method to instantiate a client instance from configuration.

        Configuration values can be specified in multiple ways, ranked in the
        following order (with 1 the highest ranked):

        1. Values specified as keyword arguments in :func:`from_config()`
        2. Values specified as environment variables
        3. Values specified in the configuration file

        Configuration-file format is described in :mod:`dwave.cloud.config`.

        If the location of the configuration file is not specified, auto-detection
        searches for existing configuration files in the standard directories
        of :func:`get_configfile_paths`.

        If a configuration file explicitly specified, via an argument or
        environment variable, does not exist or is unreadable, loading fails with
        :exc:`~dwave.cloud.exceptions.ConfigFileReadError`. Loading fails
        with :exc:`~dwave.cloud.exceptions.ConfigFileParseError` if the file is
        readable but invalid as a configuration file.

        Similarly, if a profile explicitly specified, via an argument or
        environment variable, is not present in the loaded configuration, loading
        fails with :exc:`ValueError`. Explicit profile selection also fails if
        the configuration file is not explicitly specified, detected on the
        system, or defined via an environment variable.

        Environment variables: ``DWAVE_CONFIG_FILE``, ``DWAVE_PROFILE``,
        ``DWAVE_API_CLIENT``, ``DWAVE_API_ENDPOINT``, ``DWAVE_API_TOKEN``,
        ``DWAVE_API_SOLVER``, ``DWAVE_API_PROXY``.

        Environment variables are described in :mod:`dwave.cloud.config`.

        Args:
            config_file (str/[str]/None/False/True, default=None):
                Path to configuration file.

                If ``None``, the value is taken from ``DWAVE_CONFIG_FILE``
                environment variable if defined. If the environment variable
                is undefined or empty, auto-detection searches for existing
                configuration files in the standard directories of
                :func:`get_configfile_paths`.

                If ``False``, loading from file is skipped; if ``True``, forces
                auto-detection (regardless of the ``DWAVE_CONFIG_FILE`` environment
                variable).

            profile (str, default=None):
                Profile name (name of the profile section in the configuration file).

                If undefined, inferred from ``DWAVE_PROFILE`` environment
                variable if defined. If the environment variable is undefined or
                empty, a profile is selected in the following order:

                1. From the default section if it includes a profile key.
                2. The first section (after the default section).
                3. If no other section is defined besides ``[defaults]``, the
                   defaults section is promoted and selected.

            client (str, default=None):
                Client type used for accessing the API. Supported values are
                ``qpu`` for :class:`dwave.cloud.qpu.Client` and ``sw`` for
                :class:`dwave.cloud.sw.Client`.

            endpoint (str, default=None):
                API endpoint URL.

            token (str, default=None):
                API authorization token.

            solver (dict/str, default=None):
                Default :term:`solver` features to use in
                :meth:`~dwave.cloud.client.Client.get_solver`.

                Defined via dictionary of solver feature constraints
                (see :meth:`~dwave.cloud.client.Client.get_solvers`).
                For backward compatibility, a solver name, as a string,
                is also accepted and converted to ``{"name": <solver name>}``.

                If undefined, :meth:`~dwave.cloud.client.Client.get_solver` uses a
                solver definition from environment variables, a configuration file,
                or falls back to the first available online solver.

            proxy (str, default=None):
                URL for proxy to use in connections to D-Wave API. Can include
                username/password, port, scheme, etc. If undefined, client
                uses the system-level proxy, if defined, or connects directly
                to the API.

            legacy_config_fallback (bool, default=False):
                If True and loading from a standard D-Wave Cloud Client configuration
                file (``dwave.conf``) fails, tries loading a legacy configuration
                file (``~/.dwrc``).

        Other Parameters:
            Unrecognized keys (str):
                All unrecognized keys are passed through to the appropriate client
                class constructor as string keyword arguments.

                An explicit key value overrides an identical user-defined key
                value loaded from a configuration file.

        Returns:
            :class:`~dwave.cloud.client.Client` (:class:`dwave.cloud.qpu.Client` or
            :class:`dwave.cloud.sw.Client`, default=:class:`dwave.cloud.qpu.Client`):
                Appropriate instance of a QPU or software client.

        Raises:
            :exc:`~dwave.cloud.exceptions.ConfigFileReadError`:
                Config file specified or detected could not be opened or read.

            :exc:`~dwave.cloud.exceptions.ConfigFileParseError`:
                Config file parse failed.

        Examples:
            A variety of examples are given in :mod:`dwave.cloud.config`.

            This example initializes :class:`~dwave.cloud.client.Client` from an
            explicitly specified configuration file, "~/jane/my_path_to_config/my_cloud_conf.conf"::

                >>> from dwave.cloud import Client
                >>> client = Client.from_config(config_file='~/jane/my_path_to_config/my_cloud_conf.conf')   # doctest: +SKIP
                >>> # code that uses client
                >>> client.close()
[ "Client", "factory", "method", "to", "instantiate", "a", "client", "instance", "from", "configuration", "." ]
train
https://github.com/dwavesystems/dwave-cloud-client/blob/df3221a8385dc0c04d7b4d84f740bf3ad6706230/dwave/cloud/client.py#L168-L318
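The precedence rules above (keyword arguments over environment variables over the configuration file) are easiest to see in a short usage sketch. This example is not part of the dataset entry; the profile name and token below are placeholder values:

from dwave.cloud import Client

# Keyword arguments rank highest: this token overrides any token found in
# the environment or in a dwave.conf profile; client='qpu' selects the
# dwave.cloud.qpu.Client class.
client = Client.from_config(profile='prod', token='ABC-123', client='qpu')
try:
    pass  # code that uses client
finally:
    client.close()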
dwavesystems/dwave-cloud-client
dwave/cloud/client.py
Client.close
def close(self):
        """Perform a clean shutdown.

        Waits for all the currently scheduled work to finish, kills the workers,
        and closes the connection pool.

        .. note:: Ensure your code does not submit new work while the connection is
            closing. Where possible, it is recommended you use a context manager
            (a :code:`with Client.from_config(...) as` construct) to ensure your
            code properly closes all resources.

        Examples:
            This example creates a client (based on an auto-detected configuration
            file), executes some code (represented by a placeholder comment), and
            then closes the client.

            >>> from dwave.cloud import Client
            >>> client = Client.from_config()
            >>> # code that uses client
            >>> client.close()

        """

        # Finish all the work that requires the connection
        _LOGGER.debug("Joining submission queue")
        self._submission_queue.join()

        _LOGGER.debug("Joining cancel queue")
        self._cancel_queue.join()

        _LOGGER.debug("Joining poll queue")
        self._poll_queue.join()

        _LOGGER.debug("Joining load queue")
        self._load_queue.join()

        # Send kill-task to all worker threads
        # Note: threads can't be 'killed' in Python, they have to die by
        # natural causes
        for _ in self._submission_workers:
            self._submission_queue.put(None)
        for _ in self._cancel_workers:
            self._cancel_queue.put(None)
        for _ in self._poll_workers:
            self._poll_queue.put((-1, None))
        for _ in self._load_workers:
            self._load_queue.put(None)

        # Wait for threads to die
        for worker in chain(self._submission_workers, self._cancel_workers,
                            self._poll_workers, self._load_workers):
            worker.join()

        # Close the requests session
        self.session.close()
python
def close(self):
        """Perform a clean shutdown.

        Waits for all the currently scheduled work to finish, kills the workers,
        and closes the connection pool.

        .. note:: Ensure your code does not submit new work while the connection is
            closing. Where possible, it is recommended you use a context manager
            (a :code:`with Client.from_config(...) as` construct) to ensure your
            code properly closes all resources.

        Examples:
            This example creates a client (based on an auto-detected configuration
            file), executes some code (represented by a placeholder comment), and
            then closes the client.

            >>> from dwave.cloud import Client
            >>> client = Client.from_config()
            >>> # code that uses client
            >>> client.close()

        """

        # Finish all the work that requires the connection
        _LOGGER.debug("Joining submission queue")
        self._submission_queue.join()

        _LOGGER.debug("Joining cancel queue")
        self._cancel_queue.join()

        _LOGGER.debug("Joining poll queue")
        self._poll_queue.join()

        _LOGGER.debug("Joining load queue")
        self._load_queue.join()

        # Send kill-task to all worker threads
        # Note: threads can't be 'killed' in Python, they have to die by
        # natural causes
        for _ in self._submission_workers:
            self._submission_queue.put(None)
        for _ in self._cancel_workers:
            self._cancel_queue.put(None)
        for _ in self._poll_workers:
            self._poll_queue.put((-1, None))
        for _ in self._load_workers:
            self._load_queue.put(None)

        # Wait for threads to die
        for worker in chain(self._submission_workers, self._cancel_workers,
                            self._poll_workers, self._load_workers):
            worker.join()

        # Close the requests session
        self.session.close()
[ "def", "close", "(", "self", ")", ":", "# Finish all the work that requires the connection", "_LOGGER", ".", "debug", "(", "\"Joining submission queue\"", ")", "self", ".", "_submission_queue", ".", "join", "(", ")", "_LOGGER", ".", "debug", "(", "\"Joining cancel queue\"", ")", "self", ".", "_cancel_queue", ".", "join", "(", ")", "_LOGGER", ".", "debug", "(", "\"Joining poll queue\"", ")", "self", ".", "_poll_queue", ".", "join", "(", ")", "_LOGGER", ".", "debug", "(", "\"Joining load queue\"", ")", "self", ".", "_load_queue", ".", "join", "(", ")", "# Send kill-task to all worker threads", "# Note: threads can't be 'killed' in Python, they have to die by", "# natural causes", "for", "_", "in", "self", ".", "_submission_workers", ":", "self", ".", "_submission_queue", ".", "put", "(", "None", ")", "for", "_", "in", "self", ".", "_cancel_workers", ":", "self", ".", "_cancel_queue", ".", "put", "(", "None", ")", "for", "_", "in", "self", ".", "_poll_workers", ":", "self", ".", "_poll_queue", ".", "put", "(", "(", "-", "1", ",", "None", ")", ")", "for", "_", "in", "self", ".", "_load_workers", ":", "self", ".", "_load_queue", ".", "put", "(", "None", ")", "# Wait for threads to die", "for", "worker", "in", "chain", "(", "self", ".", "_submission_workers", ",", "self", ".", "_cancel_workers", ",", "self", ".", "_poll_workers", ",", "self", ".", "_load_workers", ")", ":", "worker", ".", "join", "(", ")", "# Close the requests session", "self", ".", "session", ".", "close", "(", ")" ]
Perform a clean shutdown.

        Waits for all the currently scheduled work to finish, kills the workers,
        and closes the connection pool.

        .. note:: Ensure your code does not submit new work while the connection is
            closing. Where possible, it is recommended you use a context manager
            (a :code:`with Client.from_config(...) as` construct) to ensure your
            code properly closes all resources.

        Examples:
            This example creates a client (based on an auto-detected configuration
            file), executes some code (represented by a placeholder comment), and
            then closes the client.

            >>> from dwave.cloud import Client
            >>> client = Client.from_config()
            >>> # code that uses client
            >>> client.close()
[ "Perform", "a", "clean", "shutdown", "." ]
train
https://github.com/dwavesystems/dwave-cloud-client/blob/df3221a8385dc0c04d7b4d84f740bf3ad6706230/dwave/cloud/client.py#L427-L476
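Since close() should run even when the intervening code raises, the context-manager form recommended in the docstring above is worth showing explicitly; a minimal sketch, assuming an auto-detected configuration file:

from dwave.cloud import Client

# The with-block guarantees close() is called even if the body raises,
# per the docstring's recommendation of a `with Client.from_config(...) as`
# construct.
with Client.from_config() as client:
    pass  # code that uses client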
dwavesystems/dwave-cloud-client
dwave/cloud/client.py
Client.get_solvers
def get_solvers(self, refresh=False, order_by='avg_load', **filters):
        """Return a filtered list of solvers handled by this client.

        Args:
            refresh (bool, default=False):
                Force refresh of cached list of solvers/properties.

            order_by (callable/str/None, default='avg_load'):
                Solver sorting key function (or :class:`Solver` attribute/item
                dot-separated path). By default, solvers are sorted by average
                load. To explicitly not sort the solvers (and use the API-returned
                order), set ``order_by=None``.

                Signature of the `key` `callable` is::

                    key :: (Solver s, Ord k) => s -> k

                Basic structure of the `key` string path is::

                    "-"? (attr|item) ( "." (attr|item) )*

                For example, to use solver property named ``max_anneal_schedule_points``,
                available in ``Solver.properties`` dict, you can either specify a
                callable `key`::

                    key=lambda solver: solver.properties['max_anneal_schedule_points']

                or, you can use a short string path based key::

                    key='properties.max_anneal_schedule_points'

                Solver derived properties, available as :class:`Solver` properties
                can also be used (e.g. ``num_active_qubits``, ``online``,
                ``avg_load``, etc).

                Ascending sort order is implied, unless the key string path starts
                with ``-``, in which case descending sort is used.

                Note: the sort used for ordering solvers by `key` is **stable**,
                meaning that if multiple solvers have the same value for the key,
                their relative order is preserved, and effectively they are in the
                same order as returned by the API.

                Note: solvers with ``None`` for key appear last in the list of
                solvers. When providing a key callable, ensure all values returned
                are of the same type (particularly in Python 3). For solvers with
                undefined key value, return ``None``.

            **filters:
                See `Filtering forms` and `Operators` below.

        Solver filters are defined, similarly to Django QuerySet filters, with
        keyword arguments of form `<key1>__...__<keyN>[__<operator>]=<value>`.
        Each `<operator>` is a predicate (boolean) function that acts on two
        arguments: value of feature `<name>` (described with keys path
        `<key1.key2...keyN>`) and the required `<value>`.

        Feature `<name>` can be:

        1) a derived solver property, available as an identically named
           :class:`Solver`'s property (`name`, `qpu`, `software`, `online`,
           `num_active_qubits`, `avg_load`)
        2) a solver parameter, available in :obj:`Solver.parameters`
        3) a solver property, available in :obj:`Solver.properties`
        4) a path describing a property in nested dictionaries

        Filtering forms are:

        * <derived_property>__<operator> (object <value>)
        * <derived_property> (bool)

          This form ensures the value of solver's property bound to
          `derived_property`, after applying `operator` equals the `value`.
          The default operator is `eq`.

          For example::

            >>> client.get_solvers(avg_load__gt=0.5)

          but also::

            >>> client.get_solvers(online=True)
            >>> # identical to:
            >>> client.get_solvers(online__eq=True)

        * <parameter>__<operator> (object <value>)
        * <parameter> (bool)

          This form ensures that the solver supports `parameter`. General
          operator form can be used but usually does not make sense for
          parameters, since values are human-readable descriptions. The default
          operator is `available`.

          Example::

            >>> client.get_solvers(flux_biases=True)
            >>> # identical to:
            >>> client.get_solvers(flux_biases__available=True)

        * <property>__<operator> (object <value>)
        * <property> (bool)

          This form ensures the value of the solver's `property`, after applying
          `operator` equals the right-hand side `value`. The default operator is
          `eq`.

        Note: if a non-existing parameter/property name/key is given, the default
        operator is `eq`.

        Operators are:

        * `available` (<name>: str, <value>: bool):
            Test availability of <name> feature.
        * `eq`, `lt`, `lte`, `gt`, `gte` (<name>: str, <value>: any):
            Standard relational operators that compare feature <name> value with
            <value>.
        * `regex` (<name>: str, <value>: str):
            Test regular expression matching feature value.
        * `covers` (<name>: str, <value>: single value or range expressed as
          2-tuple/list):
            Test feature <name> value (which should be a *range*) covers a given
            value or a subrange.
        * `within` (<name>: str, <value>: range expressed as 2-tuple/list):
            Test feature <name> value (which can be a *single value* or a *range*)
            is within a given range.
        * `in` (<name>: str, <value>: container type):
            Test feature <name> value is *in* <value> container.
        * `contains` (<name>: str, <value>: any):
            Test feature <name> value (container type) *contains* <value>.
        * `issubset` (<name>: str, <value>: container type):
            Test feature <name> value (container type) is a subset of <value>.
        * `issuperset` (<name>: str, <value>: container type):
            Test feature <name> value (container type) is a superset of <value>.

        Derived properties are:

        * `name` (str): Solver name/id.
        * `qpu` (bool): Is solver QPU based?
        * `software` (bool): Is solver software based?
        * `online` (bool, default=True): Is solver online?
        * `num_active_qubits` (int): Number of active qubits. Less than or equal
          to `num_qubits`.
        * `avg_load` (float): Solver's average load (similar to Unix load average).

        Common solver parameters are:

        * `flux_biases`: Should solver accept flux biases?
        * `anneal_schedule`: Should solver accept anneal schedule?

        Common solver properties are:

        * `num_qubits` (int): Number of qubits available.
        * `vfyc` (bool): Should solver work on "virtual full-yield chip"?
        * `max_anneal_schedule_points` (int): Piecewise linear annealing schedule
          points.
        * `h_range` ([int,int]), j_range ([int,int]): Biases/couplings values
          range.
        * `num_reads_range` ([int,int]): Range of allowed values for `num_reads`
          parameter.

        Returns:
            list[Solver]: List of all solvers that satisfy the conditions.

        Note:
            Client subclasses (e.g. :class:`dwave.cloud.qpu.Client` or
            :class:`dwave.cloud.sw.Client`) already filter solvers by resource
            type, so for `qpu` and `software` filters to have effect, call
            :meth:`.get_solvers` on base class :class:`~dwave.cloud.client.Client`.

        Examples::

            client.get_solvers(
                num_qubits__gt=2000,                # we need more than 2000 qubits
                num_qubits__lt=4000,                # ... but fewer than 4000 qubits
                num_qubits__within=(2000, 4000),    # an alternative to the previous two lines
                num_active_qubits=1089,             # we want a particular number of active qubits
                vfyc=True,                          # we require a fully yielded Chimera
                vfyc__in=[False, None],             # inverse of the previous filter
                vfyc__available=False,              # we want solvers that do not advertise the vfyc property
                anneal_schedule=True,               # we need support for custom anneal schedule
                max_anneal_schedule_points__gte=4,  # we need at least 4 points for our anneal schedule
                num_reads_range__covers=1000,       # our solver must support returning 1000 reads
                extended_j_range__covers=[-2, 2],   # we need extended J range to contain subrange [-2,2]
                couplings__contains=[0, 128],       # coupling (edge between) qubits (0,128) must exist
                couplings__issuperset=[[0,128], [0,4]],   # two couplings required: (0,128) and (0,4)
                qubits__issuperset={0, 4, 215},     # qubits 0, 4 and 215 must exist
                supported_problem_types__issubset={'ising', 'qubo'},  # require Ising, QUBO or both to be supported
                name='DW_2000Q_3',                  # full solver name/ID match
                name__regex='.*2000.*',             # partial/regex-based solver name match
                chip_id__regex='DW_.*',             # chip ID prefix must be DW_
                topology__type__eq="chimera"        # topology.type must be chimera
            )
        """

        def covers_op(prop, val):
            """Does LHS `prop` (range) fully cover RHS `val` (range or item)?"""
            # `prop` must be a 2-element list/tuple range.
            if not isinstance(prop, (list, tuple)) or not len(prop) == 2:
                raise ValueError("2-element list/tuple range required for LHS value")
            llo, lhi = min(prop), max(prop)

            # `val` can be a single value, or a range (2-list/2-tuple).
            if isinstance(val, (list, tuple)) and len(val) == 2:
                # val range within prop range?
                rlo, rhi = min(val), max(val)
                return llo <= rlo and lhi >= rhi
            else:
                # val item within prop range?
                return llo <= val <= lhi

        def within_op(prop, val):
            """Is LHS `prop` (range or item) fully covered by RHS `val` (range)?"""
            try:
                return covers_op(val, prop)
            except ValueError:
                raise ValueError("2-element list/tuple range required for RHS value")

        def _set(iterable):
            """Like set(iterable), but works for lists as items in iterable.
            Before constructing a set, lists are converted to tuples.
            """
            first = next(iter(iterable))
            if isinstance(first, list):
                return set(tuple(x) for x in iterable)
            return set(iterable)

        def with_valid_lhs(op):
            @wraps(op)
            def _wrapper(prop, val):
                if prop is None:
                    return False
                return op(prop, val)
            return _wrapper

        # available filtering operators
        ops = {
            'lt': with_valid_lhs(operator.lt),
            'lte': with_valid_lhs(operator.le),
            'gt': with_valid_lhs(operator.gt),
            'gte': with_valid_lhs(operator.ge),
            'eq': operator.eq,
            'available': lambda prop, val: prop is not None if val else prop is None,
            'regex': with_valid_lhs(lambda prop, val: re.match("^{}$".format(val), prop)),
            # range operations
            'covers': with_valid_lhs(covers_op),
            'within': with_valid_lhs(within_op),
            # membership tests
            'in': lambda prop, val: prop in val,
            'contains': with_valid_lhs(lambda prop, val: val in prop),
            # set tests
            'issubset': with_valid_lhs(lambda prop, val: _set(prop).issubset(_set(val))),
            'issuperset': with_valid_lhs(lambda prop, val: _set(prop).issuperset(_set(val))),
        }

        def predicate(solver, query, val):
            # needs to handle kwargs like these:
            #  key=val
            #  key__op=val
            #  key__key=val
            #  key__key__op=val
            # LHS is split on __ in `query`
            assert len(query) >= 1
            potential_path, potential_op_name = query[:-1], query[-1]

            if potential_op_name in ops:
                # op is explicit, and potential path is correct
                op_name = potential_op_name
            else:
                # op is implied and depends on property type, path is the whole query
                op_name = None
                potential_path = query

            path = '.'.join(potential_path)

            if path in solver.derived_properties:
                op = ops[op_name or 'eq']
                return op(getattr(solver, path), val)
            elif pluck(solver.parameters, path, None) is not None:
                op = ops[op_name or 'available']
                return op(pluck(solver.parameters, path), val)
            elif pluck(solver.properties, path, None) is not None:
                op = ops[op_name or 'eq']
                return op(pluck(solver.properties, path), val)
            else:
                op = ops[op_name or 'eq']
                return op(None, val)

        # param validation
        sort_reverse = False
        if not order_by:
            sort_key = None
        elif isinstance(order_by, six.string_types):
            if order_by[0] == '-':
                sort_reverse = True
                order_by = order_by[1:]
            if not order_by:
                sort_key = None
            else:
                sort_key = lambda solver: pluck(solver, order_by, None)
        elif callable(order_by):
            sort_key = order_by
        else:
            raise TypeError("expected string or callable for 'order_by'")

        # default filters:
        filters.setdefault('online', True)

        predicates = []
        for lhs, val in filters.items():
            query = lhs.split('__')
            predicates.append(partial(predicate, query=query, val=val))

        _LOGGER.debug("Filtering solvers with predicates=%r", predicates)

        # optimization for case when exact solver name/id is known:
        # we can fetch only that solver
        # NOTE: in future, complete feature-based filtering will be on server-side
        query = dict(refresh_=refresh)
        if 'name' in filters:
            query['name'] = filters['name']
        if 'name__eq' in filters:
            query['name'] = filters['name__eq']

        # filter
        solvers = self._fetch_solvers(**query)
        solvers = [s for s in solvers if all(p(s) for p in predicates)]

        # sort: undefined (None) key values go last
        if sort_key is not None:
            solvers_with_keys = [(sort_key(solver), solver) for solver in solvers]
            solvers_with_invalid_keys = [(key, solver) for key, solver in solvers_with_keys if key is None]
            solvers_with_valid_keys = [(key, solver) for key, solver in solvers_with_keys if key is not None]
            solvers_with_valid_keys.sort(key=operator.itemgetter(0))
            solvers = [solver for key, solver in chain(solvers_with_valid_keys, solvers_with_invalid_keys)]

        # reverse if necessary (as a separate step from sorting, so it works for invalid keys
        # and plain list reverse without sorting)
        if sort_reverse:
            solvers.reverse()

        return solvers
python
def get_solvers(self, refresh=False, order_by='avg_load', **filters):
        """Return a filtered list of solvers handled by this client.

        Args:
            refresh (bool, default=False):
                Force refresh of cached list of solvers/properties.

            order_by (callable/str/None, default='avg_load'):
                Solver sorting key function (or :class:`Solver` attribute/item
                dot-separated path). By default, solvers are sorted by average
                load. To explicitly not sort the solvers (and use the API-returned
                order), set ``order_by=None``.

                Signature of the `key` `callable` is::

                    key :: (Solver s, Ord k) => s -> k

                Basic structure of the `key` string path is::

                    "-"? (attr|item) ( "." (attr|item) )*

                For example, to use solver property named ``max_anneal_schedule_points``,
                available in ``Solver.properties`` dict, you can either specify a
                callable `key`::

                    key=lambda solver: solver.properties['max_anneal_schedule_points']

                or, you can use a short string path based key::

                    key='properties.max_anneal_schedule_points'

                Solver derived properties, available as :class:`Solver` properties
                can also be used (e.g. ``num_active_qubits``, ``online``,
                ``avg_load``, etc).

                Ascending sort order is implied, unless the key string path starts
                with ``-``, in which case descending sort is used.

                Note: the sort used for ordering solvers by `key` is **stable**,
                meaning that if multiple solvers have the same value for the key,
                their relative order is preserved, and effectively they are in the
                same order as returned by the API.

                Note: solvers with ``None`` for key appear last in the list of
                solvers. When providing a key callable, ensure all values returned
                are of the same type (particularly in Python 3). For solvers with
                undefined key value, return ``None``.

            **filters:
                See `Filtering forms` and `Operators` below.

        Solver filters are defined, similarly to Django QuerySet filters, with
        keyword arguments of form `<key1>__...__<keyN>[__<operator>]=<value>`.
        Each `<operator>` is a predicate (boolean) function that acts on two
        arguments: value of feature `<name>` (described with keys path
        `<key1.key2...keyN>`) and the required `<value>`.

        Feature `<name>` can be:

        1) a derived solver property, available as an identically named
           :class:`Solver`'s property (`name`, `qpu`, `software`, `online`,
           `num_active_qubits`, `avg_load`)
        2) a solver parameter, available in :obj:`Solver.parameters`
        3) a solver property, available in :obj:`Solver.properties`
        4) a path describing a property in nested dictionaries

        Filtering forms are:

        * <derived_property>__<operator> (object <value>)
        * <derived_property> (bool)

          This form ensures the value of solver's property bound to
          `derived_property`, after applying `operator` equals the `value`.
          The default operator is `eq`.

          For example::

            >>> client.get_solvers(avg_load__gt=0.5)

          but also::

            >>> client.get_solvers(online=True)
            >>> # identical to:
            >>> client.get_solvers(online__eq=True)

        * <parameter>__<operator> (object <value>)
        * <parameter> (bool)

          This form ensures that the solver supports `parameter`. General
          operator form can be used but usually does not make sense for
          parameters, since values are human-readable descriptions. The default
          operator is `available`.

          Example::

            >>> client.get_solvers(flux_biases=True)
            >>> # identical to:
            >>> client.get_solvers(flux_biases__available=True)

        * <property>__<operator> (object <value>)
        * <property> (bool)

          This form ensures the value of the solver's `property`, after applying
          `operator` equals the right-hand side `value`. The default operator is
          `eq`.

        Note: if a non-existing parameter/property name/key is given, the default
        operator is `eq`.

        Operators are:

        * `available` (<name>: str, <value>: bool):
            Test availability of <name> feature.
        * `eq`, `lt`, `lte`, `gt`, `gte` (<name>: str, <value>: any):
            Standard relational operators that compare feature <name> value with
            <value>.
        * `regex` (<name>: str, <value>: str):
            Test regular expression matching feature value.
        * `covers` (<name>: str, <value>: single value or range expressed as
          2-tuple/list):
            Test feature <name> value (which should be a *range*) covers a given
            value or a subrange.
        * `within` (<name>: str, <value>: range expressed as 2-tuple/list):
            Test feature <name> value (which can be a *single value* or a *range*)
            is within a given range.
        * `in` (<name>: str, <value>: container type):
            Test feature <name> value is *in* <value> container.
        * `contains` (<name>: str, <value>: any):
            Test feature <name> value (container type) *contains* <value>.
        * `issubset` (<name>: str, <value>: container type):
            Test feature <name> value (container type) is a subset of <value>.
        * `issuperset` (<name>: str, <value>: container type):
            Test feature <name> value (container type) is a superset of <value>.

        Derived properties are:

        * `name` (str): Solver name/id.
        * `qpu` (bool): Is solver QPU based?
        * `software` (bool): Is solver software based?
        * `online` (bool, default=True): Is solver online?
        * `num_active_qubits` (int): Number of active qubits. Less than or equal
          to `num_qubits`.
        * `avg_load` (float): Solver's average load (similar to Unix load average).

        Common solver parameters are:

        * `flux_biases`: Should solver accept flux biases?
        * `anneal_schedule`: Should solver accept anneal schedule?

        Common solver properties are:

        * `num_qubits` (int): Number of qubits available.
        * `vfyc` (bool): Should solver work on "virtual full-yield chip"?
        * `max_anneal_schedule_points` (int): Piecewise linear annealing schedule
          points.
        * `h_range` ([int,int]), j_range ([int,int]): Biases/couplings values
          range.
        * `num_reads_range` ([int,int]): Range of allowed values for `num_reads`
          parameter.

        Returns:
            list[Solver]: List of all solvers that satisfy the conditions.

        Note:
            Client subclasses (e.g. :class:`dwave.cloud.qpu.Client` or
            :class:`dwave.cloud.sw.Client`) already filter solvers by resource
            type, so for `qpu` and `software` filters to have effect, call
            :meth:`.get_solvers` on base class :class:`~dwave.cloud.client.Client`.

        Examples::

            client.get_solvers(
                num_qubits__gt=2000,                # we need more than 2000 qubits
                num_qubits__lt=4000,                # ... but fewer than 4000 qubits
                num_qubits__within=(2000, 4000),    # an alternative to the previous two lines
                num_active_qubits=1089,             # we want a particular number of active qubits
                vfyc=True,                          # we require a fully yielded Chimera
                vfyc__in=[False, None],             # inverse of the previous filter
                vfyc__available=False,              # we want solvers that do not advertise the vfyc property
                anneal_schedule=True,               # we need support for custom anneal schedule
                max_anneal_schedule_points__gte=4,  # we need at least 4 points for our anneal schedule
                num_reads_range__covers=1000,       # our solver must support returning 1000 reads
                extended_j_range__covers=[-2, 2],   # we need extended J range to contain subrange [-2,2]
                couplings__contains=[0, 128],       # coupling (edge between) qubits (0,128) must exist
                couplings__issuperset=[[0,128], [0,4]],   # two couplings required: (0,128) and (0,4)
                qubits__issuperset={0, 4, 215},     # qubits 0, 4 and 215 must exist
                supported_problem_types__issubset={'ising', 'qubo'},  # require Ising, QUBO or both to be supported
                name='DW_2000Q_3',                  # full solver name/ID match
                name__regex='.*2000.*',             # partial/regex-based solver name match
                chip_id__regex='DW_.*',             # chip ID prefix must be DW_
                topology__type__eq="chimera"        # topology.type must be chimera
            )
        """

        def covers_op(prop, val):
            """Does LHS `prop` (range) fully cover RHS `val` (range or item)?"""
            # `prop` must be a 2-element list/tuple range.
            if not isinstance(prop, (list, tuple)) or not len(prop) == 2:
                raise ValueError("2-element list/tuple range required for LHS value")
            llo, lhi = min(prop), max(prop)

            # `val` can be a single value, or a range (2-list/2-tuple).
            if isinstance(val, (list, tuple)) and len(val) == 2:
                # val range within prop range?
                rlo, rhi = min(val), max(val)
                return llo <= rlo and lhi >= rhi
            else:
                # val item within prop range?
                return llo <= val <= lhi

        def within_op(prop, val):
            """Is LHS `prop` (range or item) fully covered by RHS `val` (range)?"""
            try:
                return covers_op(val, prop)
            except ValueError:
                raise ValueError("2-element list/tuple range required for RHS value")

        def _set(iterable):
            """Like set(iterable), but works for lists as items in iterable.
            Before constructing a set, lists are converted to tuples.
            """
            first = next(iter(iterable))
            if isinstance(first, list):
                return set(tuple(x) for x in iterable)
            return set(iterable)

        def with_valid_lhs(op):
            @wraps(op)
            def _wrapper(prop, val):
                if prop is None:
                    return False
                return op(prop, val)
            return _wrapper

        # available filtering operators
        ops = {
            'lt': with_valid_lhs(operator.lt),
            'lte': with_valid_lhs(operator.le),
            'gt': with_valid_lhs(operator.gt),
            'gte': with_valid_lhs(operator.ge),
            'eq': operator.eq,
            'available': lambda prop, val: prop is not None if val else prop is None,
            'regex': with_valid_lhs(lambda prop, val: re.match("^{}$".format(val), prop)),
            # range operations
            'covers': with_valid_lhs(covers_op),
            'within': with_valid_lhs(within_op),
            # membership tests
            'in': lambda prop, val: prop in val,
            'contains': with_valid_lhs(lambda prop, val: val in prop),
            # set tests
            'issubset': with_valid_lhs(lambda prop, val: _set(prop).issubset(_set(val))),
            'issuperset': with_valid_lhs(lambda prop, val: _set(prop).issuperset(_set(val))),
        }

        def predicate(solver, query, val):
            # needs to handle kwargs like these:
            #  key=val
            #  key__op=val
            #  key__key=val
            #  key__key__op=val
            # LHS is split on __ in `query`
            assert len(query) >= 1
            potential_path, potential_op_name = query[:-1], query[-1]

            if potential_op_name in ops:
                # op is explicit, and potential path is correct
                op_name = potential_op_name
            else:
                # op is implied and depends on property type, path is the whole query
                op_name = None
                potential_path = query

            path = '.'.join(potential_path)

            if path in solver.derived_properties:
                op = ops[op_name or 'eq']
                return op(getattr(solver, path), val)
            elif pluck(solver.parameters, path, None) is not None:
                op = ops[op_name or 'available']
                return op(pluck(solver.parameters, path), val)
            elif pluck(solver.properties, path, None) is not None:
                op = ops[op_name or 'eq']
                return op(pluck(solver.properties, path), val)
            else:
                op = ops[op_name or 'eq']
                return op(None, val)

        # param validation
        sort_reverse = False
        if not order_by:
            sort_key = None
        elif isinstance(order_by, six.string_types):
            if order_by[0] == '-':
                sort_reverse = True
                order_by = order_by[1:]
            if not order_by:
                sort_key = None
            else:
                sort_key = lambda solver: pluck(solver, order_by, None)
        elif callable(order_by):
            sort_key = order_by
        else:
            raise TypeError("expected string or callable for 'order_by'")

        # default filters:
        filters.setdefault('online', True)

        predicates = []
        for lhs, val in filters.items():
            query = lhs.split('__')
            predicates.append(partial(predicate, query=query, val=val))

        _LOGGER.debug("Filtering solvers with predicates=%r", predicates)

        # optimization for case when exact solver name/id is known:
        # we can fetch only that solver
        # NOTE: in future, complete feature-based filtering will be on server-side
        query = dict(refresh_=refresh)
        if 'name' in filters:
            query['name'] = filters['name']
        if 'name__eq' in filters:
            query['name'] = filters['name__eq']

        # filter
        solvers = self._fetch_solvers(**query)
        solvers = [s for s in solvers if all(p(s) for p in predicates)]

        # sort: undefined (None) key values go last
        if sort_key is not None:
            solvers_with_keys = [(sort_key(solver), solver) for solver in solvers]
            solvers_with_invalid_keys = [(key, solver) for key, solver in solvers_with_keys if key is None]
            solvers_with_valid_keys = [(key, solver) for key, solver in solvers_with_keys if key is not None]
            solvers_with_valid_keys.sort(key=operator.itemgetter(0))
            solvers = [solver for key, solver in chain(solvers_with_valid_keys, solvers_with_invalid_keys)]

        # reverse if necessary (as a separate step from sorting, so it works for invalid keys
        # and plain list reverse without sorting)
        if sort_reverse:
            solvers.reverse()

        return solvers
[ "def", "get_solvers", "(", "self", ",", "refresh", "=", "False", ",", "order_by", "=", "'avg_load'", ",", "*", "*", "filters", ")", ":", "def", "covers_op", "(", "prop", ",", "val", ")", ":", "\"\"\"Does LHS `prop` (range) fully cover RHS `val` (range or item)?\"\"\"", "# `prop` must be a 2-element list/tuple range.", "if", "not", "isinstance", "(", "prop", ",", "(", "list", ",", "tuple", ")", ")", "or", "not", "len", "(", "prop", ")", "==", "2", ":", "raise", "ValueError", "(", "\"2-element list/tuple range required for LHS value\"", ")", "llo", ",", "lhi", "=", "min", "(", "prop", ")", ",", "max", "(", "prop", ")", "# `val` can be a single value, or a range (2-list/2-tuple).", "if", "isinstance", "(", "val", ",", "(", "list", ",", "tuple", ")", ")", "and", "len", "(", "val", ")", "==", "2", ":", "# val range within prop range?", "rlo", ",", "rhi", "=", "min", "(", "val", ")", ",", "max", "(", "val", ")", "return", "llo", "<=", "rlo", "and", "lhi", ">=", "rhi", "else", ":", "# val item within prop range?", "return", "llo", "<=", "val", "<=", "lhi", "def", "within_op", "(", "prop", ",", "val", ")", ":", "\"\"\"Is LHS `prop` (range or item) fully covered by RHS `val` (range)?\"\"\"", "try", ":", "return", "covers_op", "(", "val", ",", "prop", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "\"2-element list/tuple range required for RHS value\"", ")", "def", "_set", "(", "iterable", ")", ":", "\"\"\"Like set(iterable), but works for lists as items in iterable.\n Before constructing a set, lists are converted to tuples.\n \"\"\"", "first", "=", "next", "(", "iter", "(", "iterable", ")", ")", "if", "isinstance", "(", "first", ",", "list", ")", ":", "return", "set", "(", "tuple", "(", "x", ")", "for", "x", "in", "iterable", ")", "return", "set", "(", "iterable", ")", "def", "with_valid_lhs", "(", "op", ")", ":", "@", "wraps", "(", "op", ")", "def", "_wrapper", "(", "prop", ",", "val", ")", ":", "if", "prop", "is", "None", ":", "return", "False", "return", "op", "(", "prop", ",", "val", ")", "return", "_wrapper", "# available filtering operators", "ops", "=", "{", "'lt'", ":", "with_valid_lhs", "(", "operator", ".", "lt", ")", ",", "'lte'", ":", "with_valid_lhs", "(", "operator", ".", "le", ")", ",", "'gt'", ":", "with_valid_lhs", "(", "operator", ".", "gt", ")", ",", "'gte'", ":", "with_valid_lhs", "(", "operator", ".", "ge", ")", ",", "'eq'", ":", "operator", ".", "eq", ",", "'available'", ":", "lambda", "prop", ",", "val", ":", "prop", "is", "not", "None", "if", "val", "else", "prop", "is", "None", ",", "'regex'", ":", "with_valid_lhs", "(", "lambda", "prop", ",", "val", ":", "re", ".", "match", "(", "\"^{}$\"", ".", "format", "(", "val", ")", ",", "prop", ")", ")", ",", "# range operations", "'covers'", ":", "with_valid_lhs", "(", "covers_op", ")", ",", "'within'", ":", "with_valid_lhs", "(", "within_op", ")", ",", "# membership tests", "'in'", ":", "lambda", "prop", ",", "val", ":", "prop", "in", "val", ",", "'contains'", ":", "with_valid_lhs", "(", "lambda", "prop", ",", "val", ":", "val", "in", "prop", ")", ",", "# set tests", "'issubset'", ":", "with_valid_lhs", "(", "lambda", "prop", ",", "val", ":", "_set", "(", "prop", ")", ".", "issubset", "(", "_set", "(", "val", ")", ")", ")", ",", "'issuperset'", ":", "with_valid_lhs", "(", "lambda", "prop", ",", "val", ":", "_set", "(", "prop", ")", ".", "issuperset", "(", "_set", "(", "val", ")", ")", ")", ",", "}", "def", "predicate", "(", "solver", ",", "query", ",", "val", ")", ":", "# needs to handle kwargs like these:", "# 
key=val", "# key__op=val", "# key__key=val", "# key__key__op=val", "# LHS is split on __ in `query`", "assert", "len", "(", "query", ")", ">=", "1", "potential_path", ",", "potential_op_name", "=", "query", "[", ":", "-", "1", "]", ",", "query", "[", "-", "1", "]", "if", "potential_op_name", "in", "ops", ":", "# op is explicit, and potential path is correct", "op_name", "=", "potential_op_name", "else", ":", "# op is implied and depends on property type, path is the whole query", "op_name", "=", "None", "potential_path", "=", "query", "path", "=", "'.'", ".", "join", "(", "potential_path", ")", "if", "path", "in", "solver", ".", "derived_properties", ":", "op", "=", "ops", "[", "op_name", "or", "'eq'", "]", "return", "op", "(", "getattr", "(", "solver", ",", "path", ")", ",", "val", ")", "elif", "pluck", "(", "solver", ".", "parameters", ",", "path", ",", "None", ")", "is", "not", "None", ":", "op", "=", "ops", "[", "op_name", "or", "'available'", "]", "return", "op", "(", "pluck", "(", "solver", ".", "parameters", ",", "path", ")", ",", "val", ")", "elif", "pluck", "(", "solver", ".", "properties", ",", "path", ",", "None", ")", "is", "not", "None", ":", "op", "=", "ops", "[", "op_name", "or", "'eq'", "]", "return", "op", "(", "pluck", "(", "solver", ".", "properties", ",", "path", ")", ",", "val", ")", "else", ":", "op", "=", "ops", "[", "op_name", "or", "'eq'", "]", "return", "op", "(", "None", ",", "val", ")", "# param validation", "sort_reverse", "=", "False", "if", "not", "order_by", ":", "sort_key", "=", "None", "elif", "isinstance", "(", "order_by", ",", "six", ".", "string_types", ")", ":", "if", "order_by", "[", "0", "]", "==", "'-'", ":", "sort_reverse", "=", "True", "order_by", "=", "order_by", "[", "1", ":", "]", "if", "not", "order_by", ":", "sort_key", "=", "None", "else", ":", "sort_key", "=", "lambda", "solver", ":", "pluck", "(", "solver", ",", "order_by", ",", "None", ")", "elif", "callable", "(", "order_by", ")", ":", "sort_key", "=", "order_by", "else", ":", "raise", "TypeError", "(", "\"expected string or callable for 'order_by'\"", ")", "# default filters:", "filters", ".", "setdefault", "(", "'online'", ",", "True", ")", "predicates", "=", "[", "]", "for", "lhs", ",", "val", "in", "filters", ".", "items", "(", ")", ":", "query", "=", "lhs", ".", "split", "(", "'__'", ")", "predicates", ".", "append", "(", "partial", "(", "predicate", ",", "query", "=", "query", ",", "val", "=", "val", ")", ")", "_LOGGER", ".", "debug", "(", "\"Filtering solvers with predicates=%r\"", ",", "predicates", ")", "# optimization for case when exact solver name/id is known:", "# we can fetch only that solver", "# NOTE: in future, complete feature-based filtering will be on server-side", "query", "=", "dict", "(", "refresh_", "=", "refresh", ")", "if", "'name'", "in", "filters", ":", "query", "[", "'name'", "]", "=", "filters", "[", "'name'", "]", "if", "'name__eq'", "in", "filters", ":", "query", "[", "'name'", "]", "=", "filters", "[", "'name__eq'", "]", "# filter", "solvers", "=", "self", ".", "_fetch_solvers", "(", "*", "*", "query", ")", "solvers", "=", "[", "s", "for", "s", "in", "solvers", "if", "all", "(", "p", "(", "s", ")", "for", "p", "in", "predicates", ")", "]", "# sort: undefined (None) key values go last", "if", "sort_key", "is", "not", "None", ":", "solvers_with_keys", "=", "[", "(", "sort_key", "(", "solver", ")", ",", "solver", ")", "for", "solver", "in", "solvers", "]", "solvers_with_invalid_keys", "=", "[", "(", "key", ",", "solver", ")", "for", "key", ",", "solver", 
"in", "solvers_with_keys", "if", "key", "is", "None", "]", "solvers_with_valid_keys", "=", "[", "(", "key", ",", "solver", ")", "for", "key", ",", "solver", "in", "solvers_with_keys", "if", "key", "is", "not", "None", "]", "solvers_with_valid_keys", ".", "sort", "(", "key", "=", "operator", ".", "itemgetter", "(", "0", ")", ")", "solvers", "=", "[", "solver", "for", "key", ",", "solver", "in", "chain", "(", "solvers_with_valid_keys", ",", "solvers_with_invalid_keys", ")", "]", "# reverse if necessary (as a separate step from sorting, so it works for invalid keys", "# and plain list reverse without sorting)", "if", "sort_reverse", ":", "solvers", ".", "reverse", "(", ")", "return", "solvers" ]
Return a filtered list of solvers handled by this client.

        Args:
            refresh (bool, default=False):
                Force refresh of cached list of solvers/properties.

            order_by (callable/str/None, default='avg_load'):
                Solver sorting key function (or :class:`Solver` attribute/item
                dot-separated path). By default, solvers are sorted by average
                load. To explicitly not sort the solvers (and use the API-returned
                order), set ``order_by=None``.

                Signature of the `key` `callable` is::

                    key :: (Solver s, Ord k) => s -> k

                Basic structure of the `key` string path is::

                    "-"? (attr|item) ( "." (attr|item) )*

                For example, to use solver property named ``max_anneal_schedule_points``,
                available in ``Solver.properties`` dict, you can either specify a
                callable `key`::

                    key=lambda solver: solver.properties['max_anneal_schedule_points']

                or, you can use a short string path based key::

                    key='properties.max_anneal_schedule_points'

                Solver derived properties, available as :class:`Solver` properties
                can also be used (e.g. ``num_active_qubits``, ``online``,
                ``avg_load``, etc).

                Ascending sort order is implied, unless the key string path starts
                with ``-``, in which case descending sort is used.

                Note: the sort used for ordering solvers by `key` is **stable**,
                meaning that if multiple solvers have the same value for the key,
                their relative order is preserved, and effectively they are in the
                same order as returned by the API.

                Note: solvers with ``None`` for key appear last in the list of
                solvers. When providing a key callable, ensure all values returned
                are of the same type (particularly in Python 3). For solvers with
                undefined key value, return ``None``.

            **filters:
                See `Filtering forms` and `Operators` below.

        Solver filters are defined, similarly to Django QuerySet filters, with
        keyword arguments of form `<key1>__...__<keyN>[__<operator>]=<value>`.
        Each `<operator>` is a predicate (boolean) function that acts on two
        arguments: value of feature `<name>` (described with keys path
        `<key1.key2...keyN>`) and the required `<value>`.

        Feature `<name>` can be:

        1) a derived solver property, available as an identically named
           :class:`Solver`'s property (`name`, `qpu`, `software`, `online`,
           `num_active_qubits`, `avg_load`)
        2) a solver parameter, available in :obj:`Solver.parameters`
        3) a solver property, available in :obj:`Solver.properties`
        4) a path describing a property in nested dictionaries

        Filtering forms are:

        * <derived_property>__<operator> (object <value>)
        * <derived_property> (bool)

          This form ensures the value of solver's property bound to
          `derived_property`, after applying `operator` equals the `value`.
          The default operator is `eq`.

          For example::

            >>> client.get_solvers(avg_load__gt=0.5)

          but also::

            >>> client.get_solvers(online=True)
            >>> # identical to:
            >>> client.get_solvers(online__eq=True)

        * <parameter>__<operator> (object <value>)
        * <parameter> (bool)

          This form ensures that the solver supports `parameter`. General
          operator form can be used but usually does not make sense for
          parameters, since values are human-readable descriptions. The default
          operator is `available`.

          Example::

            >>> client.get_solvers(flux_biases=True)
            >>> # identical to:
            >>> client.get_solvers(flux_biases__available=True)

        * <property>__<operator> (object <value>)
        * <property> (bool)

          This form ensures the value of the solver's `property`, after applying
          `operator` equals the right-hand side `value`. The default operator is
          `eq`.

        Note: if a non-existing parameter/property name/key is given, the default
        operator is `eq`.

        Operators are:

        * `available` (<name>: str, <value>: bool):
            Test availability of <name> feature.
        * `eq`, `lt`, `lte`, `gt`, `gte` (<name>: str, <value>: any):
            Standard relational operators that compare feature <name> value with
            <value>.
        * `regex` (<name>: str, <value>: str):
            Test regular expression matching feature value.
        * `covers` (<name>: str, <value>: single value or range expressed as
          2-tuple/list):
            Test feature <name> value (which should be a *range*) covers a given
            value or a subrange.
        * `within` (<name>: str, <value>: range expressed as 2-tuple/list):
            Test feature <name> value (which can be a *single value* or a *range*)
            is within a given range.
        * `in` (<name>: str, <value>: container type):
            Test feature <name> value is *in* <value> container.
        * `contains` (<name>: str, <value>: any):
            Test feature <name> value (container type) *contains* <value>.
        * `issubset` (<name>: str, <value>: container type):
            Test feature <name> value (container type) is a subset of <value>.
        * `issuperset` (<name>: str, <value>: container type):
            Test feature <name> value (container type) is a superset of <value>.

        Derived properties are:

        * `name` (str): Solver name/id.
        * `qpu` (bool): Is solver QPU based?
        * `software` (bool): Is solver software based?
        * `online` (bool, default=True): Is solver online?
        * `num_active_qubits` (int): Number of active qubits. Less than or equal
          to `num_qubits`.
        * `avg_load` (float): Solver's average load (similar to Unix load average).

        Common solver parameters are:

        * `flux_biases`: Should solver accept flux biases?
        * `anneal_schedule`: Should solver accept anneal schedule?

        Common solver properties are:

        * `num_qubits` (int): Number of qubits available.
        * `vfyc` (bool): Should solver work on "virtual full-yield chip"?
        * `max_anneal_schedule_points` (int): Piecewise linear annealing schedule
          points.
        * `h_range` ([int,int]), j_range ([int,int]): Biases/couplings values
          range.
        * `num_reads_range` ([int,int]): Range of allowed values for `num_reads`
          parameter.

        Returns:
            list[Solver]: List of all solvers that satisfy the conditions.

        Note:
            Client subclasses (e.g. :class:`dwave.cloud.qpu.Client` or
            :class:`dwave.cloud.sw.Client`) already filter solvers by resource
            type, so for `qpu` and `software` filters to have effect, call
            :meth:`.get_solvers` on base class :class:`~dwave.cloud.client.Client`.

        Examples::

            client.get_solvers(
                num_qubits__gt=2000,                # we need more than 2000 qubits
                num_qubits__lt=4000,                # ... but fewer than 4000 qubits
                num_qubits__within=(2000, 4000),    # an alternative to the previous two lines
                num_active_qubits=1089,             # we want a particular number of active qubits
                vfyc=True,                          # we require a fully yielded Chimera
                vfyc__in=[False, None],             # inverse of the previous filter
                vfyc__available=False,              # we want solvers that do not advertise the vfyc property
                anneal_schedule=True,               # we need support for custom anneal schedule
                max_anneal_schedule_points__gte=4,  # we need at least 4 points for our anneal schedule
                num_reads_range__covers=1000,       # our solver must support returning 1000 reads
                extended_j_range__covers=[-2, 2],   # we need extended J range to contain subrange [-2,2]
                couplings__contains=[0, 128],       # coupling (edge between) qubits (0,128) must exist
                couplings__issuperset=[[0,128], [0,4]],   # two couplings required: (0,128) and (0,4)
                qubits__issuperset={0, 4, 215},     # qubits 0, 4 and 215 must exist
                supported_problem_types__issubset={'ising', 'qubo'},  # require Ising, QUBO or both to be supported
                name='DW_2000Q_3',                  # full solver name/ID match
                name__regex='.*2000.*',             # partial/regex-based solver name match
                chip_id__regex='DW_.*',             # chip ID prefix must be DW_
                topology__type__eq="chimera"        # topology.type must be chimera
            )
[ "Return", "a", "filtered", "list", "of", "solvers", "handled", "by", "this", "client", "." ]
train
https://github.com/dwavesystems/dwave-cloud-client/blob/df3221a8385dc0c04d7b4d84f740bf3ad6706230/dwave/cloud/client.py#L553-L885
dwavesystems/dwave-cloud-client
dwave/cloud/client.py
Client.solvers
def solvers(self, refresh=False, **filters):
        """Deprecated in favor of :meth:`.get_solvers`."""
        warnings.warn("'solvers' is deprecated in favor of 'get_solvers'.", DeprecationWarning)
        return self.get_solvers(refresh=refresh, **filters)
python
def solvers(self, refresh=False, **filters):
        """Deprecated in favor of :meth:`.get_solvers`."""
        warnings.warn("'solvers' is deprecated in favor of 'get_solvers'.", DeprecationWarning)
        return self.get_solvers(refresh=refresh, **filters)
[ "def", "solvers", "(", "self", ",", "refresh", "=", "False", ",", "*", "*", "filters", ")", ":", "warnings", ".", "warn", "(", "\"'solvers' is deprecated in favor of 'get_solvers'.\"", ",", "DeprecationWarning", ")", "return", "self", ".", "get_solvers", "(", "refresh", "=", "refresh", ",", "*", "*", "filters", ")" ]
Deprecated in favor of :meth:`.get_solvers`.
[ "Deprecated", "in", "favor", "of", ":", "meth", ":", ".", "get_solvers", "." ]
train
https://github.com/dwavesystems/dwave-cloud-client/blob/df3221a8385dc0c04d7b4d84f740bf3ad6706230/dwave/cloud/client.py#L887-L890
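The shim above only warns and forwards, so migrating is a rename; a sketch that also verifies the DeprecationWarning is emitted (assumes a valid client configuration):

import warnings
from dwave.cloud import Client

with Client.from_config() as client:
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        old = client.solvers(online=True)        # deprecated spelling
    assert any(issubclass(w.category, DeprecationWarning) for w in caught)
    new = client.get_solvers(online=True)        # drop-in replacement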
dwavesystems/dwave-cloud-client
dwave/cloud/client.py
Client.get_solver
def get_solver(self, name=None, refresh=False, **filters):
        """Load the configuration for a single solver.

        Makes a blocking web call to `{endpoint}/solvers/remote/{solver_name}/`, where `{endpoint}`
        is a URL configured for the client, and returns a :class:`.Solver` instance
        that can be used to submit sampling problems to the D-Wave API and retrieve results.

        Args:
            name (str):
                ID of the requested solver. ``None`` returns the default solver.
                If default solver is not configured, ``None`` returns the first available
                solver in ``Client``'s class (QPU/software/base).

            **filters (keyword arguments, optional):
                Dictionary of filters over features this solver has to have. For a list of
                feature names and values, see: :meth:`~dwave.cloud.client.Client.get_solvers`.

            order_by (callable/str, default='id'):
                Solver sorting key function (or :class:`Solver` attribute name).
                By default, solvers are sorted by ID/name.

            refresh (bool):
                Return solver from cache (if cached with ``get_solvers()``),
                unless set to ``True``.

        Returns:
            :class:`.Solver`

        Examples:
            This example creates two solvers for a client instantiated from
            a local system's auto-detected default configuration file, which configures
            a connection to a D-Wave resource that provides two solvers. The first
            uses the default solver, the second explicitly selects another solver.

            >>> from dwave.cloud import Client
            >>> client = Client.from_config()
            >>> client.get_solvers()   # doctest: +SKIP
            [Solver(id='2000Q_ONLINE_SOLVER1'), Solver(id='2000Q_ONLINE_SOLVER2')]
            >>> solver1 = client.get_solver()    # doctest: +SKIP
            >>> solver2 = client.get_solver(name='2000Q_ONLINE_SOLVER2')    # doctest: +SKIP
            >>> solver1.id  # doctest: +SKIP
            '2000Q_ONLINE_SOLVER1'
            >>> solver2.id  # doctest: +SKIP
            '2000Q_ONLINE_SOLVER2'
            >>> # code that uses client
            >>> client.close() # doctest: +SKIP

        """
        _LOGGER.debug("Requested a solver that best matches feature filters=%r", filters)

        # backward compatibility: name as the first feature
        if name is not None:
            filters.setdefault('name', name)

        # in absence of other filters, config/env solver filters/name are used
        if not filters and self.default_solver:
            filters = self.default_solver

        # get the first solver that satisfies all filters
        try:
            _LOGGER.debug("Fetching solvers according to filters=%r", filters)
            return self.get_solvers(refresh=refresh, **filters)[0]
        except IndexError:
            raise SolverNotFoundError("Solver with the requested features not available")
python
def get_solver(self, name=None, refresh=False, **filters):
        """Load the configuration for a single solver.

        Makes a blocking web call to `{endpoint}/solvers/remote/{solver_name}/`, where `{endpoint}`
        is a URL configured for the client, and returns a :class:`.Solver` instance
        that can be used to submit sampling problems to the D-Wave API and retrieve results.

        Args:
            name (str):
                ID of the requested solver. ``None`` returns the default solver.
                If default solver is not configured, ``None`` returns the first available
                solver in ``Client``'s class (QPU/software/base).

            **filters (keyword arguments, optional):
                Dictionary of filters over features this solver has to have. For a list of
                feature names and values, see: :meth:`~dwave.cloud.client.Client.get_solvers`.

            order_by (callable/str, default='id'):
                Solver sorting key function (or :class:`Solver` attribute name).
                By default, solvers are sorted by ID/name.

            refresh (bool):
                Return solver from cache (if cached with ``get_solvers()``),
                unless set to ``True``.

        Returns:
            :class:`.Solver`

        Examples:
            This example creates two solvers for a client instantiated from
            a local system's auto-detected default configuration file, which configures
            a connection to a D-Wave resource that provides two solvers. The first
            uses the default solver, the second explicitly selects another solver.

            >>> from dwave.cloud import Client
            >>> client = Client.from_config()
            >>> client.get_solvers()   # doctest: +SKIP
            [Solver(id='2000Q_ONLINE_SOLVER1'), Solver(id='2000Q_ONLINE_SOLVER2')]
            >>> solver1 = client.get_solver()    # doctest: +SKIP
            >>> solver2 = client.get_solver(name='2000Q_ONLINE_SOLVER2')    # doctest: +SKIP
            >>> solver1.id  # doctest: +SKIP
            '2000Q_ONLINE_SOLVER1'
            >>> solver2.id  # doctest: +SKIP
            '2000Q_ONLINE_SOLVER2'
            >>> # code that uses client
            >>> client.close() # doctest: +SKIP

        """
        _LOGGER.debug("Requested a solver that best matches feature filters=%r", filters)

        # backward compatibility: name as the first feature
        if name is not None:
            filters.setdefault('name', name)

        # in absence of other filters, config/env solver filters/name are used
        if not filters and self.default_solver:
            filters = self.default_solver

        # get the first solver that satisfies all filters
        try:
            _LOGGER.debug("Fetching solvers according to filters=%r", filters)
            return self.get_solvers(refresh=refresh, **filters)[0]
        except IndexError:
            raise SolverNotFoundError("Solver with the requested features not available")
[ "def", "get_solver", "(", "self", ",", "name", "=", "None", ",", "refresh", "=", "False", ",", "*", "*", "filters", ")", ":", "_LOGGER", ".", "debug", "(", "\"Requested a solver that best matches feature filters=%r\"", ",", "filters", ")", "# backward compatibility: name as the first feature", "if", "name", "is", "not", "None", ":", "filters", ".", "setdefault", "(", "'name'", ",", "name", ")", "# in absence of other filters, config/env solver filters/name are used", "if", "not", "filters", "and", "self", ".", "default_solver", ":", "filters", "=", "self", ".", "default_solver", "# get the first solver that satisfies all filters", "try", ":", "_LOGGER", ".", "debug", "(", "\"Fetching solvers according to filters=%r\"", ",", "filters", ")", "return", "self", ".", "get_solvers", "(", "refresh", "=", "refresh", ",", "*", "*", "filters", ")", "[", "0", "]", "except", "IndexError", ":", "raise", "SolverNotFoundError", "(", "\"Solver with the requested features not available\"", ")" ]
Load the configuration for a single solver.

        Makes a blocking web call to `{endpoint}/solvers/remote/{solver_name}/`, where `{endpoint}`
        is a URL configured for the client, and returns a :class:`.Solver` instance
        that can be used to submit sampling problems to the D-Wave API and retrieve results.

        Args:
            name (str):
                ID of the requested solver. ``None`` returns the default solver.
                If default solver is not configured, ``None`` returns the first available
                solver in ``Client``'s class (QPU/software/base).

            **filters (keyword arguments, optional):
                Dictionary of filters over features this solver has to have. For a list of
                feature names and values, see: :meth:`~dwave.cloud.client.Client.get_solvers`.

            order_by (callable/str, default='id'):
                Solver sorting key function (or :class:`Solver` attribute name).
                By default, solvers are sorted by ID/name.

            refresh (bool):
                Return solver from cache (if cached with ``get_solvers()``),
                unless set to ``True``.

        Returns:
            :class:`.Solver`

        Examples:
            This example creates two solvers for a client instantiated from
            a local system's auto-detected default configuration file, which configures
            a connection to a D-Wave resource that provides two solvers. The first
            uses the default solver, the second explicitly selects another solver.

            >>> from dwave.cloud import Client
            >>> client = Client.from_config()
            >>> client.get_solvers()   # doctest: +SKIP
            [Solver(id='2000Q_ONLINE_SOLVER1'), Solver(id='2000Q_ONLINE_SOLVER2')]
            >>> solver1 = client.get_solver()    # doctest: +SKIP
            >>> solver2 = client.get_solver(name='2000Q_ONLINE_SOLVER2')    # doctest: +SKIP
            >>> solver1.id  # doctest: +SKIP
            '2000Q_ONLINE_SOLVER1'
            >>> solver2.id  # doctest: +SKIP
            '2000Q_ONLINE_SOLVER2'
            >>> # code that uses client
            >>> client.close() # doctest: +SKIP
[ "Load", "the", "configuration", "for", "a", "single", "solver", "." ]
train
https://github.com/dwavesystems/dwave-cloud-client/blob/df3221a8385dc0c04d7b4d84f740bf3ad6706230/dwave/cloud/client.py#L892-L955
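Because get_solver() raises SolverNotFoundError when no solver satisfies the filters, a guarded lookup with a fallback is a natural pattern; a sketch, assuming SolverNotFoundError is importable from dwave.cloud.exceptions (the module this file's other docstrings cite for its exceptions) and using an illustrative feature filter:

from dwave.cloud import Client
from dwave.cloud.exceptions import SolverNotFoundError

with Client.from_config() as client:
    try:
        # Prefer a solver that accepts custom anneal schedules.
        solver = client.get_solver(anneal_schedule=True)
    except SolverNotFoundError:
        # Fall back to the configured default solver.
        solver = client.get_solver()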
dwavesystems/dwave-cloud-client
dwave/cloud/client.py
Client._submit
def _submit(self, body, future):
        """Enqueue a problem for submission to the server.

        This method is thread safe.
        """
        self._submission_queue.put(self._submit.Message(body, future))
python
def _submit(self, body, future):
        """Enqueue a problem for submission to the server.

        This method is thread safe.
        """
        self._submission_queue.put(self._submit.Message(body, future))
[ "def", "_submit", "(", "self", ",", "body", ",", "future", ")", ":", "self", ".", "_submission_queue", ".", "put", "(", "self", ".", "_submit", ".", "Message", "(", "body", ",", "future", ")", ")" ]
Enqueue a problem for submission to the server.

        This method is thread safe.
[ "Enqueue", "a", "problem", "for", "submission", "to", "the", "server", "." ]
train
https://github.com/dwavesystems/dwave-cloud-client/blob/df3221a8385dc0c04d7b4d84f740bf3ad6706230/dwave/cloud/client.py#L957-L962
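_submit() reads its queue-item type off the function object itself (self._submit.Message), which keeps the item's shape defined next to the only code that constructs it. A minimal sketch of that pattern, not the library's actual definition, only the shape implied by the call above:

from collections import namedtuple
from queue import Queue

def _submit(submission_queue, body, future):
    # Thread safe: Queue.put handles its own locking.
    submission_queue.put(_submit.Message(body, future))

# Attach the item type to the function object, mirroring self._submit.Message.
_submit.Message = namedtuple('Message', ['body', 'future'])

q = Queue()
_submit(q, body='{"problem": "..."}', future=None)
print(q.get())  # Message(body='{"problem": "..."}', future=None)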
dwavesystems/dwave-cloud-client
dwave/cloud/client.py
Client._do_submit_problems
def _do_submit_problems(self):
        """Pull problems from the submission queue and submit them.

        Note:
            This method is always run inside of a daemon thread.
        """
        try:
            while True:
                # Pull as many problems as we can, block on the first one,
                # but once we have one problem, switch to non-blocking then
                # submit without blocking again.

                # `None` task is used to signal thread termination
                item = self._submission_queue.get()
                if item is None:
                    break

                ready_problems = [item]
                while len(ready_problems) < self._SUBMIT_BATCH_SIZE:
                    try:
                        ready_problems.append(self._submission_queue.get_nowait())
                    except queue.Empty:
                        break

                # Submit the problems
                _LOGGER.debug("Submitting %d problems", len(ready_problems))
                body = '[' + ','.join(mess.body for mess in ready_problems) + ']'
                try:
                    try:
                        response = self.session.post(posixpath.join(self.endpoint, 'problems/'), body)
                        localtime_of_response = epochnow()
                    except requests.exceptions.Timeout:
                        raise RequestTimeout

                    if response.status_code == 401:
                        raise SolverAuthenticationError()
                    response.raise_for_status()

                    message = response.json()
                    _LOGGER.debug("Finished submitting %d problems", len(ready_problems))
                except BaseException as exception:
                    _LOGGER.debug("Submit failed for %d problems", len(ready_problems))
                    if not isinstance(exception, SolverAuthenticationError):
                        exception = IOError(exception)

                    for mess in ready_problems:
                        mess.future._set_error(exception, sys.exc_info())
                        self._submission_queue.task_done()
                    continue

                # Pass on the information
                for submission, res in zip(ready_problems, message):
                    submission.future._set_clock_diff(response, localtime_of_response)
                    self._handle_problem_status(res, submission.future)
                    self._submission_queue.task_done()

                # this is equivalent to a yield to scheduler in other threading libraries
                time.sleep(0)

        except BaseException as err:
            _LOGGER.exception(err)
python
def _do_submit_problems(self): """Pull problems from the submission queue and submit them. Note: This method is always run inside of a daemon thread. """ try: while True: # Pull as many problems as we can, block on the first one, # but once we have one problem, switch to non-blocking then # submit without blocking again. # `None` task is used to signal thread termination item = self._submission_queue.get() if item is None: break ready_problems = [item] while len(ready_problems) < self._SUBMIT_BATCH_SIZE: try: ready_problems.append(self._submission_queue.get_nowait()) except queue.Empty: break # Submit the problems _LOGGER.debug("Submitting %d problems", len(ready_problems)) body = '[' + ','.join(mess.body for mess in ready_problems) + ']' try: try: response = self.session.post(posixpath.join(self.endpoint, 'problems/'), body) localtime_of_response = epochnow() except requests.exceptions.Timeout: raise RequestTimeout if response.status_code == 401: raise SolverAuthenticationError() response.raise_for_status() message = response.json() _LOGGER.debug("Finished submitting %d problems", len(ready_problems)) except BaseException as exception: _LOGGER.debug("Submit failed for %d problems", len(ready_problems)) if not isinstance(exception, SolverAuthenticationError): exception = IOError(exception) for mess in ready_problems: mess.future._set_error(exception, sys.exc_info()) self._submission_queue.task_done() continue # Pass on the information for submission, res in zip(ready_problems, message): submission.future._set_clock_diff(response, localtime_of_response) self._handle_problem_status(res, submission.future) self._submission_queue.task_done() # this is equivalent to a yield to scheduler in other threading libraries time.sleep(0) except BaseException as err: _LOGGER.exception(err)
[ "def", "_do_submit_problems", "(", "self", ")", ":", "try", ":", "while", "True", ":", "# Pull as many problems as we can, block on the first one,", "# but once we have one problem, switch to non-blocking then", "# submit without blocking again.", "# `None` task is used to signal thread termination", "item", "=", "self", ".", "_submission_queue", ".", "get", "(", ")", "if", "item", "is", "None", ":", "break", "ready_problems", "=", "[", "item", "]", "while", "len", "(", "ready_problems", ")", "<", "self", ".", "_SUBMIT_BATCH_SIZE", ":", "try", ":", "ready_problems", ".", "append", "(", "self", ".", "_submission_queue", ".", "get_nowait", "(", ")", ")", "except", "queue", ".", "Empty", ":", "break", "# Submit the problems", "_LOGGER", ".", "debug", "(", "\"Submitting %d problems\"", ",", "len", "(", "ready_problems", ")", ")", "body", "=", "'['", "+", "','", ".", "join", "(", "mess", ".", "body", "for", "mess", "in", "ready_problems", ")", "+", "']'", "try", ":", "try", ":", "response", "=", "self", ".", "session", ".", "post", "(", "posixpath", ".", "join", "(", "self", ".", "endpoint", ",", "'problems/'", ")", ",", "body", ")", "localtime_of_response", "=", "epochnow", "(", ")", "except", "requests", ".", "exceptions", ".", "Timeout", ":", "raise", "RequestTimeout", "if", "response", ".", "status_code", "==", "401", ":", "raise", "SolverAuthenticationError", "(", ")", "response", ".", "raise_for_status", "(", ")", "message", "=", "response", ".", "json", "(", ")", "_LOGGER", ".", "debug", "(", "\"Finished submitting %d problems\"", ",", "len", "(", "ready_problems", ")", ")", "except", "BaseException", "as", "exception", ":", "_LOGGER", ".", "debug", "(", "\"Submit failed for %d problems\"", ",", "len", "(", "ready_problems", ")", ")", "if", "not", "isinstance", "(", "exception", ",", "SolverAuthenticationError", ")", ":", "exception", "=", "IOError", "(", "exception", ")", "for", "mess", "in", "ready_problems", ":", "mess", ".", "future", ".", "_set_error", "(", "exception", ",", "sys", ".", "exc_info", "(", ")", ")", "self", ".", "_submission_queue", ".", "task_done", "(", ")", "continue", "# Pass on the information", "for", "submission", ",", "res", "in", "zip", "(", "ready_problems", ",", "message", ")", ":", "submission", ".", "future", ".", "_set_clock_diff", "(", "response", ",", "localtime_of_response", ")", "self", ".", "_handle_problem_status", "(", "res", ",", "submission", ".", "future", ")", "self", ".", "_submission_queue", ".", "task_done", "(", ")", "# this is equivalent to a yield to scheduler in other threading libraries", "time", ".", "sleep", "(", "0", ")", "except", "BaseException", "as", "err", ":", "_LOGGER", ".", "exception", "(", "err", ")" ]
Pull problems from the submission queue and submit them. Note: This method is always run inside of a daemon thread.
[ "Pull", "problems", "from", "the", "submission", "queue", "and", "submit", "them", "." ]
train
https://github.com/dwavesystems/dwave-cloud-client/blob/df3221a8385dc0c04d7b4d84f740bf3ad6706230/dwave/cloud/client.py#L965-L1025
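The worker above drains its queue with a reusable pattern: block for the first item, then collect more without blocking, up to a batch limit. A minimal standalone sketch of that pattern, using only the standard library (the function name and sentinel convention are illustrative, not part of the client API):

import queue

def drain_batch(work_queue, batch_size):
    # Block until at least one item is available; a None item is the
    # shutdown sentinel convention used by the worker threads above.
    first = work_queue.get()
    if first is None:
        return None
    batch = [first]
    # Opportunistically grab more items without blocking again.
    while len(batch) < batch_size:
        try:
            batch.append(work_queue.get_nowait())
        except queue.Empty:
            break
    return batch

Submitting one HTTP request per batch amortizes connection overhead when many problems are enqueued at once.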
dwavesystems/dwave-cloud-client
dwave/cloud/client.py
Client._handle_problem_status
def _handle_problem_status(self, message, future): """Handle the results of a problem submission or results request. This method checks the status of the problem and puts it in the correct queue. Args: message (dict): Update message from the SAPI server wrt. this problem. future `Future`: future corresponding to the problem Note: This method is always run inside of a daemon thread. """ try: _LOGGER.trace("Handling response: %r", message) _LOGGER.debug("Handling response for %s with status %s", message.get('id'), message.get('status')) # Handle errors in batch mode if 'error_code' in message and 'error_msg' in message: raise SolverFailureError(message['error_msg']) if 'status' not in message: raise InvalidAPIResponseError("'status' missing in problem description response") if 'id' not in message: raise InvalidAPIResponseError("'id' missing in problem description response") future.id = message['id'] future.remote_status = status = message['status'] # The future may not have the ID set yet with future._single_cancel_lock: # This handles the case where cancel has been called on a future # before that future received the problem id if future._cancel_requested: if not future._cancel_sent and status == self.STATUS_PENDING: # The problem has been canceled but the status says its still in queue # try to cancel it self._cancel(message['id'], future) # If a cancel request could meaningfully be sent it has been now future._cancel_sent = True if not future.time_received and message.get('submitted_on'): future.time_received = parse_datetime(message['submitted_on']) if not future.time_solved and message.get('solved_on'): future.time_solved = parse_datetime(message['solved_on']) if not future.eta_min and message.get('earliest_estimated_completion'): future.eta_min = parse_datetime(message['earliest_estimated_completion']) if not future.eta_max and message.get('latest_estimated_completion'): future.eta_max = parse_datetime(message['latest_estimated_completion']) if status == self.STATUS_COMPLETE: # TODO: find a better way to differentiate between # `completed-on-submit` and `completed-on-poll`. # Loading should happen only once, not every time when response # doesn't contain 'answer'. # If the message is complete, forward it to the future object if 'answer' in message: future._set_message(message) # If the problem is complete, but we don't have the result data # put the problem in the queue for loading results. else: self._load(future) elif status in self.ANY_STATUS_ONGOING: # If the response is pending add it to the queue. self._poll(future) elif status == self.STATUS_CANCELLED: # If canceled return error raise CanceledFutureError() else: # Return an error to the future object errmsg = message.get('error_message', 'An unknown error has occurred.') if 'solver is offline' in errmsg.lower(): raise SolverOfflineError(errmsg) else: raise SolverFailureError(errmsg) except Exception as error: # If there were any unhandled errors we need to release the # lock in the future, otherwise deadlock occurs. future._set_error(error, sys.exc_info())
python
def _handle_problem_status(self, message, future): """Handle the results of a problem submission or results request. This method checks the status of the problem and puts it in the correct queue. Args: message (dict): Update message from the SAPI server wrt. this problem. future `Future`: future corresponding to the problem Note: This method is always run inside of a daemon thread. """ try: _LOGGER.trace("Handling response: %r", message) _LOGGER.debug("Handling response for %s with status %s", message.get('id'), message.get('status')) # Handle errors in batch mode if 'error_code' in message and 'error_msg' in message: raise SolverFailureError(message['error_msg']) if 'status' not in message: raise InvalidAPIResponseError("'status' missing in problem description response") if 'id' not in message: raise InvalidAPIResponseError("'id' missing in problem description response") future.id = message['id'] future.remote_status = status = message['status'] # The future may not have the ID set yet with future._single_cancel_lock: # This handles the case where cancel has been called on a future # before that future received the problem id if future._cancel_requested: if not future._cancel_sent and status == self.STATUS_PENDING: # The problem has been canceled but the status says its still in queue # try to cancel it self._cancel(message['id'], future) # If a cancel request could meaningfully be sent it has been now future._cancel_sent = True if not future.time_received and message.get('submitted_on'): future.time_received = parse_datetime(message['submitted_on']) if not future.time_solved and message.get('solved_on'): future.time_solved = parse_datetime(message['solved_on']) if not future.eta_min and message.get('earliest_estimated_completion'): future.eta_min = parse_datetime(message['earliest_estimated_completion']) if not future.eta_max and message.get('latest_estimated_completion'): future.eta_max = parse_datetime(message['latest_estimated_completion']) if status == self.STATUS_COMPLETE: # TODO: find a better way to differentiate between # `completed-on-submit` and `completed-on-poll`. # Loading should happen only once, not every time when response # doesn't contain 'answer'. # If the message is complete, forward it to the future object if 'answer' in message: future._set_message(message) # If the problem is complete, but we don't have the result data # put the problem in the queue for loading results. else: self._load(future) elif status in self.ANY_STATUS_ONGOING: # If the response is pending add it to the queue. self._poll(future) elif status == self.STATUS_CANCELLED: # If canceled return error raise CanceledFutureError() else: # Return an error to the future object errmsg = message.get('error_message', 'An unknown error has occurred.') if 'solver is offline' in errmsg.lower(): raise SolverOfflineError(errmsg) else: raise SolverFailureError(errmsg) except Exception as error: # If there were any unhandled errors we need to release the # lock in the future, otherwise deadlock occurs. future._set_error(error, sys.exc_info())
[ "def", "_handle_problem_status", "(", "self", ",", "message", ",", "future", ")", ":", "try", ":", "_LOGGER", ".", "trace", "(", "\"Handling response: %r\"", ",", "message", ")", "_LOGGER", ".", "debug", "(", "\"Handling response for %s with status %s\"", ",", "message", ".", "get", "(", "'id'", ")", ",", "message", ".", "get", "(", "'status'", ")", ")", "# Handle errors in batch mode", "if", "'error_code'", "in", "message", "and", "'error_msg'", "in", "message", ":", "raise", "SolverFailureError", "(", "message", "[", "'error_msg'", "]", ")", "if", "'status'", "not", "in", "message", ":", "raise", "InvalidAPIResponseError", "(", "\"'status' missing in problem description response\"", ")", "if", "'id'", "not", "in", "message", ":", "raise", "InvalidAPIResponseError", "(", "\"'id' missing in problem description response\"", ")", "future", ".", "id", "=", "message", "[", "'id'", "]", "future", ".", "remote_status", "=", "status", "=", "message", "[", "'status'", "]", "# The future may not have the ID set yet", "with", "future", ".", "_single_cancel_lock", ":", "# This handles the case where cancel has been called on a future", "# before that future received the problem id", "if", "future", ".", "_cancel_requested", ":", "if", "not", "future", ".", "_cancel_sent", "and", "status", "==", "self", ".", "STATUS_PENDING", ":", "# The problem has been canceled but the status says its still in queue", "# try to cancel it", "self", ".", "_cancel", "(", "message", "[", "'id'", "]", ",", "future", ")", "# If a cancel request could meaningfully be sent it has been now", "future", ".", "_cancel_sent", "=", "True", "if", "not", "future", ".", "time_received", "and", "message", ".", "get", "(", "'submitted_on'", ")", ":", "future", ".", "time_received", "=", "parse_datetime", "(", "message", "[", "'submitted_on'", "]", ")", "if", "not", "future", ".", "time_solved", "and", "message", ".", "get", "(", "'solved_on'", ")", ":", "future", ".", "time_solved", "=", "parse_datetime", "(", "message", "[", "'solved_on'", "]", ")", "if", "not", "future", ".", "eta_min", "and", "message", ".", "get", "(", "'earliest_estimated_completion'", ")", ":", "future", ".", "eta_min", "=", "parse_datetime", "(", "message", "[", "'earliest_estimated_completion'", "]", ")", "if", "not", "future", ".", "eta_max", "and", "message", ".", "get", "(", "'latest_estimated_completion'", ")", ":", "future", ".", "eta_max", "=", "parse_datetime", "(", "message", "[", "'latest_estimated_completion'", "]", ")", "if", "status", "==", "self", ".", "STATUS_COMPLETE", ":", "# TODO: find a better way to differentiate between", "# `completed-on-submit` and `completed-on-poll`.", "# Loading should happen only once, not every time when response", "# doesn't contain 'answer'.", "# If the message is complete, forward it to the future object", "if", "'answer'", "in", "message", ":", "future", ".", "_set_message", "(", "message", ")", "# If the problem is complete, but we don't have the result data", "# put the problem in the queue for loading results.", "else", ":", "self", ".", "_load", "(", "future", ")", "elif", "status", "in", "self", ".", "ANY_STATUS_ONGOING", ":", "# If the response is pending add it to the queue.", "self", ".", "_poll", "(", "future", ")", "elif", "status", "==", "self", ".", "STATUS_CANCELLED", ":", "# If canceled return error", "raise", "CanceledFutureError", "(", ")", "else", ":", "# Return an error to the future object", "errmsg", "=", "message", ".", "get", "(", "'error_message'", ",", "'An unknown error has occurred.'", 
")", "if", "'solver is offline'", "in", "errmsg", ".", "lower", "(", ")", ":", "raise", "SolverOfflineError", "(", "errmsg", ")", "else", ":", "raise", "SolverFailureError", "(", "errmsg", ")", "except", "Exception", "as", "error", ":", "# If there were any unhandled errors we need to release the", "# lock in the future, otherwise deadlock occurs.", "future", ".", "_set_error", "(", "error", ",", "sys", ".", "exc_info", "(", ")", ")" ]
Handle the results of a problem submission or results request. This method checks the status of the problem and puts it in the correct queue. Args: message (dict): Update message from the SAPI server wrt. this problem. future `Future`: future corresponding to the problem Note: This method is always run inside of a daemon thread.
[ "Handle", "the", "results", "of", "a", "problem", "submission", "or", "results", "request", "." ]
train
https://github.com/dwavesystems/dwave-cloud-client/blob/df3221a8385dc0c04d7b4d84f740bf3ad6706230/dwave/cloud/client.py#L1027-L1109
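The handler above is essentially a dispatch over the remote status. A simplified sketch of that routing, with hypothetical status constants standing in for the real Client.STATUS_* attributes:

# Hypothetical constants; the real values live on the Client class.
STATUS_COMPLETE = 'COMPLETED'
STATUS_CANCELLED = 'CANCELLED'
ANY_STATUS_ONGOING = {'PENDING', 'IN_PROGRESS'}

def route(message):
    status = message['status']
    if status == STATUS_COMPLETE:
        # answer inlined -> resolve now; otherwise fetch it separately
        return 'resolve' if 'answer' in message else 'load'
    if status in ANY_STATUS_ONGOING:
        return 'poll'
    if status == STATUS_CANCELLED:
        return 'cancelled'
    return 'error'

assert route({'status': 'COMPLETED', 'answer': {}}) == 'resolve'
assert route({'status': 'PENDING'}) == 'poll'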
dwavesystems/dwave-cloud-client
dwave/cloud/client.py
Client._do_cancel_problems
def _do_cancel_problems(self): """Pull ids from the cancel queue and submit them. Note: This method is always run inside of a daemon thread. """ try: while True: # Pull as many problems as we can, block when none are available. # `None` task is used to signal thread termination item = self._cancel_queue.get() if item is None: break item_list = [item] while True: try: item_list.append(self._cancel_queue.get_nowait()) except queue.Empty: break # Submit the problems, attach the ids as a json list in the # body of the delete query. try: body = [item[0] for item in item_list] try: self.session.delete(posixpath.join(self.endpoint, 'problems/'), json=body) except requests.exceptions.Timeout: raise RequestTimeout except Exception as err: for _, future in item_list: if future is not None: future._set_error(err, sys.exc_info()) # Mark all the ids as processed regardless of success or failure. [self._cancel_queue.task_done() for _ in item_list] # this is equivalent to a yield to scheduler in other threading libraries time.sleep(0) except Exception as err: _LOGGER.exception(err)
python
def _do_cancel_problems(self): """Pull ids from the cancel queue and submit them. Note: This method is always run inside of a daemon thread. """ try: while True: # Pull as many problems as we can, block when none are available. # `None` task is used to signal thread termination item = self._cancel_queue.get() if item is None: break item_list = [item] while True: try: item_list.append(self._cancel_queue.get_nowait()) except queue.Empty: break # Submit the problems, attach the ids as a json list in the # body of the delete query. try: body = [item[0] for item in item_list] try: self.session.delete(posixpath.join(self.endpoint, 'problems/'), json=body) except requests.exceptions.Timeout: raise RequestTimeout except Exception as err: for _, future in item_list: if future is not None: future._set_error(err, sys.exc_info()) # Mark all the ids as processed regardless of success or failure. [self._cancel_queue.task_done() for _ in item_list] # this is equivalent to a yield to scheduler in other threading libraries time.sleep(0) except Exception as err: _LOGGER.exception(err)
[ "def", "_do_cancel_problems", "(", "self", ")", ":", "try", ":", "while", "True", ":", "# Pull as many problems as we can, block when none are available.", "# `None` task is used to signal thread termination", "item", "=", "self", ".", "_cancel_queue", ".", "get", "(", ")", "if", "item", "is", "None", ":", "break", "item_list", "=", "[", "item", "]", "while", "True", ":", "try", ":", "item_list", ".", "append", "(", "self", ".", "_cancel_queue", ".", "get_nowait", "(", ")", ")", "except", "queue", ".", "Empty", ":", "break", "# Submit the problems, attach the ids as a json list in the", "# body of the delete query.", "try", ":", "body", "=", "[", "item", "[", "0", "]", "for", "item", "in", "item_list", "]", "try", ":", "self", ".", "session", ".", "delete", "(", "posixpath", ".", "join", "(", "self", ".", "endpoint", ",", "'problems/'", ")", ",", "json", "=", "body", ")", "except", "requests", ".", "exceptions", ".", "Timeout", ":", "raise", "RequestTimeout", "except", "Exception", "as", "err", ":", "for", "_", ",", "future", "in", "item_list", ":", "if", "future", "is", "not", "None", ":", "future", ".", "_set_error", "(", "err", ",", "sys", ".", "exc_info", "(", ")", ")", "# Mark all the ids as processed regardless of success or failure.", "[", "self", ".", "_cancel_queue", ".", "task_done", "(", ")", "for", "_", "in", "item_list", "]", "# this is equivalent to a yield to scheduler in other threading libraries", "time", ".", "sleep", "(", "0", ")", "except", "Exception", "as", "err", ":", "_LOGGER", ".", "exception", "(", "err", ")" ]
Pull ids from the cancel queue and submit them. Note: This method is always run inside of a daemon thread.
[ "Pull", "ids", "from", "the", "cancel", "queue", "and", "submit", "them", "." ]
train
https://github.com/dwavesystems/dwave-cloud-client/blob/df3221a8385dc0c04d7b4d84f740bf3ad6706230/dwave/cloud/client.py#L1118-L1162
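Cancellation is batched the same way: one DELETE request carries a JSON array of problem ids in its body, which requests supports via the json= keyword on Session.delete(). A hedged sketch of that call shape (the endpoint value is illustrative):

import requests

def cancel_problems(session, endpoint, ids):
    # One DELETE for the whole batch; the body is a JSON array of ids.
    # `endpoint` is illustrative; the client builds it from its config.
    return session.delete(endpoint + 'problems/', json=list(ids))

# session = requests.Session()
# cancel_problems(session, 'https://example.com/sapi/', ['id1', 'id2'])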
dwavesystems/dwave-cloud-client
dwave/cloud/client.py
Client._poll
def _poll(self, future): """Enqueue a problem to poll the server for status.""" if future._poll_backoff is None: # on first poll, start with minimal back-off future._poll_backoff = self._POLL_BACKOFF_MIN # if we have ETA of results, schedule the first poll for then if future.eta_min and self._is_clock_diff_acceptable(future): at = datetime_to_timestamp(future.eta_min) _LOGGER.debug("Response ETA indicated and local clock reliable. " "Scheduling first polling at +%.2f sec", at - epochnow()) else: at = time.time() + future._poll_backoff _LOGGER.debug("Response ETA not indicated, or local clock unreliable. " "Scheduling first polling at +%.2f sec", at - epochnow()) else: # update exponential poll back-off, clipped to a range future._poll_backoff = \ max(self._POLL_BACKOFF_MIN, min(future._poll_backoff * 2, self._POLL_BACKOFF_MAX)) # for poll priority we use timestamp of next scheduled poll at = time.time() + future._poll_backoff now = utcnow() future_age = (now - future.time_created).total_seconds() _LOGGER.debug("Polling scheduled at %.2f with %.2f sec new back-off for: %s (future's age: %.2f sec)", at, future._poll_backoff, future.id, future_age) # don't enqueue for next poll if polling_timeout is exceeded by then future_age_on_next_poll = future_age + (at - datetime_to_timestamp(now)) if self.polling_timeout is not None and future_age_on_next_poll > self.polling_timeout: _LOGGER.debug("Polling timeout exceeded before next poll: %.2f sec > %.2f sec, aborting polling!", future_age_on_next_poll, self.polling_timeout) raise PollingTimeout self._poll_queue.put((at, future))
python
def _poll(self, future): """Enqueue a problem to poll the server for status.""" if future._poll_backoff is None: # on first poll, start with minimal back-off future._poll_backoff = self._POLL_BACKOFF_MIN # if we have ETA of results, schedule the first poll for then if future.eta_min and self._is_clock_diff_acceptable(future): at = datetime_to_timestamp(future.eta_min) _LOGGER.debug("Response ETA indicated and local clock reliable. " "Scheduling first polling at +%.2f sec", at - epochnow()) else: at = time.time() + future._poll_backoff _LOGGER.debug("Response ETA not indicated, or local clock unreliable. " "Scheduling first polling at +%.2f sec", at - epochnow()) else: # update exponential poll back-off, clipped to a range future._poll_backoff = \ max(self._POLL_BACKOFF_MIN, min(future._poll_backoff * 2, self._POLL_BACKOFF_MAX)) # for poll priority we use timestamp of next scheduled poll at = time.time() + future._poll_backoff now = utcnow() future_age = (now - future.time_created).total_seconds() _LOGGER.debug("Polling scheduled at %.2f with %.2f sec new back-off for: %s (future's age: %.2f sec)", at, future._poll_backoff, future.id, future_age) # don't enqueue for next poll if polling_timeout is exceeded by then future_age_on_next_poll = future_age + (at - datetime_to_timestamp(now)) if self.polling_timeout is not None and future_age_on_next_poll > self.polling_timeout: _LOGGER.debug("Polling timeout exceeded before next poll: %.2f sec > %.2f sec, aborting polling!", future_age_on_next_poll, self.polling_timeout) raise PollingTimeout self._poll_queue.put((at, future))
[ "def", "_poll", "(", "self", ",", "future", ")", ":", "if", "future", ".", "_poll_backoff", "is", "None", ":", "# on first poll, start with minimal back-off", "future", ".", "_poll_backoff", "=", "self", ".", "_POLL_BACKOFF_MIN", "# if we have ETA of results, schedule the first poll for then", "if", "future", ".", "eta_min", "and", "self", ".", "_is_clock_diff_acceptable", "(", "future", ")", ":", "at", "=", "datetime_to_timestamp", "(", "future", ".", "eta_min", ")", "_LOGGER", ".", "debug", "(", "\"Response ETA indicated and local clock reliable. \"", "\"Scheduling first polling at +%.2f sec\"", ",", "at", "-", "epochnow", "(", ")", ")", "else", ":", "at", "=", "time", ".", "time", "(", ")", "+", "future", ".", "_poll_backoff", "_LOGGER", ".", "debug", "(", "\"Response ETA not indicated, or local clock unreliable. \"", "\"Scheduling first polling at +%.2f sec\"", ",", "at", "-", "epochnow", "(", ")", ")", "else", ":", "# update exponential poll back-off, clipped to a range", "future", ".", "_poll_backoff", "=", "max", "(", "self", ".", "_POLL_BACKOFF_MIN", ",", "min", "(", "future", ".", "_poll_backoff", "*", "2", ",", "self", ".", "_POLL_BACKOFF_MAX", ")", ")", "# for poll priority we use timestamp of next scheduled poll", "at", "=", "time", ".", "time", "(", ")", "+", "future", ".", "_poll_backoff", "now", "=", "utcnow", "(", ")", "future_age", "=", "(", "now", "-", "future", ".", "time_created", ")", ".", "total_seconds", "(", ")", "_LOGGER", ".", "debug", "(", "\"Polling scheduled at %.2f with %.2f sec new back-off for: %s (future's age: %.2f sec)\"", ",", "at", ",", "future", ".", "_poll_backoff", ",", "future", ".", "id", ",", "future_age", ")", "# don't enqueue for next poll if polling_timeout is exceeded by then", "future_age_on_next_poll", "=", "future_age", "+", "(", "at", "-", "datetime_to_timestamp", "(", "now", ")", ")", "if", "self", ".", "polling_timeout", "is", "not", "None", "and", "future_age_on_next_poll", ">", "self", ".", "polling_timeout", ":", "_LOGGER", ".", "debug", "(", "\"Polling timeout exceeded before next poll: %.2f sec > %.2f sec, aborting polling!\"", ",", "future_age_on_next_poll", ",", "self", ".", "polling_timeout", ")", "raise", "PollingTimeout", "self", ".", "_poll_queue", ".", "put", "(", "(", "at", ",", "future", ")", ")" ]
Enqueue a problem to poll the server for status.
[ "Enqueue", "a", "problem", "to", "poll", "the", "server", "for", "status", "." ]
train
https://github.com/dwavesystems/dwave-cloud-client/blob/df3221a8385dc0c04d7b4d84f740bf3ad6706230/dwave/cloud/client.py#L1174-L1212
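The back-off update doubles the previous delay and clips it to a fixed range. A worked sketch with hypothetical bounds (the real _POLL_BACKOFF_MIN and _POLL_BACKOFF_MAX values are class attributes not shown in this record):

POLL_BACKOFF_MIN = 0.05   # hypothetical values, for illustration only
POLL_BACKOFF_MAX = 60.0

def next_backoff(current):
    if current is None:            # first poll starts at the minimum
        return POLL_BACKOFF_MIN
    return max(POLL_BACKOFF_MIN, min(current * 2, POLL_BACKOFF_MAX))

backoff, schedule = None, []
for _ in range(5):
    backoff = next_backoff(backoff)
    schedule.append(backoff)
assert schedule == [0.05, 0.1, 0.2, 0.4, 0.8]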
dwavesystems/dwave-cloud-client
dwave/cloud/client.py
Client._do_poll_problems
def _do_poll_problems(self): """Poll the server for the status of a set of problems. Note: This method is always run inside of a daemon thread. """ try: # grouped futures (all scheduled within _POLL_GROUP_TIMEFRAME) frame_futures = {} def task_done(): self._poll_queue.task_done() def add(future): # add future to query frame_futures # returns: worker lives on? # `None` task signifies thread termination if future is None: task_done() return False if future.id not in frame_futures and not future.done(): frame_futures[future.id] = future else: task_done() return True while True: frame_futures.clear() # blocking add first scheduled frame_earliest, future = self._poll_queue.get() if not add(future): return # try grouping if scheduled within grouping timeframe while len(frame_futures) < self._STATUS_QUERY_SIZE: try: task = self._poll_queue.get_nowait() except queue.Empty: break at, future = task if at - frame_earliest <= self._POLL_GROUP_TIMEFRAME: if not add(future): return else: task_done() self._poll_queue.put(task) break # build a query string with ids of all futures in this frame ids = [future.id for future in frame_futures.values()] _LOGGER.debug("Polling for status of futures: %s", ids) query_string = 'problems/?id=' + ','.join(ids) # if futures were cancelled while `add`ing, skip empty frame if not ids: continue # wait until `frame_earliest` before polling delay = frame_earliest - time.time() if delay > 0: _LOGGER.debug("Pausing polling %.2f sec for futures: %s", delay, ids) time.sleep(delay) else: _LOGGER.trace("Skipping non-positive delay of %.2f sec", delay) try: _LOGGER.trace("Executing poll API request") try: response = self.session.get(posixpath.join(self.endpoint, query_string)) except requests.exceptions.Timeout: raise RequestTimeout if response.status_code == 401: raise SolverAuthenticationError() response.raise_for_status() statuses = response.json() for status in statuses: self._handle_problem_status(status, frame_futures[status['id']]) except BaseException as exception: if not isinstance(exception, SolverAuthenticationError): exception = IOError(exception) for id_ in frame_futures.keys(): frame_futures[id_]._set_error(IOError(exception), sys.exc_info()) for id_ in frame_futures.keys(): task_done() time.sleep(0) except Exception as err: _LOGGER.exception(err)
python
def _do_poll_problems(self): """Poll the server for the status of a set of problems. Note: This method is always run inside of a daemon thread. """ try: # grouped futures (all scheduled within _POLL_GROUP_TIMEFRAME) frame_futures = {} def task_done(): self._poll_queue.task_done() def add(future): # add future to query frame_futures # returns: worker lives on? # `None` task signifies thread termination if future is None: task_done() return False if future.id not in frame_futures and not future.done(): frame_futures[future.id] = future else: task_done() return True while True: frame_futures.clear() # blocking add first scheduled frame_earliest, future = self._poll_queue.get() if not add(future): return # try grouping if scheduled within grouping timeframe while len(frame_futures) < self._STATUS_QUERY_SIZE: try: task = self._poll_queue.get_nowait() except queue.Empty: break at, future = task if at - frame_earliest <= self._POLL_GROUP_TIMEFRAME: if not add(future): return else: task_done() self._poll_queue.put(task) break # build a query string with ids of all futures in this frame ids = [future.id for future in frame_futures.values()] _LOGGER.debug("Polling for status of futures: %s", ids) query_string = 'problems/?id=' + ','.join(ids) # if futures were cancelled while `add`ing, skip empty frame if not ids: continue # wait until `frame_earliest` before polling delay = frame_earliest - time.time() if delay > 0: _LOGGER.debug("Pausing polling %.2f sec for futures: %s", delay, ids) time.sleep(delay) else: _LOGGER.trace("Skipping non-positive delay of %.2f sec", delay) try: _LOGGER.trace("Executing poll API request") try: response = self.session.get(posixpath.join(self.endpoint, query_string)) except requests.exceptions.Timeout: raise RequestTimeout if response.status_code == 401: raise SolverAuthenticationError() response.raise_for_status() statuses = response.json() for status in statuses: self._handle_problem_status(status, frame_futures[status['id']]) except BaseException as exception: if not isinstance(exception, SolverAuthenticationError): exception = IOError(exception) for id_ in frame_futures.keys(): frame_futures[id_]._set_error(IOError(exception), sys.exc_info()) for id_ in frame_futures.keys(): task_done() time.sleep(0) except Exception as err: _LOGGER.exception(err)
[ "def", "_do_poll_problems", "(", "self", ")", ":", "try", ":", "# grouped futures (all scheduled within _POLL_GROUP_TIMEFRAME)", "frame_futures", "=", "{", "}", "def", "task_done", "(", ")", ":", "self", ".", "_poll_queue", ".", "task_done", "(", ")", "def", "add", "(", "future", ")", ":", "# add future to query frame_futures", "# returns: worker lives on?", "# `None` task signifies thread termination", "if", "future", "is", "None", ":", "task_done", "(", ")", "return", "False", "if", "future", ".", "id", "not", "in", "frame_futures", "and", "not", "future", ".", "done", "(", ")", ":", "frame_futures", "[", "future", ".", "id", "]", "=", "future", "else", ":", "task_done", "(", ")", "return", "True", "while", "True", ":", "frame_futures", ".", "clear", "(", ")", "# blocking add first scheduled", "frame_earliest", ",", "future", "=", "self", ".", "_poll_queue", ".", "get", "(", ")", "if", "not", "add", "(", "future", ")", ":", "return", "# try grouping if scheduled within grouping timeframe", "while", "len", "(", "frame_futures", ")", "<", "self", ".", "_STATUS_QUERY_SIZE", ":", "try", ":", "task", "=", "self", ".", "_poll_queue", ".", "get_nowait", "(", ")", "except", "queue", ".", "Empty", ":", "break", "at", ",", "future", "=", "task", "if", "at", "-", "frame_earliest", "<=", "self", ".", "_POLL_GROUP_TIMEFRAME", ":", "if", "not", "add", "(", "future", ")", ":", "return", "else", ":", "task_done", "(", ")", "self", ".", "_poll_queue", ".", "put", "(", "task", ")", "break", "# build a query string with ids of all futures in this frame", "ids", "=", "[", "future", ".", "id", "for", "future", "in", "frame_futures", ".", "values", "(", ")", "]", "_LOGGER", ".", "debug", "(", "\"Polling for status of futures: %s\"", ",", "ids", ")", "query_string", "=", "'problems/?id='", "+", "','", ".", "join", "(", "ids", ")", "# if futures were cancelled while `add`ing, skip empty frame", "if", "not", "ids", ":", "continue", "# wait until `frame_earliest` before polling", "delay", "=", "frame_earliest", "-", "time", ".", "time", "(", ")", "if", "delay", ">", "0", ":", "_LOGGER", ".", "debug", "(", "\"Pausing polling %.2f sec for futures: %s\"", ",", "delay", ",", "ids", ")", "time", ".", "sleep", "(", "delay", ")", "else", ":", "_LOGGER", ".", "trace", "(", "\"Skipping non-positive delay of %.2f sec\"", ",", "delay", ")", "try", ":", "_LOGGER", ".", "trace", "(", "\"Executing poll API request\"", ")", "try", ":", "response", "=", "self", ".", "session", ".", "get", "(", "posixpath", ".", "join", "(", "self", ".", "endpoint", ",", "query_string", ")", ")", "except", "requests", ".", "exceptions", ".", "Timeout", ":", "raise", "RequestTimeout", "if", "response", ".", "status_code", "==", "401", ":", "raise", "SolverAuthenticationError", "(", ")", "response", ".", "raise_for_status", "(", ")", "statuses", "=", "response", ".", "json", "(", ")", "for", "status", "in", "statuses", ":", "self", ".", "_handle_problem_status", "(", "status", ",", "frame_futures", "[", "status", "[", "'id'", "]", "]", ")", "except", "BaseException", "as", "exception", ":", "if", "not", "isinstance", "(", "exception", ",", "SolverAuthenticationError", ")", ":", "exception", "=", "IOError", "(", "exception", ")", "for", "id_", "in", "frame_futures", ".", "keys", "(", ")", ":", "frame_futures", "[", "id_", "]", ".", "_set_error", "(", "IOError", "(", "exception", ")", ",", "sys", ".", "exc_info", "(", ")", ")", "for", "id_", "in", "frame_futures", ".", "keys", "(", ")", ":", "task_done", "(", ")", "time", ".", "sleep", "(", "0", 
")", "except", "Exception", "as", "err", ":", "_LOGGER", ".", "exception", "(", "err", ")" ]
Poll the server for the status of a set of problems. Note: This method is always run inside of a daemon thread.
[ "Poll", "the", "server", "for", "the", "status", "of", "a", "set", "of", "problems", "." ]
train
https://github.com/dwavesystems/dwave-cloud-client/blob/df3221a8385dc0c04d7b4d84f740bf3ad6706230/dwave/cloud/client.py#L1214-L1313
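Polls are drawn from a priority queue keyed on scheduled time, and polls that fall within a short window of the earliest one are merged into a single status request. A standalone sketch of just that grouping step (names and limits are illustrative):

import heapq

def group_frame(tasks, group_timeframe, max_size):
    # `tasks` is a heapified list of (scheduled_at, problem_id) pairs.
    earliest, first = heapq.heappop(tasks)
    frame = [first]
    # Merge any further polls that fall inside the grouping window.
    while tasks and len(frame) < max_size:
        at, problem_id = tasks[0]          # peek at the next-scheduled poll
        if at - earliest <= group_timeframe:
            heapq.heappop(tasks)
            frame.append(problem_id)
        else:
            break
    return earliest, frame

tasks = [(10.0, 'a'), (10.2, 'b'), (12.5, 'c')]
heapq.heapify(tasks)
assert group_frame(tasks, group_timeframe=0.5, max_size=8) == (10.0, ['a', 'b'])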
dwavesystems/dwave-cloud-client
dwave/cloud/client.py
Client._do_load_results
def _do_load_results(self): """Submit a query asking for the results for a particular problem. To request the results of a problem: ``GET /problems/{problem_id}/`` Note: This method is always run inside of a daemon thread. """ try: while True: # Select a problem future = self._load_queue.get() # `None` task signifies thread termination if future is None: break _LOGGER.debug("Loading results of: %s", future.id) # Submit the query query_string = 'problems/{}/'.format(future.id) try: try: response = self.session.get(posixpath.join(self.endpoint, query_string)) except requests.exceptions.Timeout: raise RequestTimeout if response.status_code == 401: raise SolverAuthenticationError() response.raise_for_status() message = response.json() except BaseException as exception: if not isinstance(exception, SolverAuthenticationError): exception = IOError(exception) future._set_error(IOError(exception), sys.exc_info()) continue # Dispatch the results, mark the task complete self._handle_problem_status(message, future) self._load_queue.task_done() # this is equivalent to a yield to scheduler in other threading libraries time.sleep(0) except Exception as err: _LOGGER.error('Load result error: ' + str(err))
python
def _do_load_results(self): """Submit a query asking for the results for a particular problem. To request the results of a problem: ``GET /problems/{problem_id}/`` Note: This method is always run inside of a daemon thread. """ try: while True: # Select a problem future = self._load_queue.get() # `None` task signifies thread termination if future is None: break _LOGGER.debug("Loading results of: %s", future.id) # Submit the query query_string = 'problems/{}/'.format(future.id) try: try: response = self.session.get(posixpath.join(self.endpoint, query_string)) except requests.exceptions.Timeout: raise RequestTimeout if response.status_code == 401: raise SolverAuthenticationError() response.raise_for_status() message = response.json() except BaseException as exception: if not isinstance(exception, SolverAuthenticationError): exception = IOError(exception) future._set_error(IOError(exception), sys.exc_info()) continue # Dispatch the results, mark the task complete self._handle_problem_status(message, future) self._load_queue.task_done() # this is equivalent to a yield to scheduler in other threading libraries time.sleep(0) except Exception as err: _LOGGER.error('Load result error: ' + str(err))
[ "def", "_do_load_results", "(", "self", ")", ":", "try", ":", "while", "True", ":", "# Select a problem", "future", "=", "self", ".", "_load_queue", ".", "get", "(", ")", "# `None` task signifies thread termination", "if", "future", "is", "None", ":", "break", "_LOGGER", ".", "debug", "(", "\"Loading results of: %s\"", ",", "future", ".", "id", ")", "# Submit the query", "query_string", "=", "'problems/{}/'", ".", "format", "(", "future", ".", "id", ")", "try", ":", "try", ":", "response", "=", "self", ".", "session", ".", "get", "(", "posixpath", ".", "join", "(", "self", ".", "endpoint", ",", "query_string", ")", ")", "except", "requests", ".", "exceptions", ".", "Timeout", ":", "raise", "RequestTimeout", "if", "response", ".", "status_code", "==", "401", ":", "raise", "SolverAuthenticationError", "(", ")", "response", ".", "raise_for_status", "(", ")", "message", "=", "response", ".", "json", "(", ")", "except", "BaseException", "as", "exception", ":", "if", "not", "isinstance", "(", "exception", ",", "SolverAuthenticationError", ")", ":", "exception", "=", "IOError", "(", "exception", ")", "future", ".", "_set_error", "(", "IOError", "(", "exception", ")", ",", "sys", ".", "exc_info", "(", ")", ")", "continue", "# Dispatch the results, mark the task complete", "self", ".", "_handle_problem_status", "(", "message", ",", "future", ")", "self", ".", "_load_queue", ".", "task_done", "(", ")", "# this is equivalent to a yield to scheduler in other threading libraries", "time", ".", "sleep", "(", "0", ")", "except", "Exception", "as", "err", ":", "_LOGGER", ".", "error", "(", "'Load result error: '", "+", "str", "(", "err", ")", ")" ]
Submit a query asking for the results for a particular problem. To request the results of a problem: ``GET /problems/{problem_id}/`` Note: This method is always run inside of a daemon thread.
[ "Submit", "a", "query", "asking", "for", "the", "results", "for", "a", "particular", "problem", "." ]
train
https://github.com/dwavesystems/dwave-cloud-client/blob/df3221a8385dc0c04d7b4d84f740bf3ad6706230/dwave/cloud/client.py#L1325-L1370
dwavesystems/dwave-cloud-client
dwave/cloud/coders.py
encode_bqm_as_qp
def encode_bqm_as_qp(solver, linear, quadratic): """Encode the binary quadratic problem for submission to a given solver, using the `qp` format for data. Args: solver (:class:`dwave.cloud.solver.Solver`): The solver used. linear (dict[variable, bias]/list[variable, bias]): Linear terms of the model. quadratic (dict[(variable, variable), bias]): Quadratic terms of the model. Returns: encoded submission dictionary """ active = active_qubits(linear, quadratic) # Encode linear terms. The coefficients of the linear terms of the objective # are encoded as an array of little endian 64 bit doubles. # This array is then base64 encoded into a string safe for json. # The order of the terms is determined by the _encoding_qubits property # specified by the server. # Note: only active qubits are coded with double, inactive with NaN nan = float('nan') lin = [uniform_get(linear, qubit, 0 if qubit in active else nan) for qubit in solver._encoding_qubits] lin = base64.b64encode(struct.pack('<' + ('d' * len(lin)), *lin)) # Encode the coefficients of the quadratic terms of the objective # in the same manner as the linear terms, in the order given by the # _encoding_couplers property, discarding tailing zero couplings quad = [quadratic.get((q1,q2), 0) + quadratic.get((q2,q1), 0) for (q1,q2) in solver._encoding_couplers if q1 in active and q2 in active] quad = base64.b64encode(struct.pack('<' + ('d' * len(quad)), *quad)) # The name for this encoding is 'qp' and is explicitly included in the # message for easier extension in the future. return { 'format': 'qp', 'lin': lin.decode('utf-8'), 'quad': quad.decode('utf-8') }
python
def encode_bqm_as_qp(solver, linear, quadratic): """Encode the binary quadratic problem for submission to a given solver, using the `qp` format for data. Args: solver (:class:`dwave.cloud.solver.Solver`): The solver used. linear (dict[variable, bias]/list[variable, bias]): Linear terms of the model. quadratic (dict[(variable, variable), bias]): Quadratic terms of the model. Returns: encoded submission dictionary """ active = active_qubits(linear, quadratic) # Encode linear terms. The coefficients of the linear terms of the objective # are encoded as an array of little endian 64 bit doubles. # This array is then base64 encoded into a string safe for json. # The order of the terms is determined by the _encoding_qubits property # specified by the server. # Note: only active qubits are coded with double, inactive with NaN nan = float('nan') lin = [uniform_get(linear, qubit, 0 if qubit in active else nan) for qubit in solver._encoding_qubits] lin = base64.b64encode(struct.pack('<' + ('d' * len(lin)), *lin)) # Encode the coefficients of the quadratic terms of the objective # in the same manner as the linear terms, in the order given by the # _encoding_couplers property, discarding tailing zero couplings quad = [quadratic.get((q1,q2), 0) + quadratic.get((q2,q1), 0) for (q1,q2) in solver._encoding_couplers if q1 in active and q2 in active] quad = base64.b64encode(struct.pack('<' + ('d' * len(quad)), *quad)) # The name for this encoding is 'qp' and is explicitly included in the # message for easier extension in the future. return { 'format': 'qp', 'lin': lin.decode('utf-8'), 'quad': quad.decode('utf-8') }
[ "def", "encode_bqm_as_qp", "(", "solver", ",", "linear", ",", "quadratic", ")", ":", "active", "=", "active_qubits", "(", "linear", ",", "quadratic", ")", "# Encode linear terms. The coefficients of the linear terms of the objective", "# are encoded as an array of little endian 64 bit doubles.", "# This array is then base64 encoded into a string safe for json.", "# The order of the terms is determined by the _encoding_qubits property", "# specified by the server.", "# Note: only active qubits are coded with double, inactive with NaN", "nan", "=", "float", "(", "'nan'", ")", "lin", "=", "[", "uniform_get", "(", "linear", ",", "qubit", ",", "0", "if", "qubit", "in", "active", "else", "nan", ")", "for", "qubit", "in", "solver", ".", "_encoding_qubits", "]", "lin", "=", "base64", ".", "b64encode", "(", "struct", ".", "pack", "(", "'<'", "+", "(", "'d'", "*", "len", "(", "lin", ")", ")", ",", "*", "lin", ")", ")", "# Encode the coefficients of the quadratic terms of the objective", "# in the same manner as the linear terms, in the order given by the", "# _encoding_couplers property, discarding tailing zero couplings", "quad", "=", "[", "quadratic", ".", "get", "(", "(", "q1", ",", "q2", ")", ",", "0", ")", "+", "quadratic", ".", "get", "(", "(", "q2", ",", "q1", ")", ",", "0", ")", "for", "(", "q1", ",", "q2", ")", "in", "solver", ".", "_encoding_couplers", "if", "q1", "in", "active", "and", "q2", "in", "active", "]", "quad", "=", "base64", ".", "b64encode", "(", "struct", ".", "pack", "(", "'<'", "+", "(", "'d'", "*", "len", "(", "quad", ")", ")", ",", "*", "quad", ")", ")", "# The name for this encoding is 'qp' and is explicitly included in the", "# message for easier extension in the future.", "return", "{", "'format'", ":", "'qp'", ",", "'lin'", ":", "lin", ".", "decode", "(", "'utf-8'", ")", ",", "'quad'", ":", "quad", ".", "decode", "(", "'utf-8'", ")", "}" ]
Encode the binary quadratic problem for submission to a given solver, using the `qp` format for data. Args: solver (:class:`dwave.cloud.solver.Solver`): The solver used. linear (dict[variable, bias]/list[variable, bias]): Linear terms of the model. quadratic (dict[(variable, variable), bias]): Quadratic terms of the model. Returns: encoded submission dictionary
[ "Encode", "the", "binary", "quadratic", "problem", "for", "submission", "to", "a", "given", "solver", "using", "the", "qp", "format", "for", "data", "." ]
train
https://github.com/dwavesystems/dwave-cloud-client/blob/df3221a8385dc0c04d7b4d84f740bf3ad6706230/dwave/cloud/coders.py#L26-L70
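The qp wire format is little-endian 64-bit doubles, base64-encoded, with NaN marking inactive qubits in the linear vector. A minimal round-trip of that encoding with made-up biases:

import base64
import struct

nan = float('nan')
lin = [0.5, nan, -1.0]          # hypothetical biases; NaN = inactive qubit

packed = struct.pack('<' + 'd' * len(lin), *lin)       # little-endian doubles
encoded = base64.b64encode(packed).decode('utf-8')     # JSON-safe string

decoded = struct.unpack('<' + 'd' * (len(packed) // 8),
                        base64.b64decode(encoded))
assert decoded[0] == 0.5 and decoded[2] == -1.0
assert decoded[1] != decoded[1]                        # NaN round-trips as NaN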
dwavesystems/dwave-cloud-client
dwave/cloud/coders.py
decode_qp
def decode_qp(msg): """Decode SAPI response that uses `qp` format, without numpy. The 'qp' format is the current encoding used for problems and samples. In this encoding the reply is generally json, but the samples, energy, and histogram data (the occurrence count of each solution), are all base64 encoded arrays. """ # Decode the simple buffers result = msg['answer'] result['active_variables'] = _decode_ints(result['active_variables']) active_variables = result['active_variables'] if 'num_occurrences' in result: result['num_occurrences'] = _decode_ints(result['num_occurrences']) result['energies'] = _decode_doubles(result['energies']) # Measure out the size of the binary solution data num_solutions = len(result['energies']) num_variables = len(result['active_variables']) solution_bytes = -(-num_variables // 8) # equivalent to int(math.ceil(num_variables / 8.)) total_variables = result['num_variables'] # Figure out the null value for output default = 3 if msg['type'] == 'qubo' else 0 # Decode the solutions, which will be byte aligned in binary format binary = base64.b64decode(result['solutions']) solutions = [] for solution_index in range(num_solutions): # Grab the section of the buffer related to the current buffer_index = solution_index * solution_bytes solution_buffer = binary[buffer_index:buffer_index + solution_bytes] bytes = struct.unpack('B' * solution_bytes, solution_buffer) # Assume None values solution = [default] * total_variables index = 0 for byte in bytes: # Parse each byte and read how ever many bits can be values = _decode_byte(byte) for _ in range(min(8, len(active_variables) - index)): i = active_variables[index] index += 1 solution[i] = values.pop() # Switch to the right variable space if msg['type'] == 'ising': values = {0: -1, 1: 1} solution = [values.get(v, default) for v in solution] solutions.append(solution) result['solutions'] = solutions return result
python
def decode_qp(msg): """Decode SAPI response that uses `qp` format, without numpy. The 'qp' format is the current encoding used for problems and samples. In this encoding the reply is generally json, but the samples, energy, and histogram data (the occurrence count of each solution), are all base64 encoded arrays. """ # Decode the simple buffers result = msg['answer'] result['active_variables'] = _decode_ints(result['active_variables']) active_variables = result['active_variables'] if 'num_occurrences' in result: result['num_occurrences'] = _decode_ints(result['num_occurrences']) result['energies'] = _decode_doubles(result['energies']) # Measure out the size of the binary solution data num_solutions = len(result['energies']) num_variables = len(result['active_variables']) solution_bytes = -(-num_variables // 8) # equivalent to int(math.ceil(num_variables / 8.)) total_variables = result['num_variables'] # Figure out the null value for output default = 3 if msg['type'] == 'qubo' else 0 # Decode the solutions, which will be byte aligned in binary format binary = base64.b64decode(result['solutions']) solutions = [] for solution_index in range(num_solutions): # Grab the section of the buffer related to the current buffer_index = solution_index * solution_bytes solution_buffer = binary[buffer_index:buffer_index + solution_bytes] bytes = struct.unpack('B' * solution_bytes, solution_buffer) # Assume None values solution = [default] * total_variables index = 0 for byte in bytes: # Parse each byte and read how ever many bits can be values = _decode_byte(byte) for _ in range(min(8, len(active_variables) - index)): i = active_variables[index] index += 1 solution[i] = values.pop() # Switch to the right variable space if msg['type'] == 'ising': values = {0: -1, 1: 1} solution = [values.get(v, default) for v in solution] solutions.append(solution) result['solutions'] = solutions return result
[ "def", "decode_qp", "(", "msg", ")", ":", "# Decode the simple buffers", "result", "=", "msg", "[", "'answer'", "]", "result", "[", "'active_variables'", "]", "=", "_decode_ints", "(", "result", "[", "'active_variables'", "]", ")", "active_variables", "=", "result", "[", "'active_variables'", "]", "if", "'num_occurrences'", "in", "result", ":", "result", "[", "'num_occurrences'", "]", "=", "_decode_ints", "(", "result", "[", "'num_occurrences'", "]", ")", "result", "[", "'energies'", "]", "=", "_decode_doubles", "(", "result", "[", "'energies'", "]", ")", "# Measure out the size of the binary solution data", "num_solutions", "=", "len", "(", "result", "[", "'energies'", "]", ")", "num_variables", "=", "len", "(", "result", "[", "'active_variables'", "]", ")", "solution_bytes", "=", "-", "(", "-", "num_variables", "//", "8", ")", "# equivalent to int(math.ceil(num_variables / 8.))", "total_variables", "=", "result", "[", "'num_variables'", "]", "# Figure out the null value for output", "default", "=", "3", "if", "msg", "[", "'type'", "]", "==", "'qubo'", "else", "0", "# Decode the solutions, which will be byte aligned in binary format", "binary", "=", "base64", ".", "b64decode", "(", "result", "[", "'solutions'", "]", ")", "solutions", "=", "[", "]", "for", "solution_index", "in", "range", "(", "num_solutions", ")", ":", "# Grab the section of the buffer related to the current", "buffer_index", "=", "solution_index", "*", "solution_bytes", "solution_buffer", "=", "binary", "[", "buffer_index", ":", "buffer_index", "+", "solution_bytes", "]", "bytes", "=", "struct", ".", "unpack", "(", "'B'", "*", "solution_bytes", ",", "solution_buffer", ")", "# Assume None values", "solution", "=", "[", "default", "]", "*", "total_variables", "index", "=", "0", "for", "byte", "in", "bytes", ":", "# Parse each byte and read how ever many bits can be", "values", "=", "_decode_byte", "(", "byte", ")", "for", "_", "in", "range", "(", "min", "(", "8", ",", "len", "(", "active_variables", ")", "-", "index", ")", ")", ":", "i", "=", "active_variables", "[", "index", "]", "index", "+=", "1", "solution", "[", "i", "]", "=", "values", ".", "pop", "(", ")", "# Switch to the right variable space", "if", "msg", "[", "'type'", "]", "==", "'ising'", ":", "values", "=", "{", "0", ":", "-", "1", ",", "1", ":", "1", "}", "solution", "=", "[", "values", ".", "get", "(", "v", ",", "default", ")", "for", "v", "in", "solution", "]", "solutions", ".", "append", "(", "solution", ")", "result", "[", "'solutions'", "]", "=", "solutions", "return", "result" ]
Decode SAPI response that uses `qp` format, without numpy. The 'qp' format is the current encoding used for problems and samples. In this encoding the reply is generally json, but the samples, energy, and histogram data (the occurrence count of each solution), are all base64 encoded arrays.
[ "Decode", "SAPI", "response", "that", "uses", "qp", "format", "without", "numpy", "." ]
train
https://github.com/dwavesystems/dwave-cloud-client/blob/df3221a8385dc0c04d7b4d84f740bf3ad6706230/dwave/cloud/coders.py#L73-L125
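Each solution occupies ceil(num_variables / 8) bytes in the packed buffer, and bits are consumed most-significant-first, because list.pop() takes from the end of the LSB-first list produced by _decode_byte. A worked example for three active variables packed into one byte:

byte = 0b10100000                      # bits for variables 0, 1, 2: 1, 0, 1
lsb_first = [(byte >> k) & 1 for k in range(8)]   # what _decode_byte returns
# pop() removes from the end of the list, i.e. the most significant bit first
values = [lsb_first.pop() for _ in range(3)]
assert values == [1, 0, 1]

# ceil-division trick used above for the per-solution byte count:
num_variables = 3
assert -(-num_variables // 8) == 1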
dwavesystems/dwave-cloud-client
dwave/cloud/coders.py
_decode_byte
def _decode_byte(byte): """Helper for decode_qp, turns a single byte into a list of bits. Args: byte: byte to be decoded Returns: list of bits corresponding to byte """ bits = [] for _ in range(8): bits.append(byte & 1) byte >>= 1 return bits
python
def _decode_byte(byte): """Helper for decode_qp, turns a single byte into a list of bits. Args: byte: byte to be decoded Returns: list of bits corresponding to byte """ bits = [] for _ in range(8): bits.append(byte & 1) byte >>= 1 return bits
[ "def", "_decode_byte", "(", "byte", ")", ":", "bits", "=", "[", "]", "for", "_", "in", "range", "(", "8", ")", ":", "bits", ".", "append", "(", "byte", "&", "1", ")", "byte", ">>=", "1", "return", "bits" ]
Helper for decode_qp, turns a single byte into a list of bits. Args: byte: byte to be decoded Returns: list of bits corresponding to byte
[ "Helper", "for", "decode_qp", "turns", "a", "single", "byte", "into", "a", "list", "of", "bits", "." ]
train
https://github.com/dwavesystems/dwave-cloud-client/blob/df3221a8385dc0c04d7b4d84f740bf3ad6706230/dwave/cloud/coders.py#L128-L141
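The bit order is least-significant-first, so for example 0b00000101 decodes to [1, 0, 1, 0, 0, 0, 0, 0]. A quick equivalent check without the helper:

byte = 0b00000101
assert [(byte >> k) & 1 for k in range(8)] == [1, 0, 1, 0, 0, 0, 0, 0]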
dwavesystems/dwave-cloud-client
dwave/cloud/coders.py
_decode_ints
def _decode_ints(message): """Helper for decode_qp, decodes an int array. The int array is stored as little endian 32 bit integers. The array has then been base64 encoded. Since we are decoding we do these steps in reverse. """ binary = base64.b64decode(message) return struct.unpack('<' + ('i' * (len(binary) // 4)), binary)
python
def _decode_ints(message): """Helper for decode_qp, decodes an int array. The int array is stored as little endian 32 bit integers. The array has then been base64 encoded. Since we are decoding we do these steps in reverse. """ binary = base64.b64decode(message) return struct.unpack('<' + ('i' * (len(binary) // 4)), binary)
[ "def", "_decode_ints", "(", "message", ")", ":", "binary", "=", "base64", ".", "b64decode", "(", "message", ")", "return", "struct", ".", "unpack", "(", "'<'", "+", "(", "'i'", "*", "(", "len", "(", "binary", ")", "//", "4", ")", ")", ",", "binary", ")" ]
Helper for decode_qp, decodes an int array. The int array is stored as little endian 32 bit integers. The array has then been base64 encoded. Since we are decoding we do these steps in reverse.
[ "Helper", "for", "decode_qp", "decodes", "an", "int", "array", "." ]
train
https://github.com/dwavesystems/dwave-cloud-client/blob/df3221a8385dc0c04d7b4d84f740bf3ad6706230/dwave/cloud/coders.py#L144-L152
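A round-trip of this int encoding, packing three int32 values the same way the server would before base64 transport:

import base64
import struct

values = (1, -2, 300)
message = base64.b64encode(struct.pack('<' + 'i' * len(values), *values))

binary = base64.b64decode(message)
assert struct.unpack('<' + 'i' * (len(binary) // 4), binary) == values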
dwavesystems/dwave-cloud-client
dwave/cloud/coders.py
_decode_doubles
def _decode_doubles(message): """Helper for decode_qp, decodes a double array. The double array is stored as little endian 64 bit doubles. The array has then been base64 encoded. Since we are decoding we do these steps in reverse. Args: message: the double array Returns: decoded double array """ binary = base64.b64decode(message) return struct.unpack('<' + ('d' * (len(binary) // 8)), binary)
python
def _decode_doubles(message): """Helper for decode_qp, decodes a double array. The double array is stored as little endian 64 bit doubles. The array has then been base64 encoded. Since we are decoding we do these steps in reverse. Args: message: the double array Returns: decoded double array """ binary = base64.b64decode(message) return struct.unpack('<' + ('d' * (len(binary) // 8)), binary)
[ "def", "_decode_doubles", "(", "message", ")", ":", "binary", "=", "base64", ".", "b64decode", "(", "message", ")", "return", "struct", ".", "unpack", "(", "'<'", "+", "(", "'d'", "*", "(", "len", "(", "binary", ")", "//", "8", ")", ")", ",", "binary", ")" ]
Helper for decode_qp, decodes a double array. The double array is stored as little endian 64 bit doubles. The array has then been base64 encoded. Since we are decoding we do these steps in reverse. Args: message: the double array Returns: decoded double array
[ "Helper", "for", "decode_qp", "decodes", "a", "double", "array", "." ]
train
https://github.com/dwavesystems/dwave-cloud-client/blob/df3221a8385dc0c04d7b4d84f740bf3ad6706230/dwave/cloud/coders.py#L155-L169
dwavesystems/dwave-cloud-client
dwave/cloud/coders.py
decode_qp_numpy
def decode_qp_numpy(msg, return_matrix=True):
    """Decode SAPI response, results in a `qp` format, explicitly using numpy.

    If numpy is not installed, the method will fail.

    To use numpy for decoding, but return the results as lists (instead of
    numpy matrices), set `return_matrix=False`.
    """
    import numpy as np

    result = msg['answer']

    # Build some little endian type encodings
    double_type = np.dtype(np.double)
    double_type = double_type.newbyteorder('<')
    int_type = np.dtype(np.int32)
    int_type = int_type.newbyteorder('<')

    # Decode the simple buffers
    result['energies'] = np.frombuffer(base64.b64decode(result['energies']),
                                       dtype=double_type)

    if 'num_occurrences' in result:
        result['num_occurrences'] = \
            np.frombuffer(base64.b64decode(result['num_occurrences']),
                          dtype=int_type)

    result['active_variables'] = \
        np.frombuffer(base64.b64decode(result['active_variables']),
                      dtype=int_type)

    # Measure out the binary data size
    num_solutions = len(result['energies'])
    active_variables = result['active_variables']
    num_variables = len(active_variables)
    total_variables = result['num_variables']

    # Decode the solutions, which will be a continuous run of bits
    byte_type = np.dtype(np.uint8)
    byte_type = byte_type.newbyteorder('<')
    bits = np.unpackbits(np.frombuffer(base64.b64decode(result['solutions']),
                                       dtype=byte_type))

    # Clip off the extra bits from encoding
    if num_solutions:
        bits = np.reshape(bits, (num_solutions, bits.size // num_solutions))
        bits = np.delete(bits, range(num_variables, bits.shape[1]), 1)

    # Switch from bits to spins
    default = 3
    if msg['type'] == 'ising':
        bits = bits.astype(np.int8)
        bits *= 2
        bits -= 1
        default = 0

    # Fill in the missing variables
    solutions = np.full((num_solutions, total_variables), default,
                        dtype=np.int8)
    solutions[:, active_variables] = bits
    result['solutions'] = solutions

    # If the final result shouldn't be numpy formats switch back to python objects
    if not return_matrix:
        result['energies'] = result['energies'].tolist()
        if 'num_occurrences' in result:
            result['num_occurrences'] = result['num_occurrences'].tolist()
        result['active_variables'] = result['active_variables'].tolist()
        result['solutions'] = result['solutions'].tolist()

    return result
python
def decode_qp_numpy(msg, return_matrix=True):
    """Decode SAPI response, results in a `qp` format, explicitly using numpy.

    If numpy is not installed, the method will fail.

    To use numpy for decoding, but return the results as lists (instead of
    numpy matrices), set `return_matrix=False`.
    """
    import numpy as np

    result = msg['answer']

    # Build some little endian type encodings
    double_type = np.dtype(np.double)
    double_type = double_type.newbyteorder('<')
    int_type = np.dtype(np.int32)
    int_type = int_type.newbyteorder('<')

    # Decode the simple buffers
    result['energies'] = np.frombuffer(base64.b64decode(result['energies']),
                                       dtype=double_type)

    if 'num_occurrences' in result:
        result['num_occurrences'] = \
            np.frombuffer(base64.b64decode(result['num_occurrences']),
                          dtype=int_type)

    result['active_variables'] = \
        np.frombuffer(base64.b64decode(result['active_variables']),
                      dtype=int_type)

    # Measure out the binary data size
    num_solutions = len(result['energies'])
    active_variables = result['active_variables']
    num_variables = len(active_variables)
    total_variables = result['num_variables']

    # Decode the solutions, which will be a continuous run of bits
    byte_type = np.dtype(np.uint8)
    byte_type = byte_type.newbyteorder('<')
    bits = np.unpackbits(np.frombuffer(base64.b64decode(result['solutions']),
                                       dtype=byte_type))

    # Clip off the extra bits from encoding
    if num_solutions:
        bits = np.reshape(bits, (num_solutions, bits.size // num_solutions))
        bits = np.delete(bits, range(num_variables, bits.shape[1]), 1)

    # Switch from bits to spins
    default = 3
    if msg['type'] == 'ising':
        bits = bits.astype(np.int8)
        bits *= 2
        bits -= 1
        default = 0

    # Fill in the missing variables
    solutions = np.full((num_solutions, total_variables), default,
                        dtype=np.int8)
    solutions[:, active_variables] = bits
    result['solutions'] = solutions

    # If the final result shouldn't be numpy formats switch back to python objects
    if not return_matrix:
        result['energies'] = result['energies'].tolist()
        if 'num_occurrences' in result:
            result['num_occurrences'] = result['num_occurrences'].tolist()
        result['active_variables'] = result['active_variables'].tolist()
        result['solutions'] = result['solutions'].tolist()

    return result
[ "def", "decode_qp_numpy", "(", "msg", ",", "return_matrix", "=", "True", ")", ":", "import", "numpy", "as", "np", "result", "=", "msg", "[", "'answer'", "]", "# Build some little endian type encodings", "double_type", "=", "np", ".", "dtype", "(", "np", ".", "double", ")", "double_type", "=", "double_type", ".", "newbyteorder", "(", "'<'", ")", "int_type", "=", "np", ".", "dtype", "(", "np", ".", "int32", ")", "int_type", "=", "int_type", ".", "newbyteorder", "(", "'<'", ")", "# Decode the simple buffers", "result", "[", "'energies'", "]", "=", "np", ".", "frombuffer", "(", "base64", ".", "b64decode", "(", "result", "[", "'energies'", "]", ")", ",", "dtype", "=", "double_type", ")", "if", "'num_occurrences'", "in", "result", ":", "result", "[", "'num_occurrences'", "]", "=", "np", ".", "frombuffer", "(", "base64", ".", "b64decode", "(", "result", "[", "'num_occurrences'", "]", ")", ",", "dtype", "=", "int_type", ")", "result", "[", "'active_variables'", "]", "=", "np", ".", "frombuffer", "(", "base64", ".", "b64decode", "(", "result", "[", "'active_variables'", "]", ")", ",", "dtype", "=", "int_type", ")", "# Measure out the binary data size", "num_solutions", "=", "len", "(", "result", "[", "'energies'", "]", ")", "active_variables", "=", "result", "[", "'active_variables'", "]", "num_variables", "=", "len", "(", "active_variables", ")", "total_variables", "=", "result", "[", "'num_variables'", "]", "# Decode the solutions, which will be a continuous run of bits", "byte_type", "=", "np", ".", "dtype", "(", "np", ".", "uint8", ")", "byte_type", "=", "byte_type", ".", "newbyteorder", "(", "'<'", ")", "bits", "=", "np", ".", "unpackbits", "(", "np", ".", "frombuffer", "(", "base64", ".", "b64decode", "(", "result", "[", "'solutions'", "]", ")", ",", "dtype", "=", "byte_type", ")", ")", "# Clip off the extra bits from encoding", "if", "num_solutions", ":", "bits", "=", "np", ".", "reshape", "(", "bits", ",", "(", "num_solutions", ",", "bits", ".", "size", "//", "num_solutions", ")", ")", "bits", "=", "np", ".", "delete", "(", "bits", ",", "range", "(", "num_variables", ",", "bits", ".", "shape", "[", "1", "]", ")", ",", "1", ")", "# Switch from bits to spins", "default", "=", "3", "if", "msg", "[", "'type'", "]", "==", "'ising'", ":", "bits", "=", "bits", ".", "astype", "(", "np", ".", "int8", ")", "bits", "*=", "2", "bits", "-=", "1", "default", "=", "0", "# Fill in the missing variables", "solutions", "=", "np", ".", "full", "(", "(", "num_solutions", ",", "total_variables", ")", ",", "default", ",", "dtype", "=", "np", ".", "int8", ")", "solutions", "[", ":", ",", "active_variables", "]", "=", "bits", "result", "[", "'solutions'", "]", "=", "solutions", "# If the final result shouldn't be numpy formats switch back to python objects", "if", "not", "return_matrix", ":", "result", "[", "'energies'", "]", "=", "result", "[", "'energies'", "]", ".", "tolist", "(", ")", "if", "'num_occurrences'", "in", "result", ":", "result", "[", "'num_occurrences'", "]", "=", "result", "[", "'num_occurrences'", "]", ".", "tolist", "(", ")", "result", "[", "'active_variables'", "]", "=", "result", "[", "'active_variables'", "]", ".", "tolist", "(", ")", "result", "[", "'solutions'", "]", "=", "result", "[", "'solutions'", "]", ".", "tolist", "(", ")", "return", "result" ]
Decode SAPI response, results in a `qp` format, explicitly using numpy.

    If numpy is not installed, the method will fail.

    To use numpy for decoding, but return the results as lists (instead of
    numpy matrices), set `return_matrix=False`.
[ "Decode", "SAPI", "response", "results", "in", "a", "qp", "format", "explicitly", "using", "numpy", ".", "If", "numpy", "is", "not", "installed", "the", "method", "will", "fail", "." ]
train
https://github.com/dwavesystems/dwave-cloud-client/blob/df3221a8385dc0c04d7b4d84f740bf3ad6706230/dwave/cloud/coders.py#L172-L240
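A small sketch of just the bit-to-spin step performed above, with a hand-made bit array standing in for the base64-decoded solution data:

import numpy as np

# Two decoded solutions over three variables, as unpacked 0/1 bits
bits = np.array([[0, 1, 1],
                 [1, 0, 0]], dtype=np.uint8)

# For Ising problems the decoder maps {0, 1} -> {-1, +1} in place
spins = bits.astype(np.int8)
spins *= 2
spins -= 1
print(spins)   # [[-1  1  1]
               #  [ 1 -1 -1]]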
dwavesystems/dwave-cloud-client
dwave/cloud/utils.py
evaluate_ising
def evaluate_ising(linear, quad, state):
    """Calculate the energy of a state given the Hamiltonian.

    Args:
        linear: Linear Hamiltonian terms.
        quad: Quadratic Hamiltonian terms.
        state: Vector of spins describing the system state.

    Returns:
        Energy of the state evaluated by the given energy function.
    """

    # If we were given a numpy array cast to list
    if _numpy and isinstance(state, np.ndarray):
        return evaluate_ising(linear, quad, state.tolist())

    # Accumulate the linear and quadratic values
    energy = 0.0
    for index, value in uniform_iterator(linear):
        energy += state[index] * value
    for (index_a, index_b), value in six.iteritems(quad):
        energy += value * state[index_a] * state[index_b]
    return energy
python
def evaluate_ising(linear, quad, state):
    """Calculate the energy of a state given the Hamiltonian.

    Args:
        linear: Linear Hamiltonian terms.
        quad: Quadratic Hamiltonian terms.
        state: Vector of spins describing the system state.

    Returns:
        Energy of the state evaluated by the given energy function.
    """

    # If we were given a numpy array cast to list
    if _numpy and isinstance(state, np.ndarray):
        return evaluate_ising(linear, quad, state.tolist())

    # Accumulate the linear and quadratic values
    energy = 0.0
    for index, value in uniform_iterator(linear):
        energy += state[index] * value
    for (index_a, index_b), value in six.iteritems(quad):
        energy += value * state[index_a] * state[index_b]
    return energy
[ "def", "evaluate_ising", "(", "linear", ",", "quad", ",", "state", ")", ":", "# If we were given a numpy array cast to list", "if", "_numpy", "and", "isinstance", "(", "state", ",", "np", ".", "ndarray", ")", ":", "return", "evaluate_ising", "(", "linear", ",", "quad", ",", "state", ".", "tolist", "(", ")", ")", "# Accumulate the linear and quadratic values", "energy", "=", "0.0", "for", "index", ",", "value", "in", "uniform_iterator", "(", "linear", ")", ":", "energy", "+=", "state", "[", "index", "]", "*", "value", "for", "(", "index_a", ",", "index_b", ")", ",", "value", "in", "six", ".", "iteritems", "(", "quad", ")", ":", "energy", "+=", "value", "*", "state", "[", "index_a", "]", "*", "state", "[", "index_b", "]", "return", "energy" ]
Calculate the energy of a state given the Hamiltonian.

    Args:
        linear: Linear Hamiltonian terms.
        quad: Quadratic Hamiltonian terms.
        state: Vector of spins describing the system state.

    Returns:
        Energy of the state evaluated by the given energy function.
[ "Calculate", "the", "energy", "of", "a", "state", "given", "the", "Hamiltonian", "." ]
train
https://github.com/dwavesystems/dwave-cloud-client/blob/df3221a8385dc0c04d7b4d84f740bf3ad6706230/dwave/cloud/utils.py#L49-L71
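A worked example of the energy sum, re-implemented inline so the sketch is self-contained (the biases, coupling, and state are made up):

linear = {0: 0.5, 1: -1.0}   # h biases (illustrative)
quad = {(0, 1): 0.25}        # J coupling (illustrative)
state = [1, -1]

# By hand: 0.5*1 + (-1.0)*(-1) + 0.25*1*(-1) = 0.5 + 1.0 - 0.25 = 1.25
energy = sum(state[i] * h for i, h in linear.items())
energy += sum(j * state[a] * state[b] for (a, b), j in quad.items())
assert energy == 1.25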
dwavesystems/dwave-cloud-client
dwave/cloud/utils.py
active_qubits
def active_qubits(linear, quadratic):
    """Calculate a set of all active qubits. Qubit is "active" if it has
    bias or coupling attached.

    Args:
        linear (dict[variable, bias]/list[variable, bias]):
            Linear terms of the model.

        quadratic (dict[(variable, variable), bias]):
            Quadratic terms of the model.

    Returns:
        set:
            Active qubits' indices.
    """

    active = {idx for idx,bias in uniform_iterator(linear)}
    for edge, _ in six.iteritems(quadratic):
        active.update(edge)
    return active
python
def active_qubits(linear, quadratic):
    """Calculate a set of all active qubits. Qubit is "active" if it has
    bias or coupling attached.

    Args:
        linear (dict[variable, bias]/list[variable, bias]):
            Linear terms of the model.

        quadratic (dict[(variable, variable), bias]):
            Quadratic terms of the model.

    Returns:
        set:
            Active qubits' indices.
    """

    active = {idx for idx,bias in uniform_iterator(linear)}
    for edge, _ in six.iteritems(quadratic):
        active.update(edge)
    return active
[ "def", "active_qubits", "(", "linear", ",", "quadratic", ")", ":", "active", "=", "{", "idx", "for", "idx", ",", "bias", "in", "uniform_iterator", "(", "linear", ")", "}", "for", "edge", ",", "_", "in", "six", ".", "iteritems", "(", "quadratic", ")", ":", "active", ".", "update", "(", "edge", ")", "return", "active" ]
Calculate a set of all active qubits. Qubit is "active" if it has
    bias or coupling attached.

    Args:
        linear (dict[variable, bias]/list[variable, bias]):
            Linear terms of the model.

        quadratic (dict[(variable, variable), bias]):
            Quadratic terms of the model.

    Returns:
        set:
            Active qubits' indices.
[ "Calculate", "a", "set", "of", "all", "active", "qubits", ".", "Qubit", "is", "active", "if", "it", "has", "bias", "or", "coupling", "attached", "." ]
train
https://github.com/dwavesystems/dwave-cloud-client/blob/df3221a8385dc0c04d7b4d84f740bf3ad6706230/dwave/cloud/utils.py#L74-L93
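The same accumulation, restated inline on illustrative inputs: any qubit named in a linear bias or touched by a coupling counts as active:

linear = {0: 0.5, 3: -1.0}    # qubit 3 carries a bias but no coupling
quadratic = {(1, 2): 0.25}    # qubits 1 and 2 only appear in a coupling

active = {idx for idx, _bias in linear.items()}
for edge in quadratic:
    active.update(edge)
assert active == {0, 1, 2, 3}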
dwavesystems/dwave-cloud-client
dwave/cloud/utils.py
generate_random_ising_problem
def generate_random_ising_problem(solver, h_range=None, j_range=None):
    """Generates an Ising problem formulation valid for a particular solver,
    using all qubits and all couplings and linear/quadratic biases sampled
    uniformly from `h_range`/`j_range`.
    """

    if h_range is None:
        h_range = solver.properties.get('h_range', [-1, 1])
    if j_range is None:
        j_range = solver.properties.get('j_range', [-1, 1])

    lin = {qubit: random.uniform(*h_range) for qubit in solver.nodes}
    quad = {edge: random.uniform(*j_range) for edge in solver.undirected_edges}

    return lin, quad
python
def generate_random_ising_problem(solver, h_range=None, j_range=None):
    """Generates an Ising problem formulation valid for a particular solver,
    using all qubits and all couplings and linear/quadratic biases sampled
    uniformly from `h_range`/`j_range`.
    """

    if h_range is None:
        h_range = solver.properties.get('h_range', [-1, 1])
    if j_range is None:
        j_range = solver.properties.get('j_range', [-1, 1])

    lin = {qubit: random.uniform(*h_range) for qubit in solver.nodes}
    quad = {edge: random.uniform(*j_range) for edge in solver.undirected_edges}

    return lin, quad
[ "def", "generate_random_ising_problem", "(", "solver", ",", "h_range", "=", "None", ",", "j_range", "=", "None", ")", ":", "if", "h_range", "is", "None", ":", "h_range", "=", "solver", ".", "properties", ".", "get", "(", "'h_range'", ",", "[", "-", "1", ",", "1", "]", ")", "if", "j_range", "is", "None", ":", "j_range", "=", "solver", ".", "properties", ".", "get", "(", "'j_range'", ",", "[", "-", "1", ",", "1", "]", ")", "lin", "=", "{", "qubit", ":", "random", ".", "uniform", "(", "*", "h_range", ")", "for", "qubit", "in", "solver", ".", "nodes", "}", "quad", "=", "{", "edge", ":", "random", ".", "uniform", "(", "*", "j_range", ")", "for", "edge", "in", "solver", ".", "undirected_edges", "}", "return", "lin", ",", "quad" ]
Generates an Ising problem formulation valid for a particular solver, using all qubits and all couplings and linear/quadratic biases sampled uniformly from `h_range`/`j_range`.
[ "Generates", "an", "Ising", "problem", "formulation", "valid", "for", "a", "particular", "solver", "using", "all", "qubits", "and", "all", "couplings", "and", "linear", "/", "quadratic", "biases", "sampled", "uniformly", "from", "h_range", "/", "j_range", "." ]
train
https://github.com/dwavesystems/dwave-cloud-client/blob/df3221a8385dc0c04d7b4d84f740bf3ad6706230/dwave/cloud/utils.py#L96-L110
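A sketch of the sampling pattern on a hypothetical stand-in solver; `FakeSolver` and all its attribute values are invented for illustration:

import random

class FakeSolver(object):
    """Hypothetical stand-in exposing the attributes the generator reads."""
    properties = {'h_range': [-2, 2], 'j_range': [-1, 1]}
    nodes = {0, 1, 2}
    undirected_edges = {(0, 1), (1, 2)}

solver = FakeSolver()
lin = {q: random.uniform(*solver.properties['h_range']) for q in solver.nodes}
quad = {e: random.uniform(*solver.properties['j_range'])
        for e in solver.undirected_edges}
# Every qubit gets a bias in [-2, 2]; every coupler a strength in [-1, 1]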
dwavesystems/dwave-cloud-client
dwave/cloud/utils.py
uniform_iterator
def uniform_iterator(sequence):
    """Uniform (key, value) iteration on a `dict`,
    or (idx, value) on a `list`."""

    if isinstance(sequence, abc.Mapping):
        return six.iteritems(sequence)
    else:
        return enumerate(sequence)
python
def uniform_iterator(sequence):
    """Uniform (key, value) iteration on a `dict`,
    or (idx, value) on a `list`."""

    if isinstance(sequence, abc.Mapping):
        return six.iteritems(sequence)
    else:
        return enumerate(sequence)
[ "def", "uniform_iterator", "(", "sequence", ")", ":", "if", "isinstance", "(", "sequence", ",", "abc", ".", "Mapping", ")", ":", "return", "six", ".", "iteritems", "(", "sequence", ")", "else", ":", "return", "enumerate", "(", "sequence", ")" ]
Uniform (key, value) iteration on a `dict`, or (idx, value) on a `list`.
[ "Uniform", "(", "key", "value", ")", "iteration", "on", "a", "dict", "or", "(", "idx", "value", ")", "on", "a", "list", "." ]
train
https://github.com/dwavesystems/dwave-cloud-client/blob/df3221a8385dc0c04d7b4d84f740bf3ad6706230/dwave/cloud/utils.py#L113-L120
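A Python-3-only restatement of the helper, to show both branches without the `six` dependency (the original supports Python 2 as well):

from collections import abc

def uniform_iterator_py3(sequence):
    # Mapping input yields (key, value); sequence input yields (index, value)
    if isinstance(sequence, abc.Mapping):
        return iter(sequence.items())
    return enumerate(sequence)

assert list(uniform_iterator_py3({'a': 1})) == [('a', 1)]
assert list(uniform_iterator_py3([10, 20])) == [(0, 10), (1, 20)]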
dwavesystems/dwave-cloud-client
dwave/cloud/utils.py
uniform_get
def uniform_get(sequence, index, default=None):
    """Uniform `dict`/`list` item getter, where `index` is interpreted as a key
    for maps and as numeric index for lists."""

    if isinstance(sequence, abc.Mapping):
        return sequence.get(index, default)
    else:
        return sequence[index] if index < len(sequence) else default
python
def uniform_get(sequence, index, default=None):
    """Uniform `dict`/`list` item getter, where `index` is interpreted as a key
    for maps and as numeric index for lists."""

    if isinstance(sequence, abc.Mapping):
        return sequence.get(index, default)
    else:
        return sequence[index] if index < len(sequence) else default
[ "def", "uniform_get", "(", "sequence", ",", "index", ",", "default", "=", "None", ")", ":", "if", "isinstance", "(", "sequence", ",", "abc", ".", "Mapping", ")", ":", "return", "sequence", ".", "get", "(", "index", ",", "default", ")", "else", ":", "return", "sequence", "[", "index", "]", "if", "index", "<", "len", "(", "sequence", ")", "else", "default" ]
Uniform `dict`/`list` item getter, where `index` is interpreted as a key for maps and as numeric index for lists.
[ "Uniform", "dict", "/", "list", "item", "getter", "where", "index", "is", "interpreted", "as", "a", "key", "for", "maps", "and", "as", "numeric", "index", "for", "lists", "." ]
train
https://github.com/dwavesystems/dwave-cloud-client/blob/df3221a8385dc0c04d7b4d84f740bf3ad6706230/dwave/cloud/utils.py#L123-L130
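A usage sketch; the import path is assumed from this file's location in the package:

from dwave.cloud.utils import uniform_get   # import path assumed

assert uniform_get({'a': 1}, 'b', default=0) == 0   # missing key -> default
assert uniform_get([10, 20], 5, default=0) == 0     # index past the end -> default
assert uniform_get([10, 20], 1) == 20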
dwavesystems/dwave-cloud-client
dwave/cloud/utils.py
strip_head
def strip_head(sequence, values):
    """Strips elements of `values` from the beginning of `sequence`."""
    values = set(values)
    return list(itertools.dropwhile(lambda x: x in values, sequence))
python
def strip_head(sequence, values):
    """Strips elements of `values` from the beginning of `sequence`."""
    values = set(values)
    return list(itertools.dropwhile(lambda x: x in values, sequence))
[ "def", "strip_head", "(", "sequence", ",", "values", ")", ":", "values", "=", "set", "(", "values", ")", "return", "list", "(", "itertools", ".", "dropwhile", "(", "lambda", "x", ":", "x", "in", "values", ",", "sequence", ")", ")" ]
Strips elements of `values` from the beginning of `sequence`.
[ "Strips", "elements", "of", "values", "from", "the", "beginning", "of", "sequence", "." ]
train
https://github.com/dwavesystems/dwave-cloud-client/blob/df3221a8385dc0c04d7b4d84f740bf3ad6706230/dwave/cloud/utils.py#L133-L136
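A usage sketch (import path assumed from this file): only the leading run is removed, interior matches survive:

from dwave.cloud.utils import strip_head   # import path assumed

assert strip_head([0, 0, 1, 0, 2], [0]) == [1, 0, 2]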
dwavesystems/dwave-cloud-client
dwave/cloud/utils.py
strip_tail
def strip_tail(sequence, values):
    """Strip `values` from the end of `sequence`."""
    return list(reversed(list(strip_head(reversed(sequence), values))))
python
def strip_tail(sequence, values):
    """Strip `values` from the end of `sequence`."""
    return list(reversed(list(strip_head(reversed(sequence), values))))
[ "def", "strip_tail", "(", "sequence", ",", "values", ")", ":", "return", "list", "(", "reversed", "(", "list", "(", "strip_head", "(", "reversed", "(", "sequence", ")", ",", "values", ")", ")", ")", ")" ]
Strip `values` from the end of `sequence`.
[ "Strip", "values", "from", "the", "end", "of", "sequence", "." ]
train
https://github.com/dwavesystems/dwave-cloud-client/blob/df3221a8385dc0c04d7b4d84f740bf3ad6706230/dwave/cloud/utils.py#L139-L141
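The mirror-image usage sketch (import path assumed from this file): only the trailing run is removed:

from dwave.cloud.utils import strip_tail   # import path assumed

assert strip_tail([1, 0, 2, 0, 0], [0]) == [1, 0, 2]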
dwavesystems/dwave-cloud-client
dwave/cloud/utils.py
click_info_switch
def click_info_switch(f):
    """Decorator to create eager Click info switch option, as described in:
    http://click.pocoo.org/6/options/#callbacks-and-eager-options.

    Takes a no-argument function and abstracts the boilerplate required by
    Click (value checking, exit on done).

    Example:

        @click.option('--my-option', is_flag=True, callback=my_option,
                      expose_value=False, is_eager=True)
        def test():
            pass

        @click_info_switch
        def my_option():
            click.echo('some info related to my switch')
    """

    @wraps(f)
    def wrapped(ctx, param, value):
        if not value or ctx.resilient_parsing:
            return
        f()
        ctx.exit()

    return wrapped
python
def click_info_switch(f):
    """Decorator to create eager Click info switch option, as described in:
    http://click.pocoo.org/6/options/#callbacks-and-eager-options.

    Takes a no-argument function and abstracts the boilerplate required by
    Click (value checking, exit on done).

    Example:

        @click.option('--my-option', is_flag=True, callback=my_option,
                      expose_value=False, is_eager=True)
        def test():
            pass

        @click_info_switch
        def my_option():
            click.echo('some info related to my switch')
    """

    @wraps(f)
    def wrapped(ctx, param, value):
        if not value or ctx.resilient_parsing:
            return
        f()
        ctx.exit()

    return wrapped
[ "def", "click_info_switch", "(", "f", ")", ":", "@", "wraps", "(", "f", ")", "def", "wrapped", "(", "ctx", ",", "param", ",", "value", ")", ":", "if", "not", "value", "or", "ctx", ".", "resilient_parsing", ":", "return", "f", "(", ")", "ctx", ".", "exit", "(", ")", "return", "wrapped" ]
Decorator to create eager Click info switch option, as described in:
    http://click.pocoo.org/6/options/#callbacks-and-eager-options.

    Takes a no-argument function and abstracts the boilerplate required by
    Click (value checking, exit on done).

    Example:

        @click.option('--my-option', is_flag=True, callback=my_option,
                      expose_value=False, is_eager=True)
        def test():
            pass

        @click_info_switch
        def my_option():
            click.echo('some info related to my switch')
[ "Decorator", "to", "create", "eager", "Click", "info", "switch", "option", "as", "described", "in", ":", "http", ":", "//", "click", ".", "pocoo", ".", "org", "/", "6", "/", "options", "/", "#callbacks", "-", "and", "-", "eager", "-", "options", "." ]
train
https://github.com/dwavesystems/dwave-cloud-client/blob/df3221a8385dc0c04d7b4d84f740bf3ad6706230/dwave/cloud/utils.py#L165-L190
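A sketch of how such a callback might be wired into a command, assuming the import paths; `version_info` and its output are invented:

import click
from dwave.cloud.utils import click_info_switch   # import path assumed

@click_info_switch
def version_info():
    click.echo('solver API version 1.2.3')   # invented output

@click.command()
@click.option('--version-info', is_flag=True, callback=version_info,
              expose_value=False, is_eager=True)
def cli():
    click.echo('normal command path')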
dwavesystems/dwave-cloud-client
dwave/cloud/utils.py
datetime_to_timestamp
def datetime_to_timestamp(dt):
    """Convert timezone-aware `datetime` to POSIX timestamp and
    return seconds since UNIX epoch.

    Note: similar to `datetime.timestamp()` in Python 3.3+.
    """

    epoch = datetime.utcfromtimestamp(0).replace(tzinfo=UTC)
    return (dt - epoch).total_seconds()
python
def datetime_to_timestamp(dt):
    """Convert timezone-aware `datetime` to POSIX timestamp and
    return seconds since UNIX epoch.

    Note: similar to `datetime.timestamp()` in Python 3.3+.
    """

    epoch = datetime.utcfromtimestamp(0).replace(tzinfo=UTC)
    return (dt - epoch).total_seconds()
[ "def", "datetime_to_timestamp", "(", "dt", ")", ":", "epoch", "=", "datetime", ".", "utcfromtimestamp", "(", "0", ")", ".", "replace", "(", "tzinfo", "=", "UTC", ")", "return", "(", "dt", "-", "epoch", ")", ".", "total_seconds", "(", ")" ]
Convert timezone-aware `datetime` to POSIX timestamp and return seconds since UNIX epoch. Note: similar to `datetime.timestamp()` in Python 3.3+.
[ "Convert", "timezone", "-", "aware", "datetime", "to", "POSIX", "timestamp", "and", "return", "seconds", "since", "UNIX", "epoch", "." ]
train
https://github.com/dwavesystems/dwave-cloud-client/blob/df3221a8385dc0c04d7b4d84f740bf3ad6706230/dwave/cloud/utils.py#L193-L201
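A self-contained restatement of the epoch arithmetic, using `datetime.timezone.utc` in place of the module's `UTC` tzinfo object (assumed equivalent):

from datetime import datetime, timezone

epoch = datetime.utcfromtimestamp(0).replace(tzinfo=timezone.utc)
dt = datetime(2019, 1, 1, tzinfo=timezone.utc)
assert (dt - epoch).total_seconds() == 1546300800.0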
dwavesystems/dwave-cloud-client
dwave/cloud/utils.py
user_agent
def user_agent(name, version):
    """Return User-Agent ~ "name/version language/version
    interpreter/version os/version"."""

    def _interpreter():
        name = platform.python_implementation()
        version = platform.python_version()
        bitness = platform.architecture()[0]
        if name == 'PyPy':
            version = '.'.join(map(str, sys.pypy_version_info[:3]))
        full_version = [version]
        if bitness:
            full_version.append(bitness)
        return name, "-".join(full_version)

    tags = [
        (name, version),
        ("python", platform.python_version()),
        _interpreter(),
        ("machine", platform.machine() or 'unknown'),
        ("system", platform.system() or 'unknown'),
        ("platform", platform.platform() or 'unknown'),
    ]

    return ' '.join("{}/{}".format(name, version) for name, version in tags)
python
def user_agent(name, version):
    """Return User-Agent ~ "name/version language/version
    interpreter/version os/version"."""

    def _interpreter():
        name = platform.python_implementation()
        version = platform.python_version()
        bitness = platform.architecture()[0]
        if name == 'PyPy':
            version = '.'.join(map(str, sys.pypy_version_info[:3]))
        full_version = [version]
        if bitness:
            full_version.append(bitness)
        return name, "-".join(full_version)

    tags = [
        (name, version),
        ("python", platform.python_version()),
        _interpreter(),
        ("machine", platform.machine() or 'unknown'),
        ("system", platform.system() or 'unknown'),
        ("platform", platform.platform() or 'unknown'),
    ]

    return ' '.join("{}/{}".format(name, version) for name, version in tags)
[ "def", "user_agent", "(", "name", ",", "version", ")", ":", "def", "_interpreter", "(", ")", ":", "name", "=", "platform", ".", "python_implementation", "(", ")", "version", "=", "platform", ".", "python_version", "(", ")", "bitness", "=", "platform", ".", "architecture", "(", ")", "[", "0", "]", "if", "name", "==", "'PyPy'", ":", "version", "=", "'.'", ".", "join", "(", "map", "(", "str", ",", "sys", ".", "pypy_version_info", "[", ":", "3", "]", ")", ")", "full_version", "=", "[", "version", "]", "if", "bitness", ":", "full_version", ".", "append", "(", "bitness", ")", "return", "name", ",", "\"-\"", ".", "join", "(", "full_version", ")", "tags", "=", "[", "(", "name", ",", "version", ")", ",", "(", "\"python\"", ",", "platform", ".", "python_version", "(", ")", ")", ",", "_interpreter", "(", ")", ",", "(", "\"machine\"", ",", "platform", ".", "machine", "(", ")", "or", "'unknown'", ")", ",", "(", "\"system\"", ",", "platform", ".", "system", "(", ")", "or", "'unknown'", ")", ",", "(", "\"platform\"", ",", "platform", ".", "platform", "(", ")", "or", "'unknown'", ")", ",", "]", "return", "' '", ".", "join", "(", "\"{}/{}\"", ".", "format", "(", "name", ",", "version", ")", "for", "name", ",", "version", "in", "tags", ")" ]
Return User-Agent ~ "name/version language/version interpreter/version os/version".
[ "Return", "User", "-", "Agent", "~", "name", "/", "version", "language", "/", "version", "interpreter", "/", "version", "os", "/", "version", "." ]
train
https://github.com/dwavesystems/dwave-cloud-client/blob/df3221a8385dc0c04d7b4d84f740bf3ad6706230/dwave/cloud/utils.py#L252-L275
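A usage sketch (import path assumed from this file); the exact tag values depend on the host running it:

from dwave.cloud.utils import user_agent   # import path assumed

ua = user_agent('dwave-cloud-client', '0.5.0')   # version string invented
# e.g. 'dwave-cloud-client/0.5.0 python/3.7.3 CPython/3.7.3-64bit
#       machine/x86_64 system/Linux platform/Linux-...'
print(ua)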
dwavesystems/dwave-cloud-client
dwave/cloud/utils.py
cached.argshash
def argshash(self, args, kwargs):
    "Hash mutable arguments' containers with immutable keys and values."
    a = repr(args)
    b = repr(sorted((repr(k), repr(v)) for k, v in kwargs.items()))
    return a + b
python
def argshash(self, args, kwargs):
    "Hash mutable arguments' containers with immutable keys and values."
    a = repr(args)
    b = repr(sorted((repr(k), repr(v)) for k, v in kwargs.items()))
    return a + b
[ "def", "argshash", "(", "self", ",", "args", ",", "kwargs", ")", ":", "a", "=", "repr", "(", "args", ")", "b", "=", "repr", "(", "sorted", "(", "(", "repr", "(", "k", ")", ",", "repr", "(", "v", ")", ")", "for", "k", ",", "v", "in", "kwargs", ".", "items", "(", ")", ")", ")", "return", "a", "+", "b" ]
Hash mutable arguments' containers with immutable keys and values.
[ "Hash", "mutable", "arguments", "containers", "with", "immutable", "keys", "and", "values", "." ]
train
https://github.com/dwavesystems/dwave-cloud-client/blob/df3221a8385dc0c04d7b4d84f740bf3ad6706230/dwave/cloud/utils.py#L316-L320
bitlabstudio/django-user-media
user_media/views.py
UserMediaImageViewMixin.get_context_data
def get_context_data(self, **kwargs):
    """
    Adds `next` to the context.

    This makes sure that the `next` parameter doesn't get lost if the
    form was submitted invalid.

    """
    ctx = super(UserMediaImageViewMixin, self).get_context_data(**kwargs)
    ctx.update({
        'action': self.action,
        'next': self.next,
    })
    return ctx
python
def get_context_data(self, **kwargs):
    """
    Adds `next` to the context.

    This makes sure that the `next` parameter doesn't get lost if the
    form was submitted invalid.

    """
    ctx = super(UserMediaImageViewMixin, self).get_context_data(**kwargs)
    ctx.update({
        'action': self.action,
        'next': self.next,
    })
    return ctx
[ "def", "get_context_data", "(", "self", ",", "*", "*", "kwargs", ")", ":", "ctx", "=", "super", "(", "UserMediaImageViewMixin", ",", "self", ")", ".", "get_context_data", "(", "*", "*", "kwargs", ")", "ctx", ".", "update", "(", "{", "'action'", ":", "self", ".", "action", ",", "'next'", ":", "self", ".", "next", ",", "}", ")", "return", "ctx" ]
Adds `next` to the context. This makes sure that the `next` parameter doesn't get lost if the form was submitted invalid.
[ "Adds", "next", "to", "the", "context", "." ]
train
https://github.com/bitlabstudio/django-user-media/blob/63905aeb57640f116320ab8d7116e0ec35fde377/user_media/views.py#L34-L47
bitlabstudio/django-user-media
user_media/views.py
UserMediaImageViewMixin.get_success_url
def get_success_url(self):
    """
    Returns the success URL.

    This is either the given `next` URL parameter or the content
    object's `get_absolute_url` method's return value.

    """
    if self.next:
        return self.next
    if self.object and self.object.content_object:
        return self.object.content_object.get_absolute_url()
    raise Exception(
        'No content object given. Please provide ``next`` in your POST'
        ' data')
python
def get_success_url(self):
    """
    Returns the success URL.

    This is either the given `next` URL parameter or the content
    object's `get_absolute_url` method's return value.

    """
    if self.next:
        return self.next
    if self.object and self.object.content_object:
        return self.object.content_object.get_absolute_url()
    raise Exception(
        'No content object given. Please provide ``next`` in your POST'
        ' data')
[ "def", "get_success_url", "(", "self", ")", ":", "if", "self", ".", "next", ":", "return", "self", ".", "next", "if", "self", ".", "object", "and", "self", ".", "object", ".", "content_object", ":", "return", "self", ".", "object", ".", "content_object", ".", "get_absolute_url", "(", ")", "raise", "Exception", "(", "'No content object given. Please provide ``next`` in your POST'", "' data'", ")" ]
Returns the success URL. This is either the given `next` URL parameter or the content object's `get_absolute_url` method's return value.
[ "Returns", "the", "success", "URL", "." ]
train
https://github.com/bitlabstudio/django-user-media/blob/63905aeb57640f116320ab8d7116e0ec35fde377/user_media/views.py#L49-L63
bitlabstudio/django-user-media
user_media/views.py
CreateImageView.dispatch
def dispatch(self, request, *args, **kwargs):
    """Adds useful objects to the class and performs security checks."""
    self._add_next_and_user(request)
    self.content_object = None
    self.content_type = None
    self.object_id = kwargs.get('object_id', None)

    if kwargs.get('content_type'):
        # Check if the user forged the URL and posted a non-existent
        # content type
        try:
            self.content_type = ContentType.objects.get(
                model=kwargs.get('content_type'))
        except ContentType.DoesNotExist:
            raise Http404

    if self.content_type:
        # Check if the user forged the URL and tries to append the image to
        # an object that does not exist
        try:
            self.content_object = \
                self.content_type.get_object_for_this_type(
                    pk=self.object_id)
        except ObjectDoesNotExist:
            raise Http404

    if self.content_object and hasattr(self.content_object, 'user'):
        # Check if the user forged the URL and tries to append the image to
        # an object that does not belong to him
        if not self.content_object.user == self.user:
            raise Http404

    return super(CreateImageView, self).dispatch(request, *args, **kwargs)
python
def dispatch(self, request, *args, **kwargs):
    """Adds useful objects to the class and performs security checks."""
    self._add_next_and_user(request)
    self.content_object = None
    self.content_type = None
    self.object_id = kwargs.get('object_id', None)

    if kwargs.get('content_type'):
        # Check if the user forged the URL and posted a non-existent
        # content type
        try:
            self.content_type = ContentType.objects.get(
                model=kwargs.get('content_type'))
        except ContentType.DoesNotExist:
            raise Http404

    if self.content_type:
        # Check if the user forged the URL and tries to append the image to
        # an object that does not exist
        try:
            self.content_object = \
                self.content_type.get_object_for_this_type(
                    pk=self.object_id)
        except ObjectDoesNotExist:
            raise Http404

    if self.content_object and hasattr(self.content_object, 'user'):
        # Check if the user forged the URL and tries to append the image to
        # an object that does not belong to him
        if not self.content_object.user == self.user:
            raise Http404

    return super(CreateImageView, self).dispatch(request, *args, **kwargs)
[ "def", "dispatch", "(", "self", ",", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "_add_next_and_user", "(", "request", ")", "self", ".", "content_object", "=", "None", "self", ".", "content_type", "=", "None", "self", ".", "object_id", "=", "kwargs", ".", "get", "(", "'object_id'", ",", "None", ")", "if", "kwargs", ".", "get", "(", "'content_type'", ")", ":", "# Check if the user forged the URL and posted a non existant", "# content type", "try", ":", "self", ".", "content_type", "=", "ContentType", ".", "objects", ".", "get", "(", "model", "=", "kwargs", ".", "get", "(", "'content_type'", ")", ")", "except", "ContentType", ".", "DoesNotExist", ":", "raise", "Http404", "if", "self", ".", "content_type", ":", "# Check if the user forged the URL and tries to append the image to", "# an object that does not exist", "try", ":", "self", ".", "content_object", "=", "self", ".", "content_type", ".", "get_object_for_this_type", "(", "pk", "=", "self", ".", "object_id", ")", "except", "ObjectDoesNotExist", ":", "raise", "Http404", "if", "self", ".", "content_object", "and", "hasattr", "(", "self", ".", "content_object", ",", "'user'", ")", ":", "# Check if the user forged the URL and tries to append the image to", "# an object that does not belong to him", "if", "not", "self", ".", "content_object", ".", "user", "==", "self", ".", "user", ":", "raise", "Http404", "return", "super", "(", "CreateImageView", ",", "self", ")", ".", "dispatch", "(", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Adds useful objects to the class and performs security checks.
[ "Adds", "useful", "objects", "to", "the", "class", "and", "performs", "security", "checks", "." ]
train
https://github.com/bitlabstudio/django-user-media/blob/63905aeb57640f116320ab8d7116e0ec35fde377/user_media/views.py#L72-L104
bitlabstudio/django-user-media
user_media/views.py
DeleteImageView.dispatch
def dispatch(self, request, *args, **kwargs):
    """Adds useful objects to the class."""
    self._add_next_and_user(request)
    return super(DeleteImageView, self).dispatch(request, *args, **kwargs)
python
def dispatch(self, request, *args, **kwargs):
    """Adds useful objects to the class."""
    self._add_next_and_user(request)
    return super(DeleteImageView, self).dispatch(request, *args, **kwargs)
[ "def", "dispatch", "(", "self", ",", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "_add_next_and_user", "(", "request", ")", "return", "super", "(", "DeleteImageView", ",", "self", ")", ".", "dispatch", "(", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Adds useful objects to the class.
[ "Adds", "useful", "objects", "to", "the", "class", "." ]
train
https://github.com/bitlabstudio/django-user-media/blob/63905aeb57640f116320ab8d7116e0ec35fde377/user_media/views.py#L131-L134
bitlabstudio/django-user-media
user_media/views.py
DeleteImageView.get_queryset
def get_queryset(self):
    """
    Making sure that a user can only delete his own images.

    Even when he forges the request URL.

    """
    queryset = super(DeleteImageView, self).get_queryset()
    queryset = queryset.filter(user=self.user)
    return queryset
python
def get_queryset(self):
    """
    Making sure that a user can only delete his own images.

    Even when he forges the request URL.

    """
    queryset = super(DeleteImageView, self).get_queryset()
    queryset = queryset.filter(user=self.user)
    return queryset
[ "def", "get_queryset", "(", "self", ")", ":", "queryset", "=", "super", "(", "DeleteImageView", ",", "self", ")", ".", "get_queryset", "(", ")", "queryset", "=", "queryset", ".", "filter", "(", "user", "=", "self", ".", "user", ")", "return", "queryset" ]
Making sure that a user can only delete his own images. Even when he forges the request URL.
[ "Making", "sure", "that", "a", "user", "can", "only", "delete", "his", "own", "images", "." ]
train
https://github.com/bitlabstudio/django-user-media/blob/63905aeb57640f116320ab8d7116e0ec35fde377/user_media/views.py#L143-L152
bitlabstudio/django-user-media
user_media/views.py
UpdateImageView.dispatch
def dispatch(self, request, *args, **kwargs):
    """Adds useful objects to the class."""
    self._add_next_and_user(request)
    return super(UpdateImageView, self).dispatch(request, *args, **kwargs)
python
def dispatch(self, request, *args, **kwargs):
    """Adds useful objects to the class."""
    self._add_next_and_user(request)
    return super(UpdateImageView, self).dispatch(request, *args, **kwargs)
[ "def", "dispatch", "(", "self", ",", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "_add_next_and_user", "(", "request", ")", "return", "super", "(", "UpdateImageView", ",", "self", ")", ".", "dispatch", "(", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Adds useful objects to the class.
[ "Adds", "useful", "objects", "to", "the", "class", "." ]
train
https://github.com/bitlabstudio/django-user-media/blob/63905aeb57640f116320ab8d7116e0ec35fde377/user_media/views.py#L163-L166
bitlabstudio/django-user-media
user_media/views.py
UpdateImageView.get_queryset
def get_queryset(self):
    """
    Making sure that a user can only edit his own images.

    Even when he forges the request URL.

    """
    queryset = super(UpdateImageView, self).get_queryset()
    queryset = queryset.filter(user=self.user)
    return queryset
python
def get_queryset(self):
    """
    Making sure that a user can only edit his own images.

    Even when he forges the request URL.

    """
    queryset = super(UpdateImageView, self).get_queryset()
    queryset = queryset.filter(user=self.user)
    return queryset
[ "def", "get_queryset", "(", "self", ")", ":", "queryset", "=", "super", "(", "UpdateImageView", ",", "self", ")", ".", "get_queryset", "(", ")", "queryset", "=", "queryset", ".", "filter", "(", "user", "=", "self", ".", "user", ")", "return", "queryset" ]
Making sure that a user can only edit his own images. Even when he forges the request URL.
[ "Making", "sure", "that", "a", "user", "can", "only", "edit", "his", "own", "images", "." ]
train
https://github.com/bitlabstudio/django-user-media/blob/63905aeb57640f116320ab8d7116e0ec35fde377/user_media/views.py#L186-L195
bitlabstudio/django-user-media
user_media/forms.py
UserMediaImageFormMixin._delete_images
def _delete_images(self, instance):
    """Deletes all user media images of the given instance."""
    UserMediaImage.objects.filter(
        content_type=ContentType.objects.get_for_model(instance),
        object_id=instance.pk,
        user=instance.user,
    ).delete()
python
def _delete_images(self, instance):
    """Deletes all user media images of the given instance."""
    UserMediaImage.objects.filter(
        content_type=ContentType.objects.get_for_model(instance),
        object_id=instance.pk,
        user=instance.user,
    ).delete()
[ "def", "_delete_images", "(", "self", ",", "instance", ")", ":", "UserMediaImage", ".", "objects", ".", "filter", "(", "content_type", "=", "ContentType", ".", "objects", ".", "get_for_model", "(", "instance", ")", ",", "object_id", "=", "instance", ".", "pk", ",", "user", "=", "instance", ".", "user", ",", ")", ".", "delete", "(", ")" ]
Deletes all user media images of the given instance.
[ "Deletes", "all", "user", "media", "images", "of", "the", "given", "instance", "." ]
train
https://github.com/bitlabstudio/django-user-media/blob/63905aeb57640f116320ab8d7116e0ec35fde377/user_media/forms.py#L51-L57
bitlabstudio/django-user-media
user_media/forms.py
UserMediaImageForm.clean_image
def clean_image(self): """ It seems like in Django 1.5 something has changed. When Django tries to validate the form, it checks if the generated filename fit into the max_length. But at this point, self.instance.user is not yet set so our filename generation function cannot create the new file path because it needs the user id. Setting self.instance.user at this point seems to work as a workaround. """ self.instance.user = self.user data = self.cleaned_data.get('image') return data
python
def clean_image(self): """ It seems like in Django 1.5 something has changed. When Django tries to validate the form, it checks if the generated filename fit into the max_length. But at this point, self.instance.user is not yet set so our filename generation function cannot create the new file path because it needs the user id. Setting self.instance.user at this point seems to work as a workaround. """ self.instance.user = self.user data = self.cleaned_data.get('image') return data
[ "def", "clean_image", "(", "self", ")", ":", "self", ".", "instance", ".", "user", "=", "self", ".", "user", "data", "=", "self", ".", "cleaned_data", ".", "get", "(", "'image'", ")", "return", "data" ]
It seems like in Django 1.5 something has changed. When Django tries to validate the form, it checks if the generated filename fits into the max_length. But at this point, self.instance.user is not yet set so our filename generation function cannot create the new file path because it needs the user id. Setting self.instance.user at this point seems to work as a workaround.
[ "It", "seems", "like", "in", "Django", "1", ".", "5", "something", "has", "changed", "." ]
train
https://github.com/bitlabstudio/django-user-media/blob/63905aeb57640f116320ab8d7116e0ec35fde377/user_media/forms.py#L90-L103
bitlabstudio/django-user-media
user_media/models.py
get_image_file_path
def get_image_file_path(instance, filename): """Returns a unique filename for images.""" ext = filename.split('.')[-1] filename = '%s.%s' % (uuid.uuid4(), ext) return os.path.join( 'user_media', str(instance.user.pk), 'images', filename)
python
def get_image_file_path(instance, filename): """Returns a unique filename for images.""" ext = filename.split('.')[-1] filename = '%s.%s' % (uuid.uuid4(), ext) return os.path.join( 'user_media', str(instance.user.pk), 'images', filename)
[ "def", "get_image_file_path", "(", "instance", ",", "filename", ")", ":", "ext", "=", "filename", ".", "split", "(", "'.'", ")", "[", "-", "1", "]", "filename", "=", "'%s.%s'", "%", "(", "uuid", ".", "uuid4", "(", ")", ",", "ext", ")", "return", "os", ".", "path", ".", "join", "(", "'user_media'", ",", "str", "(", "instance", ".", "user", ".", "pk", ")", ",", "'images'", ",", "filename", ")" ]
Returns a unique filename for images.
[ "Returns", "a", "unique", "filename", "for", "images", "." ]
train
https://github.com/bitlabstudio/django-user-media/blob/63905aeb57640f116320ab8d7116e0ec35fde377/user_media/models.py#L15-L20
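A sketch of the path this produces, with hypothetical stand-in objects for the model instance:

import os
import uuid

class FakeUser(object):
    pk = 42                        # hypothetical user id

class FakeInstance(object):
    user = FakeUser()

# Mirrors the helper: the extension is kept, the basename is a fresh UUID
ext = 'my photo.jpg'.split('.')[-1]
path = os.path.join('user_media', str(FakeInstance.user.pk), 'images',
                    '%s.%s' % (uuid.uuid4(), ext))
# -> 'user_media/42/images/<random-uuid>.jpg'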
bitlabstudio/django-user-media
user_media/models.py
image_post_delete_handler
def image_post_delete_handler(sender, instance, **kwargs): """ Makes sure that a an image is also deleted from the media directory. This should prevent a load of "dead" image files on disc. """ for f in glob.glob('{}/{}*'.format(instance.image.storage.location, instance.image.name)): if not os.path.isdir(f): instance.image.storage.delete(f)
python
def image_post_delete_handler(sender, instance, **kwargs): """ Makes sure that a an image is also deleted from the media directory. This should prevent a load of "dead" image files on disc. """ for f in glob.glob('{}/{}*'.format(instance.image.storage.location, instance.image.name)): if not os.path.isdir(f): instance.image.storage.delete(f)
[ "def", "image_post_delete_handler", "(", "sender", ",", "instance", ",", "*", "*", "kwargs", ")", ":", "for", "f", "in", "glob", ".", "glob", "(", "'{}/{}*'", ".", "format", "(", "instance", ".", "image", ".", "storage", ".", "location", ",", "instance", ".", "image", ".", "name", ")", ")", ":", "if", "not", "os", ".", "path", ".", "isdir", "(", "f", ")", ":", "instance", ".", "image", ".", "storage", ".", "delete", "(", "f", ")" ]
Makes sure that a an image is also deleted from the media directory. This should prevent a load of "dead" image files on disc.
[ "Makes", "sure", "that", "a", "an", "image", "is", "also", "deleted", "from", "the", "media", "directory", "." ]
train
https://github.com/bitlabstudio/django-user-media/blob/63905aeb57640f116320ab8d7116e0ec35fde377/user_media/models.py#L138-L148
bitlabstudio/django-user-media
user_media/models.py
UserMediaImage.box_coordinates
def box_coordinates(self): """Returns a thumbnail's coordinates.""" if ( self.thumb_x is not None and self.thumb_y is not None and self.thumb_x2 is not None and self.thumb_y2 is not None ): return ( int(self.thumb_x), int(self.thumb_y), int(self.thumb_x2), int(self.thumb_y2), ) return False
python
def box_coordinates(self): """Returns a thumbnail's coordinates.""" if ( self.thumb_x is not None and self.thumb_y is not None and self.thumb_x2 is not None and self.thumb_y2 is not None ): return ( int(self.thumb_x), int(self.thumb_y), int(self.thumb_x2), int(self.thumb_y2), ) return False
[ "def", "box_coordinates", "(", "self", ")", ":", "if", "(", "self", ".", "thumb_x", "is", "not", "None", "and", "self", ".", "thumb_y", "is", "not", "None", "and", "self", ".", "thumb_x2", "is", "not", "None", "and", "self", ".", "thumb_y2", "is", "not", "None", ")", ":", "return", "(", "int", "(", "self", ".", "thumb_x", ")", ",", "int", "(", "self", ".", "thumb_y", ")", ",", "int", "(", "self", ".", "thumb_x2", ")", ",", "int", "(", "self", ".", "thumb_y2", ")", ",", ")", "return", "False" ]
Returns a thumbnail's coordinates.
[ "Returns", "a", "thumbnail", "s", "coordinates", "." ]
train
https://github.com/bitlabstudio/django-user-media/blob/63905aeb57640f116320ab8d7116e0ec35fde377/user_media/models.py#L106-L120
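An inline restatement on a hypothetical object, showing the (left, upper, right, lower) tuple this yields:

class FakeImage(object):
    """Hypothetical stand-in with all four thumbnail fields set."""
    thumb_x, thumb_y, thumb_x2, thumb_y2 = 10.0, 20.0, 110.0, 220.0

img = FakeImage()
if None not in (img.thumb_x, img.thumb_y, img.thumb_x2, img.thumb_y2):
    box = (int(img.thumb_x), int(img.thumb_y),
           int(img.thumb_x2), int(img.thumb_y2))
assert box == (10, 20, 110, 220)   # (left, upper, right, lower)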
bitlabstudio/django-user-media
user_media/models.py
UserMediaImage.large_size
def large_size(self, as_string=True): """Returns a thumbnail's large size.""" size = getattr(settings, 'USER_MEDIA_THUMB_SIZE_LARGE', (150, 150)) if as_string: return u'{}x{}'.format(size[0], size[1]) return size
python
def large_size(self, as_string=True): """Returns a thumbnail's large size.""" size = getattr(settings, 'USER_MEDIA_THUMB_SIZE_LARGE', (150, 150)) if as_string: return u'{}x{}'.format(size[0], size[1]) return size
[ "def", "large_size", "(", "self", ",", "as_string", "=", "True", ")", ":", "size", "=", "getattr", "(", "settings", ",", "'USER_MEDIA_THUMB_SIZE_LARGE'", ",", "(", "150", ",", "150", ")", ")", "if", "as_string", ":", "return", "u'{}x{}'", ".", "format", "(", "size", "[", "0", "]", ",", "size", "[", "1", "]", ")", "return", "size" ]
Returns a thumbnail's large size.
[ "Returns", "a", "thumbnail", "s", "large", "size", "." ]
train
https://github.com/bitlabstudio/django-user-media/blob/63905aeb57640f116320ab8d7116e0ec35fde377/user_media/models.py#L122-L127
bitlabstudio/django-user-media
user_media/processors.py
crop_box
def crop_box(im, box=False, **kwargs): """Uses box coordinates to crop an image without resizing it first.""" if box: im = im.crop(box) return im
python
def crop_box(im, box=False, **kwargs): """Uses box coordinates to crop an image without resizing it first.""" if box: im = im.crop(box) return im
[ "def", "crop_box", "(", "im", ",", "box", "=", "False", ",", "*", "*", "kwargs", ")", ":", "if", "box", ":", "im", "=", "im", ".", "crop", "(", "box", ")", "return", "im" ]
Uses box coordinates to crop an image without resizing it first.
[ "Uses", "box", "coordinates", "to", "crop", "an", "image", "without", "resizing", "it", "first", "." ]
train
https://github.com/bitlabstudio/django-user-media/blob/63905aeb57640f116320ab8d7116e0ec35fde377/user_media/processors.py#L4-L8
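A usage sketch assuming Pillow is available; the box matches the box_coordinates format above:

from PIL import Image

im = Image.new('RGB', (200, 300))
box = (10, 20, 110, 220)           # e.g. from UserMediaImage.box_coordinates
cropped = im.crop(box) if box else im
assert cropped.size == (100, 200)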
wroberts/pygermanet
pygermanet/germanet.py
load_germanet
def load_germanet(host = None, port = None, database_name = 'germanet'):
    '''
    Loads a GermaNet instance connected to the given MongoDB instance.

    Arguments:
    - `host`: the hostname of the MongoDB instance
    - `port`: the port number of the MongoDB instance
    - `database_name`: the name of the GermaNet database on the
      MongoDB instance
    '''
    client = MongoClient(host, port)
    germanet_db = client[database_name]
    return GermaNet(germanet_db)
python
def load_germanet(host = None, port = None, database_name = 'germanet'):
    '''
    Loads a GermaNet instance connected to the given MongoDB instance.

    Arguments:
    - `host`: the hostname of the MongoDB instance
    - `port`: the port number of the MongoDB instance
    - `database_name`: the name of the GermaNet database on the
      MongoDB instance
    '''
    client = MongoClient(host, port)
    germanet_db = client[database_name]
    return GermaNet(germanet_db)
[ "def", "load_germanet", "(", "host", "=", "None", ",", "port", "=", "None", ",", "database_name", "=", "'germanet'", ")", ":", "client", "=", "MongoClient", "(", "host", ",", "port", ")", "germanet_db", "=", "client", "[", "database_name", "]", "return", "GermaNet", "(", "germanet_db", ")" ]
Loads a GermaNet instance connected to the given MongoDB instance.

    Arguments:
    - `host`: the hostname of the MongoDB instance
    - `port`: the port number of the MongoDB instance
    - `database_name`: the name of the GermaNet database on the
      MongoDB instance
[ "Loads", "a", "GermaNet", "instance", "connected", "to", "the", "given", "MongoDB", "instance", "." ]
train
https://github.com/wroberts/pygermanet/blob/1818c20a7e8c431c4cfb5a570ed0d850bb6dd515/pygermanet/germanet.py#L664-L676
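A usage sketch, assuming a running local MongoDB with an imported GermaNet database and the package's top-level import path:

from pygermanet import load_germanet   # import path assumed

gn = load_germanet()                   # defaults: local MongoDB, 'germanet' db
for synset in gn.synsets(u'gehen', pos='v'):
    print(synset)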
wroberts/pygermanet
pygermanet/germanet.py
GermaNet.cache_size
def cache_size(self, new_value):
    '''
    Set the cache size used to reduce the number of database access
    operations.
    '''
    if type(new_value) == int and 0 < new_value:
        if self._lemma_cache is not None:
            self._lemma_cache = repoze.lru.LRUCache(new_value)
            self._synset_cache = repoze.lru.LRUCache(new_value)
python
def cache_size(self, new_value):
    '''
    Set the cache size used to reduce the number of database access
    operations.
    '''
    if type(new_value) == int and 0 < new_value:
        if self._lemma_cache is not None:
            self._lemma_cache = repoze.lru.LRUCache(new_value)
            self._synset_cache = repoze.lru.LRUCache(new_value)
[ "def", "cache_size", "(", "self", ",", "new_value", ")", ":", "if", "type", "(", "new_value", ")", "==", "int", "and", "0", "<", "new_value", ":", "if", "self", ".", "_lemma_cache", "is", "not", "None", ":", "self", ".", "_lemma_cache", "=", "repoze", ".", "lru", ".", "LRUCache", "(", "new_value", ")", "self", ".", "_synset_cache", "=", "repoze", ".", "lru", ".", "LRUCache", "(", "new_value", ")" ]
Set the cache size used to reduce the number of database access operations.
[ "Set", "the", "cache", "size", "used", "to", "reduce", "the", "number", "of", "database", "access", "operations", "." ]
train
https://github.com/wroberts/pygermanet/blob/1818c20a7e8c431c4cfb5a570ed0d850bb6dd515/pygermanet/germanet.py#L75-L83
wroberts/pygermanet
pygermanet/germanet.py
GermaNet.all_lemmas
def all_lemmas(self):
    '''
    A generator over all the lemmas in the GermaNet database.
    '''
    for lemma_dict in self._mongo_db.lexunits.find():
        yield Lemma(self, lemma_dict)
python
def all_lemmas(self):
    '''
    A generator over all the lemmas in the GermaNet database.
    '''
    for lemma_dict in self._mongo_db.lexunits.find():
        yield Lemma(self, lemma_dict)
[ "def", "all_lemmas", "(", "self", ")", ":", "for", "lemma_dict", "in", "self", ".", "_mongo_db", ".", "lexunits", ".", "find", "(", ")", ":", "yield", "Lemma", "(", "self", ",", "lemma_dict", ")" ]
A generator over all the lemmas in the GermaNet database.
[ "A", "generator", "over", "all", "the", "lemmas", "in", "the", "GermaNet", "database", "." ]
train
https://github.com/wroberts/pygermanet/blob/1818c20a7e8c431c4cfb5a570ed0d850bb6dd515/pygermanet/germanet.py#L85-L90
wroberts/pygermanet
pygermanet/germanet.py
GermaNet.lemmas
def lemmas(self, lemma, pos = None):
    '''
    Looks up lemmas in the GermaNet database.

    Arguments:
    - `lemma`:
    - `pos`:
    '''
    if pos is not None:
        if pos not in SHORT_POS_TO_LONG:
            return None
        pos = SHORT_POS_TO_LONG[pos]
        lemma_dicts = self._mongo_db.lexunits.find({'orthForm': lemma,
                                                    'category': pos})
    else:
        lemma_dicts = self._mongo_db.lexunits.find({'orthForm': lemma})
    return sorted([Lemma(self, lemma_dict) for lemma_dict in lemma_dicts])
python
def lemmas(self, lemma, pos = None):
    '''
    Looks up lemmas in the GermaNet database.

    Arguments:
    - `lemma`:
    - `pos`:
    '''
    if pos is not None:
        if pos not in SHORT_POS_TO_LONG:
            return None
        pos = SHORT_POS_TO_LONG[pos]
        lemma_dicts = self._mongo_db.lexunits.find({'orthForm': lemma,
                                                    'category': pos})
    else:
        lemma_dicts = self._mongo_db.lexunits.find({'orthForm': lemma})
    return sorted([Lemma(self, lemma_dict) for lemma_dict in lemma_dicts])
[ "def", "lemmas", "(", "self", ",", "lemma", ",", "pos", "=", "None", ")", ":", "if", "pos", "is", "not", "None", ":", "if", "pos", "not", "in", "SHORT_POS_TO_LONG", ":", "return", "None", "pos", "=", "SHORT_POS_TO_LONG", "[", "pos", "]", "lemma_dicts", "=", "self", ".", "_mongo_db", ".", "lexunits", ".", "find", "(", "{", "'orthForm'", ":", "lemma", ",", "'category'", ":", "pos", "}", ")", "else", ":", "lemma_dicts", "=", "self", ".", "_mongo_db", ".", "lexunits", ".", "find", "(", "{", "'orthForm'", ":", "lemma", "}", ")", "return", "sorted", "(", "[", "Lemma", "(", "self", ",", "lemma_dict", ")", "for", "lemma_dict", "in", "lemma_dicts", "]", ")" ]
Looks up lemmas in the GermaNet database.

    Arguments:
    - `lemma`:
    - `pos`:
[ "Looks", "up", "lemmas", "in", "the", "GermaNet", "database", "." ]
train
https://github.com/wroberts/pygermanet/blob/1818c20a7e8c431c4cfb5a570ed0d850bb6dd515/pygermanet/germanet.py#L92-L108
wroberts/pygermanet
pygermanet/germanet.py
GermaNet.all_synsets
def all_synsets(self):
    '''
    A generator over all the synsets in the GermaNet database.
    '''
    for synset_dict in self._mongo_db.synsets.find():
        yield Synset(self, synset_dict)
python
def all_synsets(self):
    '''
    A generator over all the synsets in the GermaNet database.
    '''
    for synset_dict in self._mongo_db.synsets.find():
        yield Synset(self, synset_dict)
[ "def", "all_synsets", "(", "self", ")", ":", "for", "synset_dict", "in", "self", ".", "_mongo_db", ".", "synsets", ".", "find", "(", ")", ":", "yield", "Synset", "(", "self", ",", "synset_dict", ")" ]
A generator over all the synsets in the GermaNet database.
[ "A", "generator", "over", "all", "the", "synsets", "in", "the", "GermaNet", "database", "." ]
train
https://github.com/wroberts/pygermanet/blob/1818c20a7e8c431c4cfb5a570ed0d850bb6dd515/pygermanet/germanet.py#L110-L115
wroberts/pygermanet
pygermanet/germanet.py
GermaNet.synsets
def synsets(self, lemma, pos = None):
    '''
    Looks up synsets in the GermaNet database.

    Arguments:
    - `lemma`:
    - `pos`:
    '''
    return sorted(set(lemma_obj.synset
                      for lemma_obj in self.lemmas(lemma, pos)))
python
def synsets(self, lemma, pos = None):
    '''
    Looks up synsets in the GermaNet database.

    Arguments:
    - `lemma`:
    - `pos`:
    '''
    return sorted(set(lemma_obj.synset
                      for lemma_obj in self.lemmas(lemma, pos)))
[ "def", "synsets", "(", "self", ",", "lemma", ",", "pos", "=", "None", ")", ":", "return", "sorted", "(", "set", "(", "lemma_obj", ".", "synset", "for", "lemma_obj", "in", "self", ".", "lemmas", "(", "lemma", ",", "pos", ")", ")", ")" ]
Looks up synsets in the GermaNet database.

    Arguments:
    - `lemma`:
    - `pos`:
[ "Looks", "up", "synsets", "in", "the", "GermaNet", "database", "." ]
train
https://github.com/wroberts/pygermanet/blob/1818c20a7e8c431c4cfb5a570ed0d850bb6dd515/pygermanet/germanet.py#L117-L126
wroberts/pygermanet
pygermanet/germanet.py
GermaNet.synset
def synset(self, synset_repr):
    '''
    Looks up a synset in GermaNet using its string representation.

    Arguments:
    - `synset_repr`: a unicode string containing the lemma, part of
      speech, and sense number of the first lemma of the synset

    >>> gn.synset(u'funktionieren.v.2')
    Synset(funktionieren.v.2)
    '''
    parts = synset_repr.split('.')
    if len(parts) != 3:
        return None
    lemma, pos, sensenum = parts
    if not sensenum.isdigit() or pos not in SHORT_POS_TO_LONG:
        return None
    sensenum = int(sensenum, 10)
    pos = SHORT_POS_TO_LONG[pos]
    lemma_dict = self._mongo_db.lexunits.find_one({'orthForm': lemma,
                                                   'category': pos,
                                                   'sense': sensenum})
    if lemma_dict:
        return Lemma(self, lemma_dict).synset
python
def synset(self, synset_repr):
    '''
    Looks up a synset in GermaNet using its string representation.

    Arguments:
    - `synset_repr`: a unicode string containing the lemma, part of
      speech, and sense number of the first lemma of the synset

    >>> gn.synset(u'funktionieren.v.2')
    Synset(funktionieren.v.2)
    '''
    parts = synset_repr.split('.')
    if len(parts) != 3:
        return None
    lemma, pos, sensenum = parts
    if not sensenum.isdigit() or pos not in SHORT_POS_TO_LONG:
        return None
    sensenum = int(sensenum, 10)
    pos = SHORT_POS_TO_LONG[pos]
    lemma_dict = self._mongo_db.lexunits.find_one({'orthForm': lemma,
                                                   'category': pos,
                                                   'sense': sensenum})
    if lemma_dict:
        return Lemma(self, lemma_dict).synset
[ "def", "synset", "(", "self", ",", "synset_repr", ")", ":", "parts", "=", "synset_repr", ".", "split", "(", "'.'", ")", "if", "len", "(", "parts", ")", "!=", "3", ":", "return", "None", "lemma", ",", "pos", ",", "sensenum", "=", "parts", "if", "not", "sensenum", ".", "isdigit", "(", ")", "or", "pos", "not", "in", "SHORT_POS_TO_LONG", ":", "return", "None", "sensenum", "=", "int", "(", "sensenum", ",", "10", ")", "pos", "=", "SHORT_POS_TO_LONG", "[", "pos", "]", "lemma_dict", "=", "self", ".", "_mongo_db", ".", "lexunits", ".", "find_one", "(", "{", "'orthForm'", ":", "lemma", ",", "'category'", ":", "pos", ",", "'sense'", ":", "sensenum", "}", ")", "if", "lemma_dict", ":", "return", "Lemma", "(", "self", ",", "lemma_dict", ")", ".", "synset" ]
Looks up a synset in GermaNet using its string representation.

    Arguments:
    - `synset_repr`: a unicode string containing the lemma, part of
      speech, and sense number of the first lemma of the synset

    >>> gn.synset(u'funktionieren.v.2')
    Synset(funktionieren.v.2)
[ "Looks", "up", "a", "synset", "in", "GermaNet", "using", "its", "string", "representation", "." ]
train
https://github.com/wroberts/pygermanet/blob/1818c20a7e8c431c4cfb5a570ed0d850bb6dd515/pygermanet/germanet.py#L128-L151
wroberts/pygermanet
pygermanet/germanet.py
GermaNet.get_synset_by_id
def get_synset_by_id(self, mongo_id):
    '''
    Builds a Synset object from the database entry with the given
    ObjectId.

    Arguments:
    - `mongo_id`: a bson.objectid.ObjectId object
    '''
    cache_hit = None
    if self._synset_cache is not None:
        cache_hit = self._synset_cache.get(mongo_id)
    if cache_hit is not None:
        return cache_hit
    synset_dict = self._mongo_db.synsets.find_one({'_id': mongo_id})
    if synset_dict is not None:
        synset = Synset(self, synset_dict)
        if self._synset_cache is not None:
            self._synset_cache.put(mongo_id, synset)
        return synset
python
def get_synset_by_id(self, mongo_id):
    '''
    Builds a Synset object from the database entry with the given
    ObjectId.

    Arguments:
    - `mongo_id`: a bson.objectid.ObjectId object
    '''
    cache_hit = None
    if self._synset_cache is not None:
        cache_hit = self._synset_cache.get(mongo_id)
    if cache_hit is not None:
        return cache_hit
    synset_dict = self._mongo_db.synsets.find_one({'_id': mongo_id})
    if synset_dict is not None:
        synset = Synset(self, synset_dict)
        if self._synset_cache is not None:
            self._synset_cache.put(mongo_id, synset)
        return synset
[ "def", "get_synset_by_id", "(", "self", ",", "mongo_id", ")", ":", "cache_hit", "=", "None", "if", "self", ".", "_synset_cache", "is", "not", "None", ":", "cache_hit", "=", "self", ".", "_synset_cache", ".", "get", "(", "mongo_id", ")", "if", "cache_hit", "is", "not", "None", ":", "return", "cache_hit", "synset_dict", "=", "self", ".", "_mongo_db", ".", "synsets", ".", "find_one", "(", "{", "'_id'", ":", "mongo_id", "}", ")", "if", "synset_dict", "is", "not", "None", ":", "synset", "=", "Synset", "(", "self", ",", "synset_dict", ")", "if", "self", ".", "_synset_cache", "is", "not", "None", ":", "self", ".", "_synset_cache", ".", "put", "(", "mongo_id", ",", "synset", ")", "return", "synset" ]
Builds a Synset object from the database entry with the given ObjectId. Arguments: - `mongo_id`: a bson.objectid.ObjectId object
[ "Builds", "a", "Synset", "object", "from", "the", "database", "entry", "with", "the", "given", "ObjectId", "." ]
train
https://github.com/wroberts/pygermanet/blob/1818c20a7e8c431c4cfb5a570ed0d850bb6dd515/pygermanet/germanet.py#L153-L171
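get_synset_by_id is a read-through cache around a single find_one() query: check the cache, fall back to the database, and cache only successful lookups so a missing id is retried next time. A generic sketch of the same pattern, with a hypothetical dict-backed Cache standing in for self._synset_cache:

class Cache(object):
    """Toy stand-in for the module's cache object (get/put interface)."""
    def __init__(self):
        self._store = {}
    def get(self, key):
        return self._store.get(key)
    def put(self, key, value):
        self._store[key] = value

def cached_fetch(cache, key, fetch):
    hit = cache.get(key) if cache is not None else None
    if hit is not None:
        return hit                    # served from cache
    value = fetch(key)                # e.g. one find_one() round trip
    if value is not None and cache is not None:
        cache.put(key, value)         # misses are not cached
    return value

cache = Cache()
print(cached_fetch(cache, 7, lambda k: k * k))  # 49, fetched
print(cached_fetch(cache, 7, lambda k: 0))      # 49, from cache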
wroberts/pygermanet
pygermanet/germanet.py
GermaNet.get_lemma_by_id
def get_lemma_by_id(self, mongo_id): ''' Builds a Lemma object from the database entry with the given ObjectId. Arguments: - `mongo_id`: a bson.objectid.ObjectId object ''' cache_hit = None if self._lemma_cache is not None: cache_hit = self._lemma_cache.get(mongo_id) if cache_hit is not None: return cache_hit lemma_dict = self._mongo_db.lexunits.find_one({'_id': mongo_id}) if lemma_dict is not None: lemma = Lemma(self, lemma_dict) if self._lemma_cache is not None: self._lemma_cache.put(mongo_id, lemma) return lemma
python
def get_lemma_by_id(self, mongo_id): ''' Builds a Lemma object from the database entry with the given ObjectId. Arguments: - `mongo_id`: a bson.objectid.ObjectId object ''' cache_hit = None if self._lemma_cache is not None: cache_hit = self._lemma_cache.get(mongo_id) if cache_hit is not None: return cache_hit lemma_dict = self._mongo_db.lexunits.find_one({'_id': mongo_id}) if lemma_dict is not None: lemma = Lemma(self, lemma_dict) if self._lemma_cache is not None: self._lemma_cache.put(mongo_id, lemma) return lemma
[ "def", "get_lemma_by_id", "(", "self", ",", "mongo_id", ")", ":", "cache_hit", "=", "None", "if", "self", ".", "_lemma_cache", "is", "not", "None", ":", "cache_hit", "=", "self", ".", "_lemma_cache", ".", "get", "(", "mongo_id", ")", "if", "cache_hit", "is", "not", "None", ":", "return", "cache_hit", "lemma_dict", "=", "self", ".", "_mongo_db", ".", "lexunits", ".", "find_one", "(", "{", "'_id'", ":", "mongo_id", "}", ")", "if", "lemma_dict", "is", "not", "None", ":", "lemma", "=", "Lemma", "(", "self", ",", "lemma_dict", ")", "if", "self", ".", "_lemma_cache", "is", "not", "None", ":", "self", ".", "_lemma_cache", ".", "put", "(", "mongo_id", ",", "lemma", ")", "return", "lemma" ]
Builds a Lemma object from the database entry with the given ObjectId. Arguments: - `mongo_id`: a bson.objectid.ObjectId object
[ "Builds", "a", "Lemma", "object", "from", "the", "database", "entry", "with", "the", "given", "ObjectId", "." ]
train
https://github.com/wroberts/pygermanet/blob/1818c20a7e8c431c4cfb5a570ed0d850bb6dd515/pygermanet/germanet.py#L173-L191
wroberts/pygermanet
pygermanet/germanet.py
GermaNet.lemmatise
def lemmatise(self, word): ''' Tries to find the base form (lemma) of the given word, using the data provided by the Projekt deutscher Wortschatz. This method returns a list of potential lemmas. >>> gn.lemmatise(u'Männer') [u'Mann'] >>> gn.lemmatise(u'XYZ123') [u'XYZ123'] ''' lemmas = list(self._mongo_db.lemmatiser.find({'word': word})) if lemmas: return [lemma['lemma'] for lemma in lemmas] else: return [word]
python
def lemmatise(self, word): ''' Tries to find the base form (lemma) of the given word, using the data provided by the Projekt deutscher Wortschatz. This method returns a list of potential lemmas. >>> gn.lemmatise(u'Männer') [u'Mann'] >>> gn.lemmatise(u'XYZ123') [u'XYZ123'] ''' lemmas = list(self._mongo_db.lemmatiser.find({'word': word})) if lemmas: return [lemma['lemma'] for lemma in lemmas] else: return [word]
[ "def", "lemmatise", "(", "self", ",", "word", ")", ":", "lemmas", "=", "list", "(", "self", ".", "_mongo_db", ".", "lemmatiser", ".", "find", "(", "{", "'word'", ":", "word", "}", ")", ")", "if", "lemmas", ":", "return", "[", "lemma", "[", "'lemma'", "]", "for", "lemma", "in", "lemmas", "]", "else", ":", "return", "[", "word", "]" ]
Tries to find the base form (lemma) of the given word, using the data provided by the Projekt deutscher Wortschatz. This method returns a list of potential lemmas. >>> gn.lemmatise(u'Männer') [u'Mann'] >>> gn.lemmatise(u'XYZ123') [u'XYZ123']
[ "Tries", "to", "find", "the", "base", "form", "(", "lemma", ")", "of", "the", "given", "word", "using", "the", "data", "provided", "by", "the", "Projekt", "deutscher", "Wortschatz", ".", "This", "method", "returns", "a", "list", "of", "potential", "lemmas", "." ]
train
https://github.com/wroberts/pygermanet/blob/1818c20a7e8c431c4cfb5a570ed0d850bb6dd515/pygermanet/germanet.py#L193-L208
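The contract worth noting in lemmatise() is the fallback: an unknown word comes back unchanged as a single-element list, so callers never have to special-case a miss. In miniature, with a toy dict standing in for the MongoDB lemmatiser collection:

# LEMMA_TABLE is invented test data in place of the database collection.
LEMMA_TABLE = {u'Männer': [u'Mann']}

def lemmatise(word):
    return LEMMA_TABLE.get(word, [word])

print(lemmatise(u'Männer'))   # [u'Mann']
print(lemmatise(u'XYZ123'))   # [u'XYZ123'] -- unknown words pass through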
wroberts/pygermanet
pygermanet/mongo_import.py
find_germanet_xml_files
def find_germanet_xml_files(xml_path): ''' Globs the XML files contained in the given directory and sorts them into sections for import into the MongoDB database. Arguments: - `xml_path`: the path to the directory containing the GermaNet XML files ''' xml_files = sorted(glob.glob(os.path.join(xml_path, '*.xml'))) # sort out the lexical files lex_files = [xml_file for xml_file in xml_files if re.match(r'(adj|nomen|verben)\.', os.path.basename(xml_file).lower())] xml_files = sorted(set(xml_files) - set(lex_files)) if not lex_files: print('ERROR: cannot find lexical information files') # sort out the GermaNet relations file gn_rels_file = [xml_file for xml_file in xml_files if os.path.basename(xml_file).lower() == 'gn_relations.xml'] xml_files = sorted(set(xml_files) - set(gn_rels_file)) if not gn_rels_file: print('ERROR: cannot find relations file gn_relations.xml') gn_rels_file = None else: if 1 < len(gn_rels_file): print ('WARNING: more than one relations file gn_relations.xml, ' 'taking first match') gn_rels_file = gn_rels_file[0] # sort out the wiktionary paraphrase files wiktionary_files = [xml_file for xml_file in xml_files if re.match(r'wiktionaryparaphrases-', os.path.basename(xml_file).lower())] xml_files = sorted(set(xml_files) - set(wiktionary_files)) if not wiktionary_files: print('WARNING: cannot find wiktionary paraphrase files') # sort out the interlingual index file ili_files = [xml_file for xml_file in xml_files if os.path.basename(xml_file).lower().startswith( 'interlingualindex')] xml_files = sorted(set(xml_files) - set(ili_files)) if not ili_files: print('WARNING: cannot find interlingual index file') if xml_files: print('WARNING: unrecognised xml files:', xml_files) return lex_files, gn_rels_file, wiktionary_files, ili_files
python
def find_germanet_xml_files(xml_path): ''' Globs the XML files contained in the given directory and sorts them into sections for import into the MongoDB database. Arguments: - `xml_path`: the path to the directory containing the GermaNet XML files ''' xml_files = sorted(glob.glob(os.path.join(xml_path, '*.xml'))) # sort out the lexical files lex_files = [xml_file for xml_file in xml_files if re.match(r'(adj|nomen|verben)\.', os.path.basename(xml_file).lower())] xml_files = sorted(set(xml_files) - set(lex_files)) if not lex_files: print('ERROR: cannot find lexical information files') # sort out the GermaNet relations file gn_rels_file = [xml_file for xml_file in xml_files if os.path.basename(xml_file).lower() == 'gn_relations.xml'] xml_files = sorted(set(xml_files) - set(gn_rels_file)) if not gn_rels_file: print('ERROR: cannot find relations file gn_relations.xml') gn_rels_file = None else: if 1 < len(gn_rels_file): print ('WARNING: more than one relations file gn_relations.xml, ' 'taking first match') gn_rels_file = gn_rels_file[0] # sort out the wiktionary paraphrase files wiktionary_files = [xml_file for xml_file in xml_files if re.match(r'wiktionaryparaphrases-', os.path.basename(xml_file).lower())] xml_files = sorted(set(xml_files) - set(wiktionary_files)) if not wiktionary_files: print('WARNING: cannot find wiktionary paraphrase files') # sort out the interlingual index file ili_files = [xml_file for xml_file in xml_files if os.path.basename(xml_file).lower().startswith( 'interlingualindex')] xml_files = sorted(set(xml_files) - set(ili_files)) if not ili_files: print('WARNING: cannot find interlingual index file') if xml_files: print('WARNING: unrecognised xml files:', xml_files) return lex_files, gn_rels_file, wiktionary_files, ili_files
[ "def", "find_germanet_xml_files", "(", "xml_path", ")", ":", "xml_files", "=", "sorted", "(", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "xml_path", ",", "'*.xml'", ")", ")", ")", "# sort out the lexical files", "lex_files", "=", "[", "xml_file", "for", "xml_file", "in", "xml_files", "if", "re", ".", "match", "(", "r'(adj|nomen|verben)\\.'", ",", "os", ".", "path", ".", "basename", "(", "xml_file", ")", ".", "lower", "(", ")", ")", "]", "xml_files", "=", "sorted", "(", "set", "(", "xml_files", ")", "-", "set", "(", "lex_files", ")", ")", "if", "not", "lex_files", ":", "print", "(", "'ERROR: cannot find lexical information files'", ")", "# sort out the GermaNet relations file", "gn_rels_file", "=", "[", "xml_file", "for", "xml_file", "in", "xml_files", "if", "os", ".", "path", ".", "basename", "(", "xml_file", ")", ".", "lower", "(", ")", "==", "'gn_relations.xml'", "]", "xml_files", "=", "sorted", "(", "set", "(", "xml_files", ")", "-", "set", "(", "gn_rels_file", ")", ")", "if", "not", "gn_rels_file", ":", "print", "(", "'ERROR: cannot find relations file gn_relations.xml'", ")", "gn_rels_file", "=", "None", "else", ":", "if", "1", "<", "len", "(", "gn_rels_file", ")", ":", "print", "(", "'WARNING: more than one relations file gn_relations.xml, '", "'taking first match'", ")", "gn_rels_file", "=", "gn_rels_file", "[", "0", "]", "# sort out the wiktionary paraphrase files", "wiktionary_files", "=", "[", "xml_file", "for", "xml_file", "in", "xml_files", "if", "re", ".", "match", "(", "r'wiktionaryparaphrases-'", ",", "os", ".", "path", ".", "basename", "(", "xml_file", ")", ".", "lower", "(", ")", ")", "]", "xml_files", "=", "sorted", "(", "set", "(", "xml_files", ")", "-", "set", "(", "wiktionary_files", ")", ")", "if", "not", "wiktionary_files", ":", "print", "(", "'WARNING: cannot find wiktionary paraphrase files'", ")", "# sort out the interlingual index file", "ili_files", "=", "[", "xml_file", "for", "xml_file", "in", "xml_files", "if", "os", ".", "path", ".", "basename", "(", "xml_file", ")", ".", "lower", "(", ")", ".", "startswith", "(", "'interlingualindex'", ")", "]", "xml_files", "=", "sorted", "(", "set", "(", "xml_files", ")", "-", "set", "(", "ili_files", ")", ")", "if", "not", "ili_files", ":", "print", "(", "'WARNING: cannot find interlingual index file'", ")", "if", "xml_files", ":", "print", "(", "'WARNING: unrecognised xml files:'", ",", "xml_files", ")", "return", "lex_files", ",", "gn_rels_file", ",", "wiktionary_files", ",", "ili_files" ]
Globs the XML files contained in the given directory and sorts them into sections for import into the MongoDB database. Arguments: - `xml_path`: the path to the directory containing the GermaNet XML files
[ "Globs", "the", "XML", "files", "contained", "in", "the", "given", "directory", "and", "sorts", "them", "into", "sections", "for", "import", "into", "the", "MongoDB", "database", "." ]
train
https://github.com/wroberts/pygermanet/blob/1818c20a7e8c431c4cfb5a570ed0d850bb6dd515/pygermanet/mongo_import.py#L30-L85
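find_germanet_xml_files repeatedly peels one bucket of files off the remaining set with a match-then-set-difference step. That idiom, condensed into a reusable helper (the file names below are only examples):

import os
import re

def partition(files, predicate):
    # split files into (those matching predicate, the sorted remainder)
    matched = [f for f in files if predicate(f)]
    return matched, sorted(set(files) - set(matched))

files = ['adj.xml', 'gn_relations.xml', 'wiktionaryparaphrases-de.xml', 'junk.xml']
lex, rest = partition(files, lambda f: re.match(r'(adj|nomen|verben)\.',
                                                os.path.basename(f).lower()))
rels, rest = partition(rest, lambda f: os.path.basename(f).lower() == 'gn_relations.xml')
print(lex, rels, rest)
# ['adj.xml'] ['gn_relations.xml'] ['junk.xml', 'wiktionaryparaphrases-de.xml']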
wroberts/pygermanet
pygermanet/mongo_import.py
warn_attribs
def warn_attribs(loc, node, recognised_attribs, reqd_attribs=None): ''' Error checking of XML input: check that the given node has certain required attributes, and does not have any unrecognised attributes. Arguments: - `loc`: a string with some information about the location of the error in the XML file - `node`: the node to check - `recognised_attribs`: a set of node attributes which we know how to handle - `reqd_attribs`: a set of node attributes which we require to be present; if this argument is None, it will take the same value as `recognised_attribs` ''' if reqd_attribs is None: reqd_attribs = recognised_attribs found_attribs = set(node.keys()) if reqd_attribs - found_attribs: print(loc, 'missing <{0}> attributes'.format(node.tag), reqd_attribs - found_attribs) if found_attribs - recognised_attribs: print(loc, 'unrecognised <{0}> properties'.format(node.tag), found_attribs - recognised_attribs)
python
def warn_attribs(loc, node, recognised_attribs, reqd_attribs=None): ''' Error checking of XML input: check that the given node has certain required attributes, and does not have any unrecognised attributes. Arguments: - `loc`: a string with some information about the location of the error in the XML file - `node`: the node to check - `recognised_attribs`: a set of node attributes which we know how to handle - `reqd_attribs`: a set of node attributes which we require to be present; if this argument is None, it will take the same value as `recognised_attribs` ''' if reqd_attribs is None: reqd_attribs = recognised_attribs found_attribs = set(node.keys()) if reqd_attribs - found_attribs: print(loc, 'missing <{0}> attributes'.format(node.tag), reqd_attribs - found_attribs) if found_attribs - recognised_attribs: print(loc, 'unrecognised <{0}> properties'.format(node.tag), found_attribs - recognised_attribs)
[ "def", "warn_attribs", "(", "loc", ",", "node", ",", "recognised_attribs", ",", "reqd_attribs", "=", "None", ")", ":", "if", "reqd_attribs", "is", "None", ":", "reqd_attribs", "=", "recognised_attribs", "found_attribs", "=", "set", "(", "node", ".", "keys", "(", ")", ")", "if", "reqd_attribs", "-", "found_attribs", ":", "print", "(", "loc", ",", "'missing <{0}> attributes'", ".", "format", "(", "node", ".", "tag", ")", ",", "reqd_attribs", "-", "found_attribs", ")", "if", "found_attribs", "-", "recognised_attribs", ":", "print", "(", "loc", ",", "'unrecognised <{0}> properties'", ".", "format", "(", "node", ".", "tag", ")", ",", "found_attribs", "-", "recognised_attribs", ")" ]
Error checking of XML input: check that the given node has certain required attributes, and does not have any unrecognised attributes. Arguments: - `loc`: a string with some information about the location of the error in the XML file - `node`: the node to check - `recognised_attribs`: a set of node attributes which we know how to handle - `reqd_attribs`: a set of node attributes which we require to be present; if this argument is None, it will take the same value as `recognised_attribs`
[ "Error", "checking", "of", "XML", "input", ":", "check", "that", "the", "given", "node", "has", "certain", "required", "attributes", "and", "does", "not", "have", "any", "unrecognised", "attributes", "." ]
train
https://github.com/wroberts/pygermanet/blob/1818c20a7e8c431c4cfb5a570ed0d850bb6dd515/pygermanet/mongo_import.py#L92-L119
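Both warnings in warn_attribs reduce to set differences between the required, recognised, and found attribute names; for example:

reqd = {'id', 'category'}
recognised = reqd | {'class'}
found = {'id', 'style'}
print(sorted(reqd - found))        # ['category'] : required but missing
print(sorted(found - recognised))  # ['style']    : present but unrecognised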
wroberts/pygermanet
pygermanet/mongo_import.py
read_lexical_file
def read_lexical_file(filename):
    '''
    Reads in a GermaNet lexical information file and returns its
    contents as a list of dictionary structures.

    Arguments:
    - `filename`: the name of the XML file to read
    '''
    with open(filename, 'rb') as input_file:
        doc = etree.parse(input_file)
    synsets = []
    assert doc.getroot().tag == 'synsets'
    for synset in doc.getroot():
        if synset.tag != 'synset':
            print('unrecognised child of <synsets>', synset)
            continue
        synset_dict = dict(synset.items())
        synloc = '{0} synset {1},'.format(filename,
                                          synset_dict.get('id', '???'))
        warn_attribs(synloc, synset, SYNSET_ATTRIBS)
        synset_dict['lexunits'] = []
        synsets.append(synset_dict)
        for child in synset:
            if child.tag == 'lexUnit':
                lexunit = child
                lexunit_dict = dict(lexunit.items())
                lexloc = synloc + ' lexUnit {0},'.format(
                    lexunit_dict.get('id', '???'))
                warn_attribs(lexloc, lexunit, LEXUNIT_ATTRIBS)
                # convert some properties to booleans
                for key in ['styleMarking', 'artificial', 'namedEntity']:
                    if key in lexunit_dict:
                        if lexunit_dict[key] not in MAP_YESNO_TO_BOOL:
                            print(lexloc, ('lexunit property {0} has '
                                           'non-boolean value').format(key),
                                  lexunit_dict[key])
                            continue
                        lexunit_dict[key] = MAP_YESNO_TO_BOOL[lexunit_dict[key]]
                # convert sense to integer number
                if 'sense' in lexunit_dict:
                    if lexunit_dict['sense'].isdigit():
                        lexunit_dict['sense'] = int(lexunit_dict['sense'], 10)
                    else:
                        print(lexloc,
                              'lexunit property sense has non-numeric value',
                              lexunit_dict['sense'])
                synset_dict['lexunits'].append(lexunit_dict)
                lexunit_dict['examples'] = []
                lexunit_dict['frames'] = []
                for child in lexunit:
                    if child.tag in ['orthForm', 'orthVar',
                                     'oldOrthForm', 'oldOrthVar']:
                        warn_attribs(lexloc, child, set())
                        if not child.text:
                            print(lexloc, '{0} with no text'.format(child.tag))
                            continue
                        if child.tag in lexunit_dict:
                            print(lexloc, 'more than one {0}'.format(child.tag))
                        lexunit_dict[child.tag] = str(child.text)
                    elif child.tag == 'example':
                        example = child
                        text = [child for child in example
                                if child.tag == 'text']
                        if len(text) != 1 or not text[0].text:
                            print(lexloc, '<example> tag without text')
                        example_dict = {'text': str(text[0].text)}
                        for child in example:
                            if child.tag == 'text':
                                continue
                            elif child.tag == 'exframe':
                                if 'exframe' in example_dict:
                                    print(lexloc, 'more than one <exframe> '
                                          'for <example>')
                                warn_attribs(lexloc, child, set())
                                if not child.text:
                                    print(lexloc, '<exframe> with no text')
                                    continue
                                example_dict['exframe'] = str(child.text)
                            else:
                                print(lexloc,
                                      'unrecognised child of <example>',
                                      child)
                        lexunit_dict['examples'].append(example_dict)
                    elif child.tag == 'frame':
                        frame = child
                        warn_attribs(lexloc, frame, set())
                        if 0 < len(frame):
                            print(lexloc, 'unrecognised <frame> children',
                                  list(frame))
                        if not frame.text:
                            print(lexloc, '<frame> without text')
                            continue
                        lexunit_dict['frames'].append(str(frame.text))
                    elif child.tag == 'compound':
                        compound = child
                        warn_attribs(lexloc, compound, set())
                        compound_dict = {}
                        for child in compound:
                            if child.tag == 'modifier':
                                modifier_dict = dict(child.items())
                                warn_attribs(lexloc, child,
                                             MODIFIER_ATTRIBS, set())
                                if not child.text:
                                    print(lexloc, 'modifier without text')
                                    continue
                                modifier_dict['text'] = str(child.text)
                                if 'modifier' not in compound_dict:
                                    compound_dict['modifier'] = []
                                compound_dict['modifier'].append(modifier_dict)
                            elif child.tag == 'head':
                                head_dict = dict(child.items())
                                warn_attribs(lexloc, child, HEAD_ATTRIBS, set())
                                if not child.text:
                                    print(lexloc, '<head> without text')
                                    continue
                                head_dict['text'] = str(child.text)
                                if 'head' in compound_dict:
                                    print(lexloc,
                                          'more than one head in <compound>')
                                compound_dict['head'] = head_dict
                            else:
                                print(lexloc,
                                      'unrecognised child of <compound>',
                                      child)
                                continue
                    else:
                        print(lexloc, 'unrecognised child of <lexUnit>', child)
                        continue
            elif child.tag == 'paraphrase':
                paraphrase = child
                warn_attribs(synloc, paraphrase, set())
                paraphrase_text = str(paraphrase.text)
                if not paraphrase_text:
                    print(synloc, 'WARNING: <paraphrase> tag with no text')
            else:
                print(synloc, 'unrecognised child of <synset>', child)
                continue
    return synsets
python
def read_lexical_file(filename):
    '''
    Reads in a GermaNet lexical information file and returns its
    contents as a list of dictionary structures.

    Arguments:
    - `filename`: the name of the XML file to read
    '''
    with open(filename, 'rb') as input_file:
        doc = etree.parse(input_file)
    synsets = []
    assert doc.getroot().tag == 'synsets'
    for synset in doc.getroot():
        if synset.tag != 'synset':
            print('unrecognised child of <synsets>', synset)
            continue
        synset_dict = dict(synset.items())
        synloc = '{0} synset {1},'.format(filename,
                                          synset_dict.get('id', '???'))
        warn_attribs(synloc, synset, SYNSET_ATTRIBS)
        synset_dict['lexunits'] = []
        synsets.append(synset_dict)
        for child in synset:
            if child.tag == 'lexUnit':
                lexunit = child
                lexunit_dict = dict(lexunit.items())
                lexloc = synloc + ' lexUnit {0},'.format(
                    lexunit_dict.get('id', '???'))
                warn_attribs(lexloc, lexunit, LEXUNIT_ATTRIBS)
                # convert some properties to booleans
                for key in ['styleMarking', 'artificial', 'namedEntity']:
                    if key in lexunit_dict:
                        if lexunit_dict[key] not in MAP_YESNO_TO_BOOL:
                            print(lexloc, ('lexunit property {0} has '
                                           'non-boolean value').format(key),
                                  lexunit_dict[key])
                            continue
                        lexunit_dict[key] = MAP_YESNO_TO_BOOL[lexunit_dict[key]]
                # convert sense to integer number
                if 'sense' in lexunit_dict:
                    if lexunit_dict['sense'].isdigit():
                        lexunit_dict['sense'] = int(lexunit_dict['sense'], 10)
                    else:
                        print(lexloc,
                              'lexunit property sense has non-numeric value',
                              lexunit_dict['sense'])
                synset_dict['lexunits'].append(lexunit_dict)
                lexunit_dict['examples'] = []
                lexunit_dict['frames'] = []
                for child in lexunit:
                    if child.tag in ['orthForm', 'orthVar',
                                     'oldOrthForm', 'oldOrthVar']:
                        warn_attribs(lexloc, child, set())
                        if not child.text:
                            print(lexloc, '{0} with no text'.format(child.tag))
                            continue
                        if child.tag in lexunit_dict:
                            print(lexloc, 'more than one {0}'.format(child.tag))
                        lexunit_dict[child.tag] = str(child.text)
                    elif child.tag == 'example':
                        example = child
                        text = [child for child in example
                                if child.tag == 'text']
                        if len(text) != 1 or not text[0].text:
                            print(lexloc, '<example> tag without text')
                        example_dict = {'text': str(text[0].text)}
                        for child in example:
                            if child.tag == 'text':
                                continue
                            elif child.tag == 'exframe':
                                if 'exframe' in example_dict:
                                    print(lexloc, 'more than one <exframe> '
                                          'for <example>')
                                warn_attribs(lexloc, child, set())
                                if not child.text:
                                    print(lexloc, '<exframe> with no text')
                                    continue
                                example_dict['exframe'] = str(child.text)
                            else:
                                print(lexloc,
                                      'unrecognised child of <example>',
                                      child)
                        lexunit_dict['examples'].append(example_dict)
                    elif child.tag == 'frame':
                        frame = child
                        warn_attribs(lexloc, frame, set())
                        if 0 < len(frame):
                            print(lexloc, 'unrecognised <frame> children',
                                  list(frame))
                        if not frame.text:
                            print(lexloc, '<frame> without text')
                            continue
                        lexunit_dict['frames'].append(str(frame.text))
                    elif child.tag == 'compound':
                        compound = child
                        warn_attribs(lexloc, compound, set())
                        compound_dict = {}
                        for child in compound:
                            if child.tag == 'modifier':
                                modifier_dict = dict(child.items())
                                warn_attribs(lexloc, child,
                                             MODIFIER_ATTRIBS, set())
                                if not child.text:
                                    print(lexloc, 'modifier without text')
                                    continue
                                modifier_dict['text'] = str(child.text)
                                if 'modifier' not in compound_dict:
                                    compound_dict['modifier'] = []
                                compound_dict['modifier'].append(modifier_dict)
                            elif child.tag == 'head':
                                head_dict = dict(child.items())
                                warn_attribs(lexloc, child, HEAD_ATTRIBS, set())
                                if not child.text:
                                    print(lexloc, '<head> without text')
                                    continue
                                head_dict['text'] = str(child.text)
                                if 'head' in compound_dict:
                                    print(lexloc,
                                          'more than one head in <compound>')
                                compound_dict['head'] = head_dict
                            else:
                                print(lexloc,
                                      'unrecognised child of <compound>',
                                      child)
                                continue
                    else:
                        print(lexloc, 'unrecognised child of <lexUnit>', child)
                        continue
            elif child.tag == 'paraphrase':
                paraphrase = child
                warn_attribs(synloc, paraphrase, set())
                paraphrase_text = str(paraphrase.text)
                if not paraphrase_text:
                    print(synloc, 'WARNING: <paraphrase> tag with no text')
            else:
                print(synloc, 'unrecognised child of <synset>', child)
                continue
    return synsets
[ "def", "read_lexical_file", "(", "filename", ")", ":", "with", "open", "(", "filename", ",", "'rb'", ")", "as", "input_file", ":", "doc", "=", "etree", ".", "parse", "(", "input_file", ")", "synsets", "=", "[", "]", "assert", "doc", ".", "getroot", "(", ")", ".", "tag", "==", "'synsets'", "for", "synset", "in", "doc", ".", "getroot", "(", ")", ":", "if", "synset", ".", "tag", "!=", "'synset'", ":", "print", "(", "'unrecognised child of <synsets>'", ",", "synset", ")", "continue", "synset_dict", "=", "dict", "(", "synset", ".", "items", "(", ")", ")", "synloc", "=", "'{0} synset {1},'", ".", "format", "(", "filename", ",", "synset_dict", ".", "get", "(", "'id'", ",", "'???'", ")", ")", "warn_attribs", "(", "synloc", ",", "synset", ",", "SYNSET_ATTRIBS", ")", "synset_dict", "[", "'lexunits'", "]", "=", "[", "]", "synsets", ".", "append", "(", "synset_dict", ")", "for", "child", "in", "synset", ":", "if", "child", ".", "tag", "==", "'lexUnit'", ":", "lexunit", "=", "child", "lexunit_dict", "=", "dict", "(", "lexunit", ".", "items", "(", ")", ")", "lexloc", "=", "synloc", "+", "' lexUnit {0},'", ".", "format", "(", "lexunit_dict", ".", "get", "(", "'id'", ",", "'???'", ")", ")", "warn_attribs", "(", "lexloc", ",", "lexunit", ",", "LEXUNIT_ATTRIBS", ")", "# convert some properties to booleans", "for", "key", "in", "[", "'styleMarking'", ",", "'artificial'", ",", "'namedEntity'", "]", ":", "if", "key", "in", "lexunit_dict", ":", "if", "lexunit_dict", "[", "key", "]", "not", "in", "MAP_YESNO_TO_BOOL", ":", "print", "(", "lexloc", ",", "(", "'lexunit property {0} has '", "'non-boolean value'", ")", ".", "format", "(", "key", ")", ",", "lexunit_dict", "[", "key", "]", ")", "continue", "lexunit_dict", "[", "key", "]", "=", "MAP_YESNO_TO_BOOL", "[", "lexunit_dict", "[", "key", "]", "]", "# convert sense to integer number", "if", "'sense'", "in", "lexunit_dict", ":", "if", "lexunit_dict", "[", "'sense'", "]", ".", "isdigit", "(", ")", ":", "lexunit_dict", "[", "'sense'", "]", "=", "int", "(", "lexunit_dict", "[", "'sense'", "]", ",", "10", ")", "else", ":", "print", "(", "lexloc", ",", "'lexunit property sense has non-numeric value'", ",", "lexunit_dict", "[", "'sense'", "]", ")", "synset_dict", "[", "'lexunits'", "]", ".", "append", "(", "lexunit_dict", ")", "lexunit_dict", "[", "'examples'", "]", "=", "[", "]", "lexunit_dict", "[", "'frames'", "]", "=", "[", "]", "for", "child", "in", "lexunit", ":", "if", "child", ".", "tag", "in", "[", "'orthForm'", ",", "'orthVar'", ",", "'oldOrthForm'", ",", "'oldOrthVar'", "]", ":", "warn_attribs", "(", "lexloc", ",", "child", ",", "set", "(", ")", ")", "if", "not", "child", ".", "text", ":", "print", "(", "lexloc", ",", "'{0} with no text'", ".", "format", "(", "child", ".", "tag", ")", ")", "continue", "if", "child", ".", "tag", "in", "lexunit_dict", ":", "print", "(", "lexloc", ",", "'more than one {0}'", ".", "format", "(", "child", ".", "tag", ")", ")", "lexunit_dict", "[", "child", ".", "tag", "]", "=", "str", "(", "child", ".", "text", ")", "elif", "child", ".", "tag", "==", "'example'", ":", "example", "=", "child", "text", "=", "[", "child", "for", "child", "in", "example", "if", "child", ".", "tag", "==", "'text'", "]", "if", "len", "(", "text", ")", "!=", "1", "or", "not", "text", "[", "0", "]", ".", "text", ":", "print", "(", "lexloc", ",", "'<example> tag without text'", ")", "example_dict", "=", "{", "'text'", ":", "str", "(", "text", "[", "0", "]", ".", "text", ")", "}", "for", "child", "in", "example", ":", "if", "child", ".", "tag", "==", 
"'text'", ":", "continue", "elif", "child", ".", "tag", "==", "'exframe'", ":", "if", "'exframe'", "in", "example_dict", ":", "print", "(", "lexloc", ",", "'more than one <exframe> '", "'for <example>'", ")", "warn_attribs", "(", "lexloc", ",", "child", ",", "set", "(", ")", ")", "if", "not", "child", ".", "text", ":", "print", "(", "lexloc", ",", "'<exframe> with no text'", ")", "continue", "example_dict", "[", "'exframe'", "]", "=", "str", "(", "child", ".", "text", ")", "else", ":", "print", "(", "lexloc", ",", "'unrecognised child of <example>'", ",", "child", ")", "lexunit_dict", "[", "'examples'", "]", ".", "append", "(", "example_dict", ")", "elif", "child", ".", "tag", "==", "'frame'", ":", "frame", "=", "child", "warn_attribs", "(", "lexloc", ",", "frame", ",", "set", "(", ")", ")", "if", "0", "<", "len", "(", "frame", ")", ":", "print", "(", "lexloc", ",", "'unrecognised <frame> children'", ",", "list", "(", "frame", ")", ")", "if", "not", "frame", ".", "text", ":", "print", "(", "lexloc", ",", "'<frame> without text'", ")", "continue", "lexunit_dict", "[", "'frames'", "]", ".", "append", "(", "str", "(", "frame", ".", "text", ")", ")", "elif", "child", ".", "tag", "==", "'compound'", ":", "compound", "=", "child", "warn_attribs", "(", "lexloc", ",", "compound", ",", "set", "(", ")", ")", "compound_dict", "=", "{", "}", "for", "child", "in", "compound", ":", "if", "child", ".", "tag", "==", "'modifier'", ":", "modifier_dict", "=", "dict", "(", "child", ".", "items", "(", ")", ")", "warn_attribs", "(", "lexloc", ",", "child", ",", "MODIFIER_ATTRIBS", ",", "set", "(", ")", ")", "if", "not", "child", ".", "text", ":", "print", "(", "lexloc", ",", "'modifier without text'", ")", "continue", "modifier_dict", "[", "'text'", "]", "=", "str", "(", "child", ".", "text", ")", "if", "'modifier'", "not", "in", "compound_dict", ":", "compound_dict", "[", "'modifier'", "]", "=", "[", "]", "compound_dict", "[", "'modifier'", "]", ".", "append", "(", "modifier_dict", ")", "elif", "child", ".", "tag", "==", "'head'", ":", "head_dict", "=", "dict", "(", "child", ".", "items", "(", ")", ")", "warn_attribs", "(", "lexloc", ",", "child", ",", "HEAD_ATTRIBS", ",", "set", "(", ")", ")", "if", "not", "child", ".", "text", ":", "print", "(", "lexloc", ",", "'<head> without text'", ")", "continue", "head_dict", "[", "'text'", "]", "=", "str", "(", "child", ".", "text", ")", "if", "'head'", "in", "compound_dict", ":", "print", "(", "lexloc", ",", "'more than one head in <compound>'", ")", "compound_dict", "[", "'head'", "]", "=", "head_dict", "else", ":", "print", "(", "lexloc", ",", "'unrecognised child of <compound>'", ",", "child", ")", "continue", "else", ":", "print", "(", "lexloc", ",", "'unrecognised child of <lexUnit>'", ",", "child", ")", "continue", "elif", "child", ".", "tag", "==", "'paraphrase'", ":", "paraphrase", "=", "child", "warn_attribs", "(", "synloc", ",", "paraphrase", ",", "set", "(", ")", ")", "paraphrase_text", "=", "str", "(", "paraphrase", ".", "text", ")", "if", "not", "paraphrase_text", ":", "print", "(", "synloc", ",", "'WARNING: <paraphrase> tag with no text'", ")", "else", ":", "print", "(", "synloc", ",", "'unrecognised child of <synset>'", ",", "child", ")", "continue", "return", "synsets" ]
Reads in a GermaNet lexical information file and returns its contents as a list of dictionary structures. Arguments: - `filename`: the name of the XML file to read
[ "Reads", "in", "a", "GermaNet", "lexical", "information", "file", "and", "returns", "its", "contents", "as", "a", "list", "of", "dictionary", "structures", "." ]
train
https://github.com/wroberts/pygermanet/blob/1818c20a7e8c431c4cfb5a570ed0d850bb6dd515/pygermanet/mongo_import.py#L132-L275
wroberts/pygermanet
pygermanet/mongo_import.py
read_relation_file
def read_relation_file(filename): ''' Reads the GermaNet relation file ``gn_relations.xml`` which lists all the relations holding between lexical units and synsets. Arguments: - `filename`: ''' with open(filename, 'rb') as input_file: doc = etree.parse(input_file) lex_rels = [] con_rels = [] assert doc.getroot().tag == 'relations' for child in doc.getroot(): if child.tag == 'lex_rel': if 0 < len(child): print('<lex_rel> has unexpected child node') child_dict = dict(child.items()) warn_attribs('', child, RELATION_ATTRIBS, RELATION_ATTRIBS_REQD) if child_dict['dir'] not in LEX_REL_DIRS: print('unrecognized <lex_rel> dir', child_dict['dir']) if child_dict['dir'] == 'both' and 'inv' not in child_dict: print('<lex_rel> has dir=both but does not specify inv') lex_rels.append(child_dict) elif child.tag == 'con_rel': if 0 < len(child): print('<con_rel> has unexpected child node') child_dict = dict(child.items()) warn_attribs('', child, RELATION_ATTRIBS, RELATION_ATTRIBS_REQD) if child_dict['dir'] not in CON_REL_DIRS: print('unrecognised <con_rel> dir', child_dict['dir']) if (child_dict['dir'] in ['both', 'revert'] and 'inv' not in child_dict): print('<con_rel> has dir={0} but does not specify inv'.format( child_dict['dir'])) con_rels.append(child_dict) else: print('unrecognised child of <relations>', child) continue return lex_rels, con_rels
python
def read_relation_file(filename): ''' Reads the GermaNet relation file ``gn_relations.xml`` which lists all the relations holding between lexical units and synsets. Arguments: - `filename`: ''' with open(filename, 'rb') as input_file: doc = etree.parse(input_file) lex_rels = [] con_rels = [] assert doc.getroot().tag == 'relations' for child in doc.getroot(): if child.tag == 'lex_rel': if 0 < len(child): print('<lex_rel> has unexpected child node') child_dict = dict(child.items()) warn_attribs('', child, RELATION_ATTRIBS, RELATION_ATTRIBS_REQD) if child_dict['dir'] not in LEX_REL_DIRS: print('unrecognized <lex_rel> dir', child_dict['dir']) if child_dict['dir'] == 'both' and 'inv' not in child_dict: print('<lex_rel> has dir=both but does not specify inv') lex_rels.append(child_dict) elif child.tag == 'con_rel': if 0 < len(child): print('<con_rel> has unexpected child node') child_dict = dict(child.items()) warn_attribs('', child, RELATION_ATTRIBS, RELATION_ATTRIBS_REQD) if child_dict['dir'] not in CON_REL_DIRS: print('unrecognised <con_rel> dir', child_dict['dir']) if (child_dict['dir'] in ['both', 'revert'] and 'inv' not in child_dict): print('<con_rel> has dir={0} but does not specify inv'.format( child_dict['dir'])) con_rels.append(child_dict) else: print('unrecognised child of <relations>', child) continue return lex_rels, con_rels
[ "def", "read_relation_file", "(", "filename", ")", ":", "with", "open", "(", "filename", ",", "'rb'", ")", "as", "input_file", ":", "doc", "=", "etree", ".", "parse", "(", "input_file", ")", "lex_rels", "=", "[", "]", "con_rels", "=", "[", "]", "assert", "doc", ".", "getroot", "(", ")", ".", "tag", "==", "'relations'", "for", "child", "in", "doc", ".", "getroot", "(", ")", ":", "if", "child", ".", "tag", "==", "'lex_rel'", ":", "if", "0", "<", "len", "(", "child", ")", ":", "print", "(", "'<lex_rel> has unexpected child node'", ")", "child_dict", "=", "dict", "(", "child", ".", "items", "(", ")", ")", "warn_attribs", "(", "''", ",", "child", ",", "RELATION_ATTRIBS", ",", "RELATION_ATTRIBS_REQD", ")", "if", "child_dict", "[", "'dir'", "]", "not", "in", "LEX_REL_DIRS", ":", "print", "(", "'unrecognized <lex_rel> dir'", ",", "child_dict", "[", "'dir'", "]", ")", "if", "child_dict", "[", "'dir'", "]", "==", "'both'", "and", "'inv'", "not", "in", "child_dict", ":", "print", "(", "'<lex_rel> has dir=both but does not specify inv'", ")", "lex_rels", ".", "append", "(", "child_dict", ")", "elif", "child", ".", "tag", "==", "'con_rel'", ":", "if", "0", "<", "len", "(", "child", ")", ":", "print", "(", "'<con_rel> has unexpected child node'", ")", "child_dict", "=", "dict", "(", "child", ".", "items", "(", ")", ")", "warn_attribs", "(", "''", ",", "child", ",", "RELATION_ATTRIBS", ",", "RELATION_ATTRIBS_REQD", ")", "if", "child_dict", "[", "'dir'", "]", "not", "in", "CON_REL_DIRS", ":", "print", "(", "'unrecognised <con_rel> dir'", ",", "child_dict", "[", "'dir'", "]", ")", "if", "(", "child_dict", "[", "'dir'", "]", "in", "[", "'both'", ",", "'revert'", "]", "and", "'inv'", "not", "in", "child_dict", ")", ":", "print", "(", "'<con_rel> has dir={0} but does not specify inv'", ".", "format", "(", "child_dict", "[", "'dir'", "]", ")", ")", "con_rels", ".", "append", "(", "child_dict", ")", "else", ":", "print", "(", "'unrecognised child of <relations>'", ",", "child", ")", "continue", "return", "lex_rels", ",", "con_rels" ]
Reads the GermaNet relation file ``gn_relations.xml`` which lists all the relations holding between lexical units and synsets. Arguments: - `filename`:
[ "Reads", "the", "GermaNet", "relation", "file", "gn_relations", ".", "xml", "which", "lists", "all", "the", "relations", "holding", "between", "lexical", "units", "and", "synsets", "." ]
train
https://github.com/wroberts/pygermanet/blob/1818c20a7e8c431c4cfb5a570ed0d850bb6dd515/pygermanet/mongo_import.py#L288-L329
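The dir/inv handling means one XML element can yield two edges: the named relation, and its inverse when dir is 'both' (or, for synset relations, 'revert'). A sketch of that expansion, with an invented relation record:

def edges(rel, invertible_dirs=('both', 'revert')):
    # forward edge always; inverse edge only for invertible directions
    out = [(rel['from'], rel['name'], rel['to'])]
    if rel['dir'] in invertible_dirs:
        out.append((rel['to'], rel['inv'], rel['from']))
    return out

rel = {'from': 's1', 'to': 's2', 'name': 'hyperonymy',
       'inv': 'hyponymy', 'dir': 'revert'}
print(edges(rel))
# [('s1', 'hyperonymy', 's2'), ('s2', 'hyponymy', 's1')]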
wroberts/pygermanet
pygermanet/mongo_import.py
read_paraphrase_file
def read_paraphrase_file(filename): ''' Reads in a GermaNet wiktionary paraphrase file and returns its contents as a list of dictionary structures. Arguments: - `filename`: ''' with open(filename, 'rb') as input_file: doc = etree.parse(input_file) assert doc.getroot().tag == 'wiktionaryParaphrases' paraphrases = [] for child in doc.getroot(): if child.tag == 'wiktionaryParaphrase': paraphrase = child warn_attribs('', paraphrase, PARAPHRASE_ATTRIBS) if 0 < len(paraphrase): print('unrecognised child of <wiktionaryParaphrase>', list(paraphrase)) paraphrase_dict = dict(paraphrase.items()) if paraphrase_dict['edited'] not in MAP_YESNO_TO_BOOL: print('<paraphrase> attribute "edited" has unexpected value', paraphrase_dict['edited']) else: paraphrase_dict['edited'] = MAP_YESNO_TO_BOOL[ paraphrase_dict['edited']] if not paraphrase_dict['wiktionarySenseId'].isdigit(): print('<paraphrase> attribute "wiktionarySenseId" has ' 'non-integer value', paraphrase_dict['edited']) else: paraphrase_dict['wiktionarySenseId'] = \ int(paraphrase_dict['wiktionarySenseId'], 10) paraphrases.append(paraphrase_dict) else: print('unknown child of <wiktionaryParaphrases>', child) return paraphrases
python
def read_paraphrase_file(filename): ''' Reads in a GermaNet wiktionary paraphrase file and returns its contents as a list of dictionary structures. Arguments: - `filename`: ''' with open(filename, 'rb') as input_file: doc = etree.parse(input_file) assert doc.getroot().tag == 'wiktionaryParaphrases' paraphrases = [] for child in doc.getroot(): if child.tag == 'wiktionaryParaphrase': paraphrase = child warn_attribs('', paraphrase, PARAPHRASE_ATTRIBS) if 0 < len(paraphrase): print('unrecognised child of <wiktionaryParaphrase>', list(paraphrase)) paraphrase_dict = dict(paraphrase.items()) if paraphrase_dict['edited'] not in MAP_YESNO_TO_BOOL: print('<paraphrase> attribute "edited" has unexpected value', paraphrase_dict['edited']) else: paraphrase_dict['edited'] = MAP_YESNO_TO_BOOL[ paraphrase_dict['edited']] if not paraphrase_dict['wiktionarySenseId'].isdigit(): print('<paraphrase> attribute "wiktionarySenseId" has ' 'non-integer value', paraphrase_dict['edited']) else: paraphrase_dict['wiktionarySenseId'] = \ int(paraphrase_dict['wiktionarySenseId'], 10) paraphrases.append(paraphrase_dict) else: print('unknown child of <wiktionaryParaphrases>', child) return paraphrases
[ "def", "read_paraphrase_file", "(", "filename", ")", ":", "with", "open", "(", "filename", ",", "'rb'", ")", "as", "input_file", ":", "doc", "=", "etree", ".", "parse", "(", "input_file", ")", "assert", "doc", ".", "getroot", "(", ")", ".", "tag", "==", "'wiktionaryParaphrases'", "paraphrases", "=", "[", "]", "for", "child", "in", "doc", ".", "getroot", "(", ")", ":", "if", "child", ".", "tag", "==", "'wiktionaryParaphrase'", ":", "paraphrase", "=", "child", "warn_attribs", "(", "''", ",", "paraphrase", ",", "PARAPHRASE_ATTRIBS", ")", "if", "0", "<", "len", "(", "paraphrase", ")", ":", "print", "(", "'unrecognised child of <wiktionaryParaphrase>'", ",", "list", "(", "paraphrase", ")", ")", "paraphrase_dict", "=", "dict", "(", "paraphrase", ".", "items", "(", ")", ")", "if", "paraphrase_dict", "[", "'edited'", "]", "not", "in", "MAP_YESNO_TO_BOOL", ":", "print", "(", "'<paraphrase> attribute \"edited\" has unexpected value'", ",", "paraphrase_dict", "[", "'edited'", "]", ")", "else", ":", "paraphrase_dict", "[", "'edited'", "]", "=", "MAP_YESNO_TO_BOOL", "[", "paraphrase_dict", "[", "'edited'", "]", "]", "if", "not", "paraphrase_dict", "[", "'wiktionarySenseId'", "]", ".", "isdigit", "(", ")", ":", "print", "(", "'<paraphrase> attribute \"wiktionarySenseId\" has '", "'non-integer value'", ",", "paraphrase_dict", "[", "'edited'", "]", ")", "else", ":", "paraphrase_dict", "[", "'wiktionarySenseId'", "]", "=", "int", "(", "paraphrase_dict", "[", "'wiktionarySenseId'", "]", ",", "10", ")", "paraphrases", ".", "append", "(", "paraphrase_dict", ")", "else", ":", "print", "(", "'unknown child of <wiktionaryParaphrases>'", ",", "child", ")", "return", "paraphrases" ]
Reads in a GermaNet wiktionary paraphrase file and returns its contents as a list of dictionary structures. Arguments: - `filename`:
[ "Reads", "in", "a", "GermaNet", "wiktionary", "paraphrase", "file", "and", "returns", "its", "contents", "as", "a", "list", "of", "dictionary", "structures", "." ]
train
https://github.com/wroberts/pygermanet/blob/1818c20a7e8c431c4cfb5a570ed0d850bb6dd515/pygermanet/mongo_import.py#L339-L376
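The two field coercions used above, yes/no strings to booleans and digit strings to ints, in isolation:

MAP_YESNO_TO_BOOL = {'yes': True, 'no': False}

def coerce(d):
    if d.get('edited') in MAP_YESNO_TO_BOOL:
        d['edited'] = MAP_YESNO_TO_BOOL[d['edited']]
    if d.get('wiktionarySenseId', '').isdigit():
        d['wiktionarySenseId'] = int(d['wiktionarySenseId'], 10)
    return d

print(coerce({'edited': 'no', 'wiktionarySenseId': '3'}))
# {'edited': False, 'wiktionarySenseId': 3}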
wroberts/pygermanet
pygermanet/mongo_import.py
insert_lexical_information
def insert_lexical_information(germanet_db, lex_files):
    '''
    Reads in the given lexical information files and inserts their
    contents into the given MongoDB database.

    Arguments:
    - `germanet_db`: a pymongo.database.Database object
    - `lex_files`: a list of paths to XML files containing lexical
      information
    '''
    # drop the database collections if they already exist
    germanet_db.lexunits.drop()
    germanet_db.synsets.drop()

    # inject data from XML files into the database
    for lex_file in lex_files:
        synsets = read_lexical_file(lex_file)
        for synset in synsets:
            synset = dict((SYNSET_KEY_REWRITES.get(key, key), value)
                          for (key, value) in synset.items())
            lexunits = synset['lexunits']
            synset['lexunits'] = germanet_db.lexunits.insert(lexunits)
            synset_id = germanet_db.synsets.insert(synset)
            for lexunit in lexunits:
                lexunit['synset'] = synset_id
                lexunit['category'] = synset['category']
                germanet_db.lexunits.save(lexunit)

    # index the two collections by id
    germanet_db.synsets.create_index('id')
    germanet_db.lexunits.create_index('id')
    # also index lexunits by lemma, lemma-pos, and lemma-pos-sensenum
    germanet_db.lexunits.create_index([('orthForm', DESCENDING)])
    germanet_db.lexunits.create_index([('orthForm', DESCENDING),
                                       ('category', DESCENDING)])
    germanet_db.lexunits.create_index([('orthForm', DESCENDING),
                                       ('category', DESCENDING),
                                       ('sense', DESCENDING)])

    print('Inserted {0} synsets, {1} lexical units.'.format(
        germanet_db.synsets.count(),
        germanet_db.lexunits.count()))
python
def insert_lexical_information(germanet_db, lex_files):
    '''
    Reads in the given lexical information files and inserts their
    contents into the given MongoDB database.

    Arguments:
    - `germanet_db`: a pymongo.database.Database object
    - `lex_files`: a list of paths to XML files containing lexical
      information
    '''
    # drop the database collections if they already exist
    germanet_db.lexunits.drop()
    germanet_db.synsets.drop()

    # inject data from XML files into the database
    for lex_file in lex_files:
        synsets = read_lexical_file(lex_file)
        for synset in synsets:
            synset = dict((SYNSET_KEY_REWRITES.get(key, key), value)
                          for (key, value) in synset.items())
            lexunits = synset['lexunits']
            synset['lexunits'] = germanet_db.lexunits.insert(lexunits)
            synset_id = germanet_db.synsets.insert(synset)
            for lexunit in lexunits:
                lexunit['synset'] = synset_id
                lexunit['category'] = synset['category']
                germanet_db.lexunits.save(lexunit)

    # index the two collections by id
    germanet_db.synsets.create_index('id')
    germanet_db.lexunits.create_index('id')
    # also index lexunits by lemma, lemma-pos, and lemma-pos-sensenum
    germanet_db.lexunits.create_index([('orthForm', DESCENDING)])
    germanet_db.lexunits.create_index([('orthForm', DESCENDING),
                                       ('category', DESCENDING)])
    germanet_db.lexunits.create_index([('orthForm', DESCENDING),
                                       ('category', DESCENDING),
                                       ('sense', DESCENDING)])

    print('Inserted {0} synsets, {1} lexical units.'.format(
        germanet_db.synsets.count(),
        germanet_db.lexunits.count()))
[ "def", "insert_lexical_information", "(", "germanet_db", ",", "lex_files", ")", ":", "# drop the database collections if they already exist", "germanet_db", ".", "lexunits", ".", "drop", "(", ")", "germanet_db", ".", "synsets", ".", "drop", "(", ")", "# inject data from XML files into the database", "for", "lex_file", "in", "lex_files", ":", "synsets", "=", "read_lexical_file", "(", "lex_file", ")", "for", "synset", "in", "synsets", ":", "synset", "=", "dict", "(", "(", "SYNSET_KEY_REWRITES", ".", "get", "(", "key", ",", "key", ")", ",", "value", ")", "for", "(", "key", ",", "value", ")", "in", "synset", ".", "items", "(", ")", ")", "lexunits", "=", "synset", "[", "'lexunits'", "]", "synset", "[", "'lexunits'", "]", "=", "germanet_db", ".", "lexunits", ".", "insert", "(", "lexunits", ")", "synset_id", "=", "germanet_db", ".", "synsets", ".", "insert", "(", "synset", ")", "for", "lexunit", "in", "lexunits", ":", "lexunit", "[", "'synset'", "]", "=", "synset_id", "lexunit", "[", "'category'", "]", "=", "synset", "[", "'category'", "]", "germanet_db", ".", "lexunits", ".", "save", "(", "lexunit", ")", "# index the two collections by id", "germanet_db", ".", "synsets", ".", "create_index", "(", "'id'", ")", "germanet_db", ".", "lexunits", ".", "create_index", "(", "'id'", ")", "# also index lexunits by lemma, lemma-pos, and lemma-pos-sensenum", "germanet_db", ".", "lexunits", ".", "create_index", "(", "[", "(", "'orthForm'", ",", "DESCENDING", ")", "]", ")", "germanet_db", ".", "lexunits", ".", "create_index", "(", "[", "(", "'orthForm'", ",", "DESCENDING", ")", ",", "(", "'category'", ",", "DESCENDING", ")", "]", ")", "germanet_db", ".", "lexunits", ".", "create_index", "(", "[", "(", "'orthForm'", ",", "DESCENDING", ")", ",", "(", "'category'", ",", "DESCENDING", ")", ",", "(", "'sense'", ",", "DESCENDING", ")", "]", ")", "print", "(", "'Inserted {0} synsets, {1} lexical units.'", ".", "format", "(", "germanet_db", ".", "synsets", ".", "count", "(", ")", ",", "germanet_db", ".", "lexunits", ".", "count", "(", ")", ")", ")" ]
Reads in the given lexical information files and inserts their contents into the given MongoDB database. Arguments: - `germanet_db`: a pymongo.database.Database object - `lex_files`: a list of paths to XML files containing lexical information
[ "Reads", "in", "the", "given", "lexical", "information", "files", "and", "inserts", "their", "contents", "into", "the", "given", "MongoDB", "database", "." ]
train
https://github.com/wroberts/pygermanet/blob/1818c20a7e8c431c4cfb5a570ed0d850bb6dd515/pygermanet/mongo_import.py#L389-L427
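The importer uses the legacy PyMongo insert()/save()/count() calls, which PyMongo 4 no longer provides. Under a current driver, the two-pass insert-then-back-link step would look roughly like the sketch below (a local MongoDB and the database name 'germanet' are assumptions, and the documents are toy data):

from pymongo import MongoClient, DESCENDING

db = MongoClient().germanet
synset = {'id': 's1', 'category': 'verben'}
lexunits = [{'id': 'l1', 'orthForm': 'funktionieren', 'sense': 2}]

ids = db.lexunits.insert_many(lexunits).inserted_ids           # pass 1: units
synset['lexunits'] = ids
synset_id = db.synsets.insert_one(synset).inserted_id          # then the synset
for _id in ids:                                                # pass 2: back-link
    db.lexunits.update_one({'_id': _id},
                           {'$set': {'synset': synset_id,
                                     'category': synset['category']}})
db.lexunits.create_index([('orthForm', DESCENDING),
                          ('category', DESCENDING)])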
wroberts/pygermanet
pygermanet/mongo_import.py
insert_relation_information
def insert_relation_information(germanet_db, gn_rels_file): ''' Reads in the given GermaNet relation file and inserts its contents into the given MongoDB database. Arguments: - `germanet_db`: a pymongo.database.Database object - `gn_rels_file`: ''' lex_rels, con_rels = read_relation_file(gn_rels_file) # cache the lexunits while we work on them lexunits = {} for lex_rel in lex_rels: if lex_rel['from'] not in lexunits: lexunits[lex_rel['from']] = germanet_db.lexunits.find_one( {'id': lex_rel['from']}) from_lexunit = lexunits[lex_rel['from']] if lex_rel['to'] not in lexunits: lexunits[lex_rel['to']] = germanet_db.lexunits.find_one( {'id': lex_rel['to']}) to_lexunit = lexunits[lex_rel['to']] if 'rels' not in from_lexunit: from_lexunit['rels'] = set() from_lexunit['rels'].add((lex_rel['name'], to_lexunit['_id'])) if lex_rel['dir'] == 'both': if 'rels' not in to_lexunit: to_lexunit['rels'] = set() to_lexunit['rels'].add((lex_rel['inv'], from_lexunit['_id'])) for lexunit in lexunits.values(): if 'rels' in lexunit: lexunit['rels'] = sorted(lexunit['rels']) germanet_db.lexunits.save(lexunit) # cache the synsets while we work on them synsets = {} for con_rel in con_rels: if con_rel['from'] not in synsets: synsets[con_rel['from']] = germanet_db.synsets.find_one( {'id': con_rel['from']}) from_synset = synsets[con_rel['from']] if con_rel['to'] not in synsets: synsets[con_rel['to']] = germanet_db.synsets.find_one( {'id': con_rel['to']}) to_synset = synsets[con_rel['to']] if 'rels' not in from_synset: from_synset['rels'] = set() from_synset['rels'].add((con_rel['name'], to_synset['_id'])) if con_rel['dir'] in ['both', 'revert']: if 'rels' not in to_synset: to_synset['rels'] = set() to_synset['rels'].add((con_rel['inv'], from_synset['_id'])) for synset in synsets.values(): if 'rels' in synset: synset['rels'] = sorted(synset['rels']) germanet_db.synsets.save(synset) print('Inserted {0} lexical relations, {1} synset relations.'.format( len(lex_rels), len(con_rels)))
python
def insert_relation_information(germanet_db, gn_rels_file): ''' Reads in the given GermaNet relation file and inserts its contents into the given MongoDB database. Arguments: - `germanet_db`: a pymongo.database.Database object - `gn_rels_file`: ''' lex_rels, con_rels = read_relation_file(gn_rels_file) # cache the lexunits while we work on them lexunits = {} for lex_rel in lex_rels: if lex_rel['from'] not in lexunits: lexunits[lex_rel['from']] = germanet_db.lexunits.find_one( {'id': lex_rel['from']}) from_lexunit = lexunits[lex_rel['from']] if lex_rel['to'] not in lexunits: lexunits[lex_rel['to']] = germanet_db.lexunits.find_one( {'id': lex_rel['to']}) to_lexunit = lexunits[lex_rel['to']] if 'rels' not in from_lexunit: from_lexunit['rels'] = set() from_lexunit['rels'].add((lex_rel['name'], to_lexunit['_id'])) if lex_rel['dir'] == 'both': if 'rels' not in to_lexunit: to_lexunit['rels'] = set() to_lexunit['rels'].add((lex_rel['inv'], from_lexunit['_id'])) for lexunit in lexunits.values(): if 'rels' in lexunit: lexunit['rels'] = sorted(lexunit['rels']) germanet_db.lexunits.save(lexunit) # cache the synsets while we work on them synsets = {} for con_rel in con_rels: if con_rel['from'] not in synsets: synsets[con_rel['from']] = germanet_db.synsets.find_one( {'id': con_rel['from']}) from_synset = synsets[con_rel['from']] if con_rel['to'] not in synsets: synsets[con_rel['to']] = germanet_db.synsets.find_one( {'id': con_rel['to']}) to_synset = synsets[con_rel['to']] if 'rels' not in from_synset: from_synset['rels'] = set() from_synset['rels'].add((con_rel['name'], to_synset['_id'])) if con_rel['dir'] in ['both', 'revert']: if 'rels' not in to_synset: to_synset['rels'] = set() to_synset['rels'].add((con_rel['inv'], from_synset['_id'])) for synset in synsets.values(): if 'rels' in synset: synset['rels'] = sorted(synset['rels']) germanet_db.synsets.save(synset) print('Inserted {0} lexical relations, {1} synset relations.'.format( len(lex_rels), len(con_rels)))
[ "def", "insert_relation_information", "(", "germanet_db", ",", "gn_rels_file", ")", ":", "lex_rels", ",", "con_rels", "=", "read_relation_file", "(", "gn_rels_file", ")", "# cache the lexunits while we work on them", "lexunits", "=", "{", "}", "for", "lex_rel", "in", "lex_rels", ":", "if", "lex_rel", "[", "'from'", "]", "not", "in", "lexunits", ":", "lexunits", "[", "lex_rel", "[", "'from'", "]", "]", "=", "germanet_db", ".", "lexunits", ".", "find_one", "(", "{", "'id'", ":", "lex_rel", "[", "'from'", "]", "}", ")", "from_lexunit", "=", "lexunits", "[", "lex_rel", "[", "'from'", "]", "]", "if", "lex_rel", "[", "'to'", "]", "not", "in", "lexunits", ":", "lexunits", "[", "lex_rel", "[", "'to'", "]", "]", "=", "germanet_db", ".", "lexunits", ".", "find_one", "(", "{", "'id'", ":", "lex_rel", "[", "'to'", "]", "}", ")", "to_lexunit", "=", "lexunits", "[", "lex_rel", "[", "'to'", "]", "]", "if", "'rels'", "not", "in", "from_lexunit", ":", "from_lexunit", "[", "'rels'", "]", "=", "set", "(", ")", "from_lexunit", "[", "'rels'", "]", ".", "add", "(", "(", "lex_rel", "[", "'name'", "]", ",", "to_lexunit", "[", "'_id'", "]", ")", ")", "if", "lex_rel", "[", "'dir'", "]", "==", "'both'", ":", "if", "'rels'", "not", "in", "to_lexunit", ":", "to_lexunit", "[", "'rels'", "]", "=", "set", "(", ")", "to_lexunit", "[", "'rels'", "]", ".", "add", "(", "(", "lex_rel", "[", "'inv'", "]", ",", "from_lexunit", "[", "'_id'", "]", ")", ")", "for", "lexunit", "in", "lexunits", ".", "values", "(", ")", ":", "if", "'rels'", "in", "lexunit", ":", "lexunit", "[", "'rels'", "]", "=", "sorted", "(", "lexunit", "[", "'rels'", "]", ")", "germanet_db", ".", "lexunits", ".", "save", "(", "lexunit", ")", "# cache the synsets while we work on them", "synsets", "=", "{", "}", "for", "con_rel", "in", "con_rels", ":", "if", "con_rel", "[", "'from'", "]", "not", "in", "synsets", ":", "synsets", "[", "con_rel", "[", "'from'", "]", "]", "=", "germanet_db", ".", "synsets", ".", "find_one", "(", "{", "'id'", ":", "con_rel", "[", "'from'", "]", "}", ")", "from_synset", "=", "synsets", "[", "con_rel", "[", "'from'", "]", "]", "if", "con_rel", "[", "'to'", "]", "not", "in", "synsets", ":", "synsets", "[", "con_rel", "[", "'to'", "]", "]", "=", "germanet_db", ".", "synsets", ".", "find_one", "(", "{", "'id'", ":", "con_rel", "[", "'to'", "]", "}", ")", "to_synset", "=", "synsets", "[", "con_rel", "[", "'to'", "]", "]", "if", "'rels'", "not", "in", "from_synset", ":", "from_synset", "[", "'rels'", "]", "=", "set", "(", ")", "from_synset", "[", "'rels'", "]", ".", "add", "(", "(", "con_rel", "[", "'name'", "]", ",", "to_synset", "[", "'_id'", "]", ")", ")", "if", "con_rel", "[", "'dir'", "]", "in", "[", "'both'", ",", "'revert'", "]", ":", "if", "'rels'", "not", "in", "to_synset", ":", "to_synset", "[", "'rels'", "]", "=", "set", "(", ")", "to_synset", "[", "'rels'", "]", ".", "add", "(", "(", "con_rel", "[", "'inv'", "]", ",", "from_synset", "[", "'_id'", "]", ")", ")", "for", "synset", "in", "synsets", ".", "values", "(", ")", ":", "if", "'rels'", "in", "synset", ":", "synset", "[", "'rels'", "]", "=", "sorted", "(", "synset", "[", "'rels'", "]", ")", "germanet_db", ".", "synsets", ".", "save", "(", "synset", ")", "print", "(", "'Inserted {0} lexical relations, {1} synset relations.'", ".", "format", "(", "len", "(", "lex_rels", ")", ",", "len", "(", "con_rels", ")", ")", ")" ]
Reads in the given GermaNet relation file and inserts its contents into the given MongoDB database. Arguments: - `germanet_db`: a pymongo.database.Database object - `gn_rels_file`:
[ "Reads", "in", "the", "given", "GermaNet", "relation", "file", "and", "inserts", "its", "contents", "into", "the", "given", "MongoDB", "database", "." ]
train
https://github.com/wroberts/pygermanet/blob/1818c20a7e8c431c4cfb5a570ed0d850bb6dd515/pygermanet/mongo_import.py#L429-L487
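Accumulating 'rels' as a set and sorting it before saving both de-duplicates repeated edges and makes the stored order deterministic; in miniature:

doc = {}
for name, target in [('hyponym', 'b'), ('hyponym', 'b'), ('hyponym', 'a')]:
    doc.setdefault('rels', set()).add((name, target))   # duplicates collapse
doc['rels'] = sorted(doc['rels'])                       # stable storage order
print(doc)  # {'rels': [('hyponym', 'a'), ('hyponym', 'b')]}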
wroberts/pygermanet
pygermanet/mongo_import.py
insert_paraphrase_information
def insert_paraphrase_information(germanet_db, wiktionary_files):
    '''
    Reads in the given GermaNet wiktionary paraphrase files and inserts
    their contents into the given MongoDB database.

    Arguments:
    - `germanet_db`: a pymongo.database.Database object
    - `wiktionary_files`:
    '''
    num_paraphrases = 0
    # cache the lexunits while we work on them
    lexunits = {}
    for filename in wiktionary_files:
        paraphrases = read_paraphrase_file(filename)
        num_paraphrases += len(paraphrases)
        for paraphrase in paraphrases:
            if paraphrase['lexUnitId'] not in lexunits:
                lexunits[paraphrase['lexUnitId']] = \
                    germanet_db.lexunits.find_one(
                    {'id': paraphrase['lexUnitId']})
            lexunit = lexunits[paraphrase['lexUnitId']]
            if 'paraphrases' not in lexunit:
                lexunit['paraphrases'] = []
            lexunit['paraphrases'].append(paraphrase)
    for lexunit in lexunits.values():
        germanet_db.lexunits.save(lexunit)

    print('Inserted {0} wiktionary paraphrases.'.format(num_paraphrases))
python
def insert_paraphrase_information(germanet_db, wiktionary_files):
    '''
    Reads in the given GermaNet wiktionary paraphrase files and inserts
    their contents into the given MongoDB database.

    Arguments:
    - `germanet_db`: a pymongo.database.Database object
    - `wiktionary_files`:
    '''
    num_paraphrases = 0
    # cache the lexunits while we work on them
    lexunits = {}
    for filename in wiktionary_files:
        paraphrases = read_paraphrase_file(filename)
        num_paraphrases += len(paraphrases)
        for paraphrase in paraphrases:
            if paraphrase['lexUnitId'] not in lexunits:
                lexunits[paraphrase['lexUnitId']] = \
                    germanet_db.lexunits.find_one(
                    {'id': paraphrase['lexUnitId']})
            lexunit = lexunits[paraphrase['lexUnitId']]
            if 'paraphrases' not in lexunit:
                lexunit['paraphrases'] = []
            lexunit['paraphrases'].append(paraphrase)
    for lexunit in lexunits.values():
        germanet_db.lexunits.save(lexunit)

    print('Inserted {0} wiktionary paraphrases.'.format(num_paraphrases))
[ "def", "insert_paraphrase_information", "(", "germanet_db", ",", "wiktionary_files", ")", ":", "num_paraphrases", "=", "0", "# cache the lexunits while we work on them", "lexunits", "=", "{", "}", "for", "filename", "in", "wiktionary_files", ":", "paraphrases", "=", "read_paraphrase_file", "(", "filename", ")", "num_paraphrases", "+=", "len", "(", "paraphrases", ")", "for", "paraphrase", "in", "paraphrases", ":", "if", "paraphrase", "[", "'lexUnitId'", "]", "not", "in", "lexunits", ":", "lexunits", "[", "paraphrase", "[", "'lexUnitId'", "]", "]", "=", "germanet_db", ".", "lexunits", ".", "find_one", "(", "{", "'id'", ":", "paraphrase", "[", "'lexUnitId'", "]", "}", ")", "lexunit", "=", "lexunits", "[", "paraphrase", "[", "'lexUnitId'", "]", "]", "if", "'paraphrases'", "not", "in", "lexunit", ":", "lexunit", "[", "'paraphrases'", "]", "=", "[", "]", "lexunit", "[", "'paraphrases'", "]", ".", "append", "(", "paraphrase", ")", "for", "lexunit", "in", "lexunits", ".", "values", "(", ")", ":", "germanet_db", ".", "lexunits", ".", "save", "(", "lexunit", ")", "print", "(", "'Inserted {0} wiktionary paraphrases.'", ".", "format", "(", "num_paraphrases", ")", ")" ]
Reads in the given GermaNet wiktionary paraphrase files and inserts their contents into the given MongoDB database. Arguments: - `germanet_db`: a pymongo.database.Database object - `wiktionary_files`:
[ "Reads", "in", "the", "given", "GermaNet", "wiktionary", "paraphrase", "files", "and", "inserts", "their", "contents", "into", "the", "given", "MongoDB", "database", "." ]
train
https://github.com/wroberts/pygermanet/blob/1818c20a7e8c431c4cfb5a570ed0d850bb6dd515/pygermanet/mongo_import.py#L489-L516
wroberts/pygermanet
pygermanet/mongo_import.py
insert_lemmatisation_data
def insert_lemmatisation_data(germanet_db): ''' Creates the lemmatiser collection in the given MongoDB instance using the data derived from the Projekt deutscher Wortschatz. Arguments: - `germanet_db`: a pymongo.database.Database object ''' # drop the database collection if it already exists germanet_db.lemmatiser.drop() num_lemmas = 0 input_file = gzip.open(os.path.join(os.path.dirname(__file__), LEMMATISATION_FILE)) for line in input_file: line = line.decode('iso-8859-1').strip().split('\t') assert len(line) == 2 germanet_db.lemmatiser.insert(dict(list(zip(('word', 'lemma'), line)))) num_lemmas += 1 input_file.close() # index the collection on 'word' germanet_db.lemmatiser.create_index('word') print('Inserted {0} lemmatiser entries.'.format(num_lemmas))
python
def insert_lemmatisation_data(germanet_db): ''' Creates the lemmatiser collection in the given MongoDB instance using the data derived from the Projekt deutscher Wortschatz. Arguments: - `germanet_db`: a pymongo.database.Database object ''' # drop the database collection if it already exists germanet_db.lemmatiser.drop() num_lemmas = 0 input_file = gzip.open(os.path.join(os.path.dirname(__file__), LEMMATISATION_FILE)) for line in input_file: line = line.decode('iso-8859-1').strip().split('\t') assert len(line) == 2 germanet_db.lemmatiser.insert(dict(list(zip(('word', 'lemma'), line)))) num_lemmas += 1 input_file.close() # index the collection on 'word' germanet_db.lemmatiser.create_index('word') print('Inserted {0} lemmatiser entries.'.format(num_lemmas))
[ "def", "insert_lemmatisation_data", "(", "germanet_db", ")", ":", "# drop the database collection if it already exists", "germanet_db", ".", "lemmatiser", ".", "drop", "(", ")", "num_lemmas", "=", "0", "input_file", "=", "gzip", ".", "open", "(", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ",", "LEMMATISATION_FILE", ")", ")", "for", "line", "in", "input_file", ":", "line", "=", "line", ".", "decode", "(", "'iso-8859-1'", ")", ".", "strip", "(", ")", ".", "split", "(", "'\\t'", ")", "assert", "len", "(", "line", ")", "==", "2", "germanet_db", ".", "lemmatiser", ".", "insert", "(", "dict", "(", "list", "(", "zip", "(", "(", "'word'", ",", "'lemma'", ")", ",", "line", ")", ")", ")", ")", "num_lemmas", "+=", "1", "input_file", ".", "close", "(", ")", "# index the collection on 'word'", "germanet_db", ".", "lemmatiser", ".", "create_index", "(", "'word'", ")", "print", "(", "'Inserted {0} lemmatiser entries.'", ".", "format", "(", "num_lemmas", ")", ")" ]
Creates the lemmatiser collection in the given MongoDB instance using the data derived from the Projekt deutscher Wortschatz. Arguments: - `germanet_db`: a pymongo.database.Database object
[ "Creates", "the", "lemmatiser", "collection", "in", "the", "given", "MongoDB", "instance", "using", "the", "data", "derived", "from", "the", "Projekt", "deutscher", "Wortschatz", "." ]
train
https://github.com/wroberts/pygermanet/blob/1818c20a7e8c431c4cfb5a570ed0d850bb6dd515/pygermanet/mongo_import.py#L520-L542
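Once the lemmatiser collection has been populated by insert_lemmatisation_data, lookups are single indexed queries on 'word'. A minimal sketch, assuming a local MongoDB instance holding the imported 'germanet' database; the inflected form used here is illustrative:

from pymongo import MongoClient

client = MongoClient()
germanet_db = client['germanet']
# each lemmatiser document maps an inflected 'word' to its 'lemma'
for entry in germanet_db.lemmatiser.find({'word': 'Häuser'}):
    print(entry['lemma'])   # prints 'Haus' if the mapping is present
client.close()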
wroberts/pygermanet
pygermanet/mongo_import.py
insert_infocontent_data
def insert_infocontent_data(germanet_db): ''' For every synset in GermaNet, inserts count information derived from SDEWAC. Arguments: - `germanet_db`: a pymongo.database.Database object ''' gnet = germanet.GermaNet(germanet_db) # use add one smoothing gn_counts = defaultdict(lambda: 1.) total_count = 1 input_file = gzip.open(os.path.join(os.path.dirname(__file__), WORD_COUNT_FILE)) num_lines_read = 0 num_lines = 0 for line in input_file: line = line.decode('utf-8').strip().split('\t') num_lines += 1 if len(line) != 3: continue count, pos, word = line num_lines_read += 1 count = int(count) synsets = set(gnet.synsets(word, pos)) if not synsets: continue # Although Resnik (1995) suggests dividing count by the number # of synsets, Patwardhan et al (2003) argue against doing # this. count = float(count) / len(synsets) for synset in synsets: total_count += count paths = synset.hypernym_paths scount = float(count) / len(paths) for path in paths: for ss in path: gn_counts[ss._id] += scount print('Read {0} of {1} lines from count file.'.format(num_lines_read, num_lines)) print('Recorded counts for {0} synsets.'.format(len(gn_counts))) print('Total count is {0}'.format(total_count)) input_file.close() # update all the synset records in GermaNet num_updates = 0 for synset in germanet_db.synsets.find(): synset['infocont'] = gn_counts[synset['_id']] / total_count germanet_db.synsets.save(synset) num_updates += 1 print('Updated {0} synsets.'.format(num_updates))
python
def insert_infocontent_data(germanet_db): ''' For every synset in GermaNet, inserts count information derived from SDEWAC. Arguments: - `germanet_db`: a pymongo.database.Database object ''' gnet = germanet.GermaNet(germanet_db) # use add one smoothing gn_counts = defaultdict(lambda: 1.) total_count = 1 input_file = gzip.open(os.path.join(os.path.dirname(__file__), WORD_COUNT_FILE)) num_lines_read = 0 num_lines = 0 for line in input_file: line = line.decode('utf-8').strip().split('\t') num_lines += 1 if len(line) != 3: continue count, pos, word = line num_lines_read += 1 count = int(count) synsets = set(gnet.synsets(word, pos)) if not synsets: continue # Although Resnik (1995) suggests dividing count by the number # of synsets, Patwardhan et al (2003) argue against doing # this. count = float(count) / len(synsets) for synset in synsets: total_count += count paths = synset.hypernym_paths scount = float(count) / len(paths) for path in paths: for ss in path: gn_counts[ss._id] += scount print('Read {0} of {1} lines from count file.'.format(num_lines_read, num_lines)) print('Recorded counts for {0} synsets.'.format(len(gn_counts))) print('Total count is {0}'.format(total_count)) input_file.close() # update all the synset records in GermaNet num_updates = 0 for synset in germanet_db.synsets.find(): synset['infocont'] = gn_counts[synset['_id']] / total_count germanet_db.synsets.save(synset) num_updates += 1 print('Updated {0} synsets.'.format(num_updates))
[ "def", "insert_infocontent_data", "(", "germanet_db", ")", ":", "gnet", "=", "germanet", ".", "GermaNet", "(", "germanet_db", ")", "# use add one smoothing", "gn_counts", "=", "defaultdict", "(", "lambda", ":", "1.", ")", "total_count", "=", "1", "input_file", "=", "gzip", ".", "open", "(", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ",", "WORD_COUNT_FILE", ")", ")", "num_lines_read", "=", "0", "num_lines", "=", "0", "for", "line", "in", "input_file", ":", "line", "=", "line", ".", "decode", "(", "'utf-8'", ")", ".", "strip", "(", ")", ".", "split", "(", "'\\t'", ")", "num_lines", "+=", "1", "if", "len", "(", "line", ")", "!=", "3", ":", "continue", "count", ",", "pos", ",", "word", "=", "line", "num_lines_read", "+=", "1", "count", "=", "int", "(", "count", ")", "synsets", "=", "set", "(", "gnet", ".", "synsets", "(", "word", ",", "pos", ")", ")", "if", "not", "synsets", ":", "continue", "# Although Resnik (1995) suggests dividing count by the number", "# of synsets, Patwardhan et al (2003) argue against doing", "# this.", "count", "=", "float", "(", "count", ")", "/", "len", "(", "synsets", ")", "for", "synset", "in", "synsets", ":", "total_count", "+=", "count", "paths", "=", "synset", ".", "hypernym_paths", "scount", "=", "float", "(", "count", ")", "/", "len", "(", "paths", ")", "for", "path", "in", "paths", ":", "for", "ss", "in", "path", ":", "gn_counts", "[", "ss", ".", "_id", "]", "+=", "scount", "print", "(", "'Read {0} of {1} lines from count file.'", ".", "format", "(", "num_lines_read", ",", "num_lines", ")", ")", "print", "(", "'Recorded counts for {0} synsets.'", ".", "format", "(", "len", "(", "gn_counts", ")", ")", ")", "print", "(", "'Total count is {0}'", ".", "format", "(", "total_count", ")", ")", "input_file", ".", "close", "(", ")", "# update all the synset records in GermaNet", "num_updates", "=", "0", "for", "synset", "in", "germanet_db", ".", "synsets", ".", "find", "(", ")", ":", "synset", "[", "'infocont'", "]", "=", "gn_counts", "[", "synset", "[", "'_id'", "]", "]", "/", "total_count", "germanet_db", ".", "synsets", ".", "save", "(", "synset", ")", "num_updates", "+=", "1", "print", "(", "'Updated {0} synsets.'", ".", "format", "(", "num_updates", ")", ")" ]
For every synset in GermaNet, inserts count information derived from SDEWAC. Arguments: - `germanet_db`: a pymongo.database.Database object
[ "For", "every", "synset", "in", "GermaNet", "inserts", "count", "information", "derived", "from", "SDEWAC", "." ]
train
https://github.com/wroberts/pygermanet/blob/1818c20a7e8c431c4cfb5a570ed0d850bb6dd515/pygermanet/mongo_import.py#L551-L600
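Since the 'infocont' value stored on each synset by insert_infocontent_data is a smoothed relative frequency, the Resnik information content of a synset is simply its negative log probability. A minimal sketch over a synset document; the add-one smoothing above guarantees the probability is never zero:

import math

def information_content(synset_doc):
    """Return -log p(s) for a synset document carrying 'infocont'."""
    return -math.log(synset_doc['infocont'])

# illustrative value; real documents come from germanet_db.synsets.find()
print(information_content({'infocont': 1e-6}))   # about 13.8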
wroberts/pygermanet
pygermanet/mongo_import.py
compute_max_min_depth
def compute_max_min_depth(germanet_db): ''' For every part of speech in GermaNet, computes the maximum min_depth in that hierarchy. Arguments: - `germanet_db`: a pymongo.database.Database object ''' gnet = germanet.GermaNet(germanet_db) max_min_depths = defaultdict(lambda: -1) for synset in gnet.all_synsets(): min_depth = synset.min_depth if max_min_depths[synset.category] < min_depth: max_min_depths[synset.category] = min_depth if germanet_db.metainfo.count() == 0: germanet_db.metainfo.insert({}) metainfo = germanet_db.metainfo.find_one() metainfo['max_min_depths'] = max_min_depths germanet_db.metainfo.save(metainfo) print('Computed maximum min_depth for all parts of speech:') print(u', '.join(u'{0}: {1}'.format(k, v) for (k, v) in sorted(max_min_depths.items())).encode('utf-8'))
python
def compute_max_min_depth(germanet_db): ''' For every part of speech in GermaNet, computes the maximum min_depth in that hierarchy. Arguments: - `germanet_db`: a pymongo.database.Database object ''' gnet = germanet.GermaNet(germanet_db) max_min_depths = defaultdict(lambda: -1) for synset in gnet.all_synsets(): min_depth = synset.min_depth if max_min_depths[synset.category] < min_depth: max_min_depths[synset.category] = min_depth if germanet_db.metainfo.count() == 0: germanet_db.metainfo.insert({}) metainfo = germanet_db.metainfo.find_one() metainfo['max_min_depths'] = max_min_depths germanet_db.metainfo.save(metainfo) print('Computed maximum min_depth for all parts of speech:') print(u', '.join(u'{0}: {1}'.format(k, v) for (k, v) in sorted(max_min_depths.items())).encode('utf-8'))
[ "def", "compute_max_min_depth", "(", "germanet_db", ")", ":", "gnet", "=", "germanet", ".", "GermaNet", "(", "germanet_db", ")", "max_min_depths", "=", "defaultdict", "(", "lambda", ":", "-", "1", ")", "for", "synset", "in", "gnet", ".", "all_synsets", "(", ")", ":", "min_depth", "=", "synset", ".", "min_depth", "if", "max_min_depths", "[", "synset", ".", "category", "]", "<", "min_depth", ":", "max_min_depths", "[", "synset", ".", "category", "]", "=", "min_depth", "if", "germanet_db", ".", "metainfo", ".", "count", "(", ")", "==", "0", ":", "germanet_db", ".", "metainfo", ".", "insert", "(", "{", "}", ")", "metainfo", "=", "germanet_db", ".", "metainfo", ".", "find_one", "(", ")", "metainfo", "[", "'max_min_depths'", "]", "=", "max_min_depths", "germanet_db", ".", "metainfo", ".", "save", "(", "metainfo", ")", "print", "(", "'Computed maximum min_depth for all parts of speech:'", ")", "print", "(", "u', '", ".", "join", "(", "u'{0}: {1}'", ".", "format", "(", "k", ",", "v", ")", "for", "(", "k", ",", "v", ")", "in", "sorted", "(", "max_min_depths", ".", "items", "(", ")", ")", ")", ".", "encode", "(", "'utf-8'", ")", ")" ]
For every part of speech in GermaNet, computes the maximum min_depth in that hierarchy. Arguments: - `germanet_db`: a pymongo.database.Database object
[ "For", "every", "part", "of", "speech", "in", "GermaNet", "computes", "the", "maximum", "min_depth", "in", "that", "hierarchy", "." ]
train
https://github.com/wroberts/pygermanet/blob/1818c20a7e8c431c4cfb5a570ed0d850bb6dd515/pygermanet/mongo_import.py#L602-L625
wroberts/pygermanet
pygermanet/mongo_import.py
main
def main(): '''Main function.''' usage = ('\n\n %prog [options] XML_PATH\n\nArguments:\n\n ' 'XML_PATH the directory containing the ' 'GermaNet .xml files') parser = optparse.OptionParser(usage=usage) parser.add_option('--host', default=None, help='hostname or IP address of the MongoDB instance ' 'where the GermaNet database will be inserted ' '(default: %default)') parser.add_option('--port', type='int', default=None, help='port number of the MongoDB instance where the ' 'GermaNet database will be inserted (default: %default)') parser.add_option('--database', dest='database_name', default='germanet', help='the name of the database on the MongoDB instance ' 'where GermaNet will be stored (default: %default)') (options, args) = parser.parse_args() if len(args) != 1: parser.error("incorrect number of arguments") sys.exit(1) xml_path = args[0] client = MongoClient(options.host, options.port) germanet_db = client[options.database_name] lex_files, gn_rels_file, wiktionary_files, ili_files = \ find_germanet_xml_files(xml_path) insert_lexical_information(germanet_db, lex_files) insert_relation_information(germanet_db, gn_rels_file) insert_paraphrase_information(germanet_db, wiktionary_files) insert_lemmatisation_data(germanet_db) insert_infocontent_data(germanet_db) compute_max_min_depth(germanet_db) client.close()
python
def main(): '''Main function.''' usage = ('\n\n %prog [options] XML_PATH\n\nArguments:\n\n ' 'XML_PATH the directory containing the ' 'GermaNet .xml files') parser = optparse.OptionParser(usage=usage) parser.add_option('--host', default=None, help='hostname or IP address of the MongoDB instance ' 'where the GermaNet database will be inserted ' '(default: %default)') parser.add_option('--port', type='int', default=None, help='port number of the MongoDB instance where the ' 'GermaNet database will be inserted (default: %default)') parser.add_option('--database', dest='database_name', default='germanet', help='the name of the database on the MongoDB instance ' 'where GermaNet will be stored (default: %default)') (options, args) = parser.parse_args() if len(args) != 1: parser.error("incorrect number of arguments") sys.exit(1) xml_path = args[0] client = MongoClient(options.host, options.port) germanet_db = client[options.database_name] lex_files, gn_rels_file, wiktionary_files, ili_files = \ find_germanet_xml_files(xml_path) insert_lexical_information(germanet_db, lex_files) insert_relation_information(germanet_db, gn_rels_file) insert_paraphrase_information(germanet_db, wiktionary_files) insert_lemmatisation_data(germanet_db) insert_infocontent_data(germanet_db) compute_max_min_depth(germanet_db) client.close()
[ "def", "main", "(", ")", ":", "usage", "=", "(", "'\\n\\n %prog [options] XML_PATH\\n\\nArguments:\\n\\n '", "'XML_PATH the directory containing the '", "'GermaNet .xml files'", ")", "parser", "=", "optparse", ".", "OptionParser", "(", "usage", "=", "usage", ")", "parser", ".", "add_option", "(", "'--host'", ",", "default", "=", "None", ",", "help", "=", "'hostname or IP address of the MongoDB instance '", "'where the GermaNet database will be inserted '", "'(default: %default)'", ")", "parser", ".", "add_option", "(", "'--port'", ",", "type", "=", "'int'", ",", "default", "=", "None", ",", "help", "=", "'port number of the MongoDB instance where the '", "'GermaNet database will be inserted (default: %default)'", ")", "parser", ".", "add_option", "(", "'--database'", ",", "dest", "=", "'database_name'", ",", "default", "=", "'germanet'", ",", "help", "=", "'the name of the database on the MongoDB instance '", "'where GermaNet will be stored (default: %default)'", ")", "(", "options", ",", "args", ")", "=", "parser", ".", "parse_args", "(", ")", "if", "len", "(", "args", ")", "!=", "1", ":", "parser", ".", "error", "(", "\"incorrect number of arguments\"", ")", "sys", ".", "exit", "(", "1", ")", "xml_path", "=", "args", "[", "0", "]", "client", "=", "MongoClient", "(", "options", ".", "host", ",", "options", ".", "port", ")", "germanet_db", "=", "client", "[", "options", ".", "database_name", "]", "lex_files", ",", "gn_rels_file", ",", "wiktionary_files", ",", "ili_files", "=", "find_germanet_xml_files", "(", "xml_path", ")", "insert_lexical_information", "(", "germanet_db", ",", "lex_files", ")", "insert_relation_information", "(", "germanet_db", ",", "gn_rels_file", ")", "insert_paraphrase_information", "(", "germanet_db", ",", "wiktionary_files", ")", "insert_lemmatisation_data", "(", "germanet_db", ")", "insert_infocontent_data", "(", "germanet_db", ")", "compute_max_min_depth", "(", "germanet_db", ")", "client", ".", "close", "(", ")" ]
Main function.
[ "Main", "function", "." ]
train
https://github.com/wroberts/pygermanet/blob/1818c20a7e8c431c4cfb5a570ed0d850bb6dd515/pygermanet/mongo_import.py#L632-L669
alonho/pql
pql/matching.py
Operator.handle_In
def handle_In(self, node): '''in''' try: elts = node.elts except AttributeError: raise ParseError('Invalid value type for `in` operator: {0}'.format(node.__class__.__name__), col_offset=node.col_offset) return {'$in': list(map(self.field.handle, elts))}
python
def handle_In(self, node): '''in''' try: elts = node.elts except AttributeError: raise ParseError('Invalid value type for `in` operator: {0}'.format(node.__class__.__name__), col_offset=node.col_offset) return {'$in': list(map(self.field.handle, elts))}
[ "def", "handle_In", "(", "self", ",", "node", ")", ":", "try", ":", "elts", "=", "node", ".", "elts", "except", "AttributeError", ":", "raise", "ParseError", "(", "'Invalid value type for `in` operator: {0}'", ".", "format", "(", "node", ".", "__class__", ".", "__name__", ")", ",", "col_offset", "=", "node", ".", "col_offset", ")", "return", "{", "'$in'", ":", "list", "(", "map", "(", "self", ".", "field", ".", "handle", ",", "elts", ")", ")", "}" ]
in
[ "in" ]
train
https://github.com/alonho/pql/blob/fd8fefcb720b4325d27ab71f15a882fe5f9f77e2/pql/matching.py#L302-L309
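To see what the `in` operator above produces end to end, the package-level entry point (the `find` record later in this file) can be used. A sketch with an illustrative field and values; the expected output follows the {'$in': ...} construction above:

import pql

query = pql.find('age in [18, 21, 65]')
print(query)   # expected: {'age': {'$in': [18, 21, 65]}}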
jonathanslenders/textfsm
jtextfsm.py
TextFSMValue.AssignVar
def AssignVar(self, value): """Assign a value to this Value.""" self.value = value # Call OnAssignVar on options. [option.OnAssignVar() for option in self.options]
python
def AssignVar(self, value): """Assign a value to this Value.""" self.value = value # Call OnAssignVar on options. [option.OnAssignVar() for option in self.options]
[ "def", "AssignVar", "(", "self", ",", "value", ")", ":", "self", ".", "value", "=", "value", "# Call OnAssignVar on options.", "[", "option", ".", "OnAssignVar", "(", ")", "for", "option", "in", "self", ".", "options", "]" ]
Assign a value to this Value.
[ "Assign", "a", "value", "to", "this", "Value", "." ]
train
https://github.com/jonathanslenders/textfsm/blob/cca5084512d14bc367205aceb34c938ac1c65daf/jtextfsm.py#L223-L227
jonathanslenders/textfsm
jtextfsm.py
TextFSMValue.Parse
def Parse(self, value): """Parse a 'Value' declaration. Args: value: String line from a template file, must begin with 'Value '. Raises: TextFSMTemplateError: Value declaration contains an error. """ value_line = value.split(' ') if len(value_line) < 3: raise TextFSMTemplateError('Expect at least 3 tokens on line.') if not value_line[2].startswith('('): # Options are present options = value_line[1] for option in options.split(','): self._AddOption(option) # Call option OnCreateOptions callbacks [option.OnCreateOptions() for option in self.options] self.name = value_line[2] self.regex = ' '.join(value_line[3:]) else: # There were no valid options, so there are no options. # Treat this argument as the name. self.name = value_line[1] self.regex = ' '.join(value_line[2:]) if len(self.name) > self.max_name_len: raise TextFSMTemplateError( "Invalid Value name '%s' or name too long." % self.name) if (not re.match(r'^\(.*\)$', self.regex) or self.regex.count('(') != self.regex.count(')')): raise TextFSMTemplateError( "Value '%s' must be contained within a '()' pair." % self.regex) self.template = re.sub(r'^\(', '(?P<%s>' % self.name, self.regex)
python
def Parse(self, value): """Parse a 'Value' declaration. Args: value: String line from a template file, must begin with 'Value '. Raises: TextFSMTemplateError: Value declaration contains an error. """ value_line = value.split(' ') if len(value_line) < 3: raise TextFSMTemplateError('Expect at least 3 tokens on line.') if not value_line[2].startswith('('): # Options are present options = value_line[1] for option in options.split(','): self._AddOption(option) # Call option OnCreateOptions callbacks [option.OnCreateOptions() for option in self.options] self.name = value_line[2] self.regex = ' '.join(value_line[3:]) else: # There were no valid options, so there are no options. # Treat this argument as the name. self.name = value_line[1] self.regex = ' '.join(value_line[2:]) if len(self.name) > self.max_name_len: raise TextFSMTemplateError( "Invalid Value name '%s' or name too long." % self.name) if (not re.match(r'^\(.*\)$', self.regex) or self.regex.count('(') != self.regex.count(')')): raise TextFSMTemplateError( "Value '%s' must be contained within a '()' pair." % self.regex) self.template = re.sub(r'^\(', '(?P<%s>' % self.name, self.regex)
[ "def", "Parse", "(", "self", ",", "value", ")", ":", "value_line", "=", "value", ".", "split", "(", "' '", ")", "if", "len", "(", "value_line", ")", "<", "3", ":", "raise", "TextFSMTemplateError", "(", "'Expect at least 3 tokens on line.'", ")", "if", "not", "value_line", "[", "2", "]", ".", "startswith", "(", "'('", ")", ":", "# Options are present", "options", "=", "value_line", "[", "1", "]", "for", "option", "in", "options", ".", "split", "(", "','", ")", ":", "self", ".", "_AddOption", "(", "option", ")", "# Call option OnCreateOptions callbacks", "[", "option", ".", "OnCreateOptions", "(", ")", "for", "option", "in", "self", ".", "options", "]", "self", ".", "name", "=", "value_line", "[", "2", "]", "self", ".", "regex", "=", "' '", ".", "join", "(", "value_line", "[", "3", ":", "]", ")", "else", ":", "# There were no valid options, so there are no options.", "# Treat this argument as the name.", "self", ".", "name", "=", "value_line", "[", "1", "]", "self", ".", "regex", "=", "' '", ".", "join", "(", "value_line", "[", "2", ":", "]", ")", "if", "len", "(", "self", ".", "name", ")", ">", "self", ".", "max_name_len", ":", "raise", "TextFSMTemplateError", "(", "\"Invalid Value name '%s' or name too long.\"", "%", "self", ".", "name", ")", "if", "(", "not", "re", ".", "match", "(", "r'^\\(.*\\)$'", ",", "self", ".", "regex", ")", "or", "self", ".", "regex", ".", "count", "(", "'('", ")", "!=", "self", ".", "regex", ".", "count", "(", "')'", ")", ")", ":", "raise", "TextFSMTemplateError", "(", "\"Value '%s' must be contained within a '()' pair.\"", "%", "self", ".", "regex", ")", "self", ".", "template", "=", "re", ".", "sub", "(", "r'^\\('", ",", "'(?P<%s>'", "%", "self", ".", "name", ",", "self", ".", "regex", ")" ]
Parse a 'Value' declaration. Args: value: String line from a template file, must begin with 'Value '. Raises: TextFSMTemplateError: Value declaration contains an error.
[ "Parse", "a", "Value", "declaration", "." ]
train
https://github.com/jonathanslenders/textfsm/blob/cca5084512d14bc367205aceb34c938ac1c65daf/jtextfsm.py#L251-L291
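To make the parsing rules above concrete, the same transformation can be reproduced standalone on a typical template declaration (a self-contained sketch mirroring the options branch of Parse, not the class itself):

import re

line = 'Value Required interface (\S+)'   # options, name, capture regex
tokens = line.split(' ')
# tokens[2] does not start with '(', so tokens[1] holds the options
options, name, regex = tokens[1], tokens[2], ' '.join(tokens[3:])
template = re.sub(r'^\(', '(?P<%s>' % name, regex)
print(options, name, template)   # Required interface (?P<interface>\S+)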
jonathanslenders/textfsm
jtextfsm.py
TextFSM._CheckLine
def _CheckLine(self, line): """Passes the line through each rule until a match is made. Args: line: A string, the current input line. """ for rule in self._cur_state: matched = self._CheckRule(rule, line) if matched: for value in matched.groupdict(): self._AssignVar(matched, value) if self._Operations(rule): # Not a Continue so check for state transition. if rule.new_state: if rule.new_state not in ('End', 'EOF'): self._cur_state = self.states[rule.new_state] self._cur_state_name = rule.new_state break
python
def _CheckLine(self, line): """Passes the line through each rule until a match is made. Args: line: A string, the current input line. """ for rule in self._cur_state: matched = self._CheckRule(rule, line) if matched: for value in matched.groupdict(): self._AssignVar(matched, value) if self._Operations(rule): # Not a Continue so check for state transition. if rule.new_state: if rule.new_state not in ('End', 'EOF'): self._cur_state = self.states[rule.new_state] self._cur_state_name = rule.new_state break
[ "def", "_CheckLine", "(", "self", ",", "line", ")", ":", "for", "rule", "in", "self", ".", "_cur_state", ":", "matched", "=", "self", ".", "_CheckRule", "(", "rule", ",", "line", ")", "if", "matched", ":", "for", "value", "in", "matched", ".", "groupdict", "(", ")", ":", "self", ".", "_AssignVar", "(", "matched", ",", "value", ")", "if", "self", ".", "_Operations", "(", "rule", ")", ":", "# Not a Continue so check for state transition.", "if", "rule", ".", "new_state", ":", "if", "rule", ".", "new_state", "not", "in", "(", "'End'", ",", "'EOF'", ")", ":", "self", ".", "_cur_state", "=", "self", ".", "states", "[", "rule", ".", "new_state", "]", "self", ".", "_cur_state_name", "=", "rule", ".", "new_state", "break" ]
Passes the line through each rule until a match is made. Args: line: A string, the current input line.
[ "Passes", "the", "line", "through", "each", "rule", "until", "a", "match", "is", "made", "." ]
train
https://github.com/jonathanslenders/textfsm/blob/cca5084512d14bc367205aceb34c938ac1c65daf/jtextfsm.py#L866-L884
vimeo/graphite-influxdb
graphite_influxdb.py
_make_graphite_api_points_list
def _make_graphite_api_points_list(influxdb_data): """Make graphite-api data points dictionary from Influxdb ResultSet data""" _data = {} for key in influxdb_data.keys(): _data[key[0]] = [(datetime.datetime.fromtimestamp(float(d['time'])), d['value']) for d in influxdb_data.get_points(key[0])] return _data
python
def _make_graphite_api_points_list(influxdb_data): """Make graphite-api data points dictionary from Influxdb ResultSet data""" _data = {} for key in influxdb_data.keys(): _data[key[0]] = [(datetime.datetime.fromtimestamp(float(d['time'])), d['value']) for d in influxdb_data.get_points(key[0])] return _data
[ "def", "_make_graphite_api_points_list", "(", "influxdb_data", ")", ":", "_data", "=", "{", "}", "for", "key", "in", "influxdb_data", ".", "keys", "(", ")", ":", "_data", "[", "key", "[", "0", "]", "]", "=", "[", "(", "datetime", ".", "datetime", ".", "fromtimestamp", "(", "float", "(", "d", "[", "'time'", "]", ")", ")", ",", "d", "[", "'value'", "]", ")", "for", "d", "in", "influxdb_data", ".", "get_points", "(", "key", "[", "0", "]", ")", "]", "return", "_data" ]
Make graphite-api data points dictionary from Influxdb ResultSet data
[ "Make", "graphite", "-", "api", "data", "points", "dictionary", "from", "Influxdb", "ResultSet", "data" ]
train
https://github.com/vimeo/graphite-influxdb/blob/56e4aeec57f39c90f14f3fbec57641b90d06c08b/graphite_influxdb.py#L90-L96
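The helper above flattens an influxdb-python ResultSet into the {series_name: [(datetime, value), ...]} shape that graphite-api expects. A stand-in sketch using a hypothetical object exposing the two ResultSet methods the helper relies on (not the real influxdb class), assuming the helper is importable from graphite_influxdb:

class FakeResultSet:
    """Mimics ResultSet.keys() and ResultSet.get_points()."""
    def __init__(self, series):
        self._series = series
    def keys(self):
        return [(name, None) for name in self._series]
    def get_points(self, name):
        return self._series[name]

data = FakeResultSet({'cpu.load': [{'time': 1500000000, 'value': 0.5}]})
print(_make_graphite_api_points_list(data))
# expected: {'cpu.load': [(datetime.datetime(2017, 7, 14, ...), 0.5)]}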
vimeo/graphite-influxdb
graphite_influxdb.py
InfluxdbFinder._setup_logger
def _setup_logger(self, level, log_file): """Setup log level and log file if set""" if logger.handlers: return level = getattr(logging, level.upper()) logger.setLevel(level) formatter = logging.Formatter( '[%(levelname)s] %(asctime)s - %(module)s.%(funcName)s() - %(message)s') handler = logging.StreamHandler() logger.addHandler(handler) handler.setFormatter(formatter) if not log_file: return try: handler = TimedRotatingFileHandler(log_file) except IOError: logger.error("Could not write to %s, falling back to stdout", log_file) else: logger.addHandler(handler) handler.setFormatter(formatter)
python
def _setup_logger(self, level, log_file): """Setup log level and log file if set""" if logger.handlers: return level = getattr(logging, level.upper()) logger.setLevel(level) formatter = logging.Formatter( '[%(levelname)s] %(asctime)s - %(module)s.%(funcName)s() - %(message)s') handler = logging.StreamHandler() logger.addHandler(handler) handler.setFormatter(formatter) if not log_file: return try: handler = TimedRotatingFileHandler(log_file) except IOError: logger.error("Could not write to %s, falling back to stdout", log_file) else: logger.addHandler(handler) handler.setFormatter(formatter)
[ "def", "_setup_logger", "(", "self", ",", "level", ",", "log_file", ")", ":", "if", "logger", ".", "handlers", ":", "return", "level", "=", "getattr", "(", "logging", ",", "level", ".", "upper", "(", ")", ")", "logger", ".", "setLevel", "(", "level", ")", "formatter", "=", "logging", ".", "Formatter", "(", "'[%(levelname)s] %(asctime)s - %(module)s.%(funcName)s() - %(message)s'", ")", "handler", "=", "logging", ".", "StreamHandler", "(", ")", "logger", ".", "addHandler", "(", "handler", ")", "handler", ".", "setFormatter", "(", "formatter", ")", "if", "not", "log_file", ":", "return", "try", ":", "handler", "=", "TimedRotatingFileHandler", "(", "log_file", ")", "except", "IOError", ":", "logger", ".", "error", "(", "\"Could not write to %s, falling back to stdout\"", ",", "log_file", ")", "else", ":", "logger", ".", "addHandler", "(", "handler", ")", "handler", ".", "setFormatter", "(", "formatter", ")" ]
Setup log level and log file if set
[ "Setup", "log", "level", "and", "log", "file", "if", "set" ]
train
https://github.com/vimeo/graphite-influxdb/blob/56e4aeec57f39c90f14f3fbec57641b90d06c08b/graphite_influxdb.py#L166-L186
vimeo/graphite-influxdb
graphite_influxdb.py
InfluxdbFinder.compile_regex
def compile_regex(self, fmt, query): """Turn glob (graphite) queries into compiled regex. * becomes [^\.]* and . becomes \. fmt argument is so that caller can control anchoring (must contain exactly one {0})""" return re.compile(fmt.format( query.pattern.replace('.', '\.').replace('*', '[^\.]*').replace( '{', '(').replace(',', '|').replace('}', ')') ))
python
def compile_regex(self, fmt, query): """Turn glob (graphite) queries into compiled regex. * becomes [^\.]* and . becomes \. fmt argument is so that caller can control anchoring (must contain exactly one {0})""" return re.compile(fmt.format( query.pattern.replace('.', '\.').replace('*', '[^\.]*').replace( '{', '(').replace(',', '|').replace('}', ')') ))
[ "def", "compile_regex", "(", "self", ",", "fmt", ",", "query", ")", ":", "return", "re", ".", "compile", "(", "fmt", ".", "format", "(", "query", ".", "pattern", ".", "replace", "(", "'.'", ",", "'\\.'", ")", ".", "replace", "(", "'*'", ",", "'[^\\.]*'", ")", ".", "replace", "(", "'{'", ",", "'('", ")", ".", "replace", "(", "','", ",", "'|'", ")", ".", "replace", "(", "'}'", ",", "')'", ")", ")", ")" ]
Turn glob (graphite) queries into compiled regex. * becomes [^\.]* and . becomes \. fmt argument is so that caller can control anchoring (must contain exactly one {0})
[ "Turn", "glob", "(", "graphite", ")", "queries", "into", "compiled", "regex", ".", "*", "becomes", "[", "^", "\\", ".", "]", "*", "and", ".", "becomes", "\\", ".", "fmt", "argument", "is", "so", "that", "caller", "can", "control", "anchoring", "(", "must", "contain", "exactly", "one", "{", "0", "}", ")" ]
train
https://github.com/vimeo/graphite-influxdb/blob/56e4aeec57f39c90f14f3fbec57641b90d06c08b/graphite_influxdb.py#L230-L238
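The same substitution chain can be reproduced outside the class to see what a graphite glob turns into (a self-contained sketch using an illustrative pattern and the '^{0}$' anchoring format):

import re

pattern = 'carbon.agents.{host1,host2}.cpu.*'
regex = re.compile('^{0}$'.format(
    pattern.replace('.', '\.').replace('*', '[^\.]*').replace(
        '{', '(').replace(',', '|').replace('}', ')')))
print(regex.pattern)
# ^carbon\.agents\.(host1|host2)\.cpu\.[^\.]*$
print(bool(regex.match('carbon.agents.host1.cpu.user')))   # True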
alonho/pql
pql/__init__.py
find
def find(expression, schema=None): ''' Gets an <expression> and optional <schema>. <expression> should be a string of python code. <schema> should be a dictionary mapping field names to types. ''' parser = SchemaFreeParser() if schema is None else SchemaAwareParser(schema) return parser.parse(expression)
python
def find(expression, schema=None): ''' Gets an <expression> and optional <schema>. <expression> should be a string of python code. <schema> should be a dictionary mapping field names to types. ''' parser = SchemaFreeParser() if schema is None else SchemaAwareParser(schema) return parser.parse(expression)
[ "def", "find", "(", "expression", ",", "schema", "=", "None", ")", ":", "parser", "=", "SchemaFreeParser", "(", ")", "if", "schema", "is", "None", "else", "SchemaAwareParser", "(", "schema", ")", "return", "parser", ".", "parse", "(", "expression", ")" ]
Gets an <expression> and optional <schema>. <expression> should be a string of python code. <schema> should be a dictionary mapping field names to types.
[ "Gets", "an", "<expression", ">", "and", "optional", "<schema", ">", ".", "<expression", ">", "should", "be", "a", "string", "of", "python", "code", ".", "<schema", ">", "should", "be", "a", "dictionary", "mapping", "field", "names", "to", "types", "." ]
train
https://github.com/alonho/pql/blob/fd8fefcb720b4325d27ab71f15a882fe5f9f77e2/pql/__init__.py#L7-L14
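A quick example of the schema-free path, with illustrative field names; the output shape follows pql's documented behaviour:

import pql

print(pql.find("a > 1 and b == 'foo'"))
# expected: {'$and': [{'a': {'$gt': 1}}, {'b': 'foo'}]}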
alonho/pql
pql/__init__.py
sort
def sort(fields): ''' Gets a list of <fields> to sort by. Also supports getting a single string for sorting by one field. Reverse sort is supported by appending '-' to the field name. Example: sort(['age', '-height']) will sort by ascending age and descending height. ''' from pymongo import ASCENDING, DESCENDING from bson import SON if isinstance(fields, str): fields = [fields] if not hasattr(fields, '__iter__'): raise ValueError("expected a list of strings or a string. not a {}".format(type(fields))) sort = [] for field in fields: if field.startswith('-'): field = field[1:] sort.append((field, DESCENDING)) continue elif field.startswith('+'): field = field[1:] sort.append((field, ASCENDING)) return {'$sort': SON(sort)}
python
def sort(fields): ''' Gets a list of <fields> to sort by. Also supports getting a single string for sorting by one field. Reverse sort is supported by appending '-' to the field name. Example: sort(['age', '-height']) will sort by ascending age and descending height. ''' from pymongo import ASCENDING, DESCENDING from bson import SON if isinstance(fields, str): fields = [fields] if not hasattr(fields, '__iter__'): raise ValueError("expected a list of strings or a string. not a {}".format(type(fields))) sort = [] for field in fields: if field.startswith('-'): field = field[1:] sort.append((field, DESCENDING)) continue elif field.startswith('+'): field = field[1:] sort.append((field, ASCENDING)) return {'$sort': SON(sort)}
[ "def", "sort", "(", "fields", ")", ":", "from", "pymongo", "import", "ASCENDING", ",", "DESCENDING", "from", "bson", "import", "SON", "if", "isinstance", "(", "fields", ",", "str", ")", ":", "fields", "=", "[", "fields", "]", "if", "not", "hasattr", "(", "fields", ",", "'__iter__'", ")", ":", "raise", "ValueError", "(", "\"expected a list of strings or a string. not a {}\"", ".", "format", "(", "type", "(", "fields", ")", ")", ")", "sort", "=", "[", "]", "for", "field", "in", "fields", ":", "if", "field", ".", "startswith", "(", "'-'", ")", ":", "field", "=", "field", "[", "1", ":", "]", "sort", ".", "append", "(", "(", "field", ",", "DESCENDING", ")", ")", "continue", "elif", "field", ".", "startswith", "(", "'+'", ")", ":", "field", "=", "field", "[", "1", ":", "]", "sort", ".", "append", "(", "(", "field", ",", "ASCENDING", ")", ")", "return", "{", "'$sort'", ":", "SON", "(", "sort", ")", "}" ]
Gets a list of <fields> to sort by. Also supports getting a single string for sorting by one field. Reverse sort is supported by appending '-' to the field name. Example: sort(['age', '-height']) will sort by ascending age and descending height.
[ "Gets", "a", "list", "of", "<fields", ">", "to", "sort", "by", ".", "Also", "supports", "getting", "a", "single", "string", "for", "sorting", "by", "one", "field", ".", "Reverse", "sort", "is", "supported", "by", "appending", "-", "to", "the", "field", "name", ".", "Example", ":", "sort", "(", "[", "age", "-", "height", "]", ")", "will", "sort", "by", "ascending", "age", "and", "descending", "height", "." ]
train
https://github.com/alonho/pql/blob/fd8fefcb720b4325d27ab71f15a882fe5f9f77e2/pql/__init__.py#L73-L97
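A worked example matching the docstring above; pymongo's ASCENDING and DESCENDING are 1 and -1:

import pql

print(pql.sort(['age', '-height']))
# expected: {'$sort': SON([('age', 1), ('height', -1)])}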
numberoverzero/bloop
bloop/util.py
ordered
def ordered(obj): """ Return sorted version of nested dicts/lists for comparing. Modified from: http://stackoverflow.com/a/25851972 """ if isinstance(obj, collections.abc.Mapping): return sorted((k, ordered(v)) for k, v in obj.items()) # Special case str since it's a collections.abc.Iterable elif isinstance(obj, str): return obj elif isinstance(obj, collections.abc.Iterable): return sorted(ordered(x) for x in obj) else: return obj
python
def ordered(obj): """ Return sorted version of nested dicts/lists for comparing. Modified from: http://stackoverflow.com/a/25851972 """ if isinstance(obj, collections.abc.Mapping): return sorted((k, ordered(v)) for k, v in obj.items()) # Special case str since it's a collections.abc.Iterable elif isinstance(obj, str): return obj elif isinstance(obj, collections.abc.Iterable): return sorted(ordered(x) for x in obj) else: return obj
[ "def", "ordered", "(", "obj", ")", ":", "if", "isinstance", "(", "obj", ",", "collections", ".", "abc", ".", "Mapping", ")", ":", "return", "sorted", "(", "(", "k", ",", "ordered", "(", "v", ")", ")", "for", "k", ",", "v", "in", "obj", ".", "items", "(", ")", ")", "# Special case str since it's a collections.abc.Iterable", "elif", "isinstance", "(", "obj", ",", "str", ")", ":", "return", "obj", "elif", "isinstance", "(", "obj", ",", "collections", ".", "abc", ".", "Iterable", ")", ":", "return", "sorted", "(", "ordered", "(", "x", ")", "for", "x", "in", "obj", ")", "else", ":", "return", "obj" ]
Return sorted version of nested dicts/lists for comparing. Modified from: http://stackoverflow.com/a/25851972
[ "Return", "sorted", "version", "of", "nested", "dicts", "/", "lists", "for", "comparing", "." ]
train
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/util.py#L65-L80
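The point of the canonicalisation above is order-insensitive comparison of nested structures. A small worked example, assuming the function is importable from bloop.util:

from bloop.util import ordered

d1 = {'b': [2, 1], 'a': 1}
d2 = {'a': 1, 'b': [1, 2]}
print(ordered(d1) == ordered(d2))   # True: key and list order no longer matter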
numberoverzero/bloop
bloop/util.py
walk_subclasses
def walk_subclasses(root): """Does not yield the input class""" classes = [root] visited = set() while classes: cls = classes.pop() if cls is type or cls in visited: continue classes.extend(cls.__subclasses__()) visited.add(cls) if cls is not root: yield cls
python
def walk_subclasses(root): """Does not yield the input class""" classes = [root] visited = set() while classes: cls = classes.pop() if cls is type or cls in visited: continue classes.extend(cls.__subclasses__()) visited.add(cls) if cls is not root: yield cls
[ "def", "walk_subclasses", "(", "root", ")", ":", "classes", "=", "[", "root", "]", "visited", "=", "set", "(", ")", "while", "classes", ":", "cls", "=", "classes", ".", "pop", "(", ")", "if", "cls", "is", "type", "or", "cls", "in", "visited", ":", "continue", "classes", ".", "extend", "(", "cls", ".", "__subclasses__", "(", ")", ")", "visited", ".", "add", "(", "cls", ")", "if", "cls", "is", "not", "root", ":", "yield", "cls" ]
Does not yield the input class
[ "Does", "not", "yield", "the", "input", "class" ]
train
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/util.py#L83-L94
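A small demonstration of the traversal above, assuming the function is importable from bloop.util; the root is excluded and each class is visited at most once thanks to the visited set:

from bloop.util import walk_subclasses

class A: pass
class B(A): pass
class C(B): pass

print(sorted(cls.__name__ for cls in walk_subclasses(A)))   # ['B', 'C']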
numberoverzero/bloop
bloop/util.py
dump_key
def dump_key(engine, obj): """dump the hash (and range, if there is one) key(s) of an object into a dynamo-friendly format. returns {dynamo_name: {type: value} for dynamo_name in hash/range keys} """ key = {} for key_column in obj.Meta.keys: key_value = getattr(obj, key_column.name, missing) if key_value is missing: raise MissingKey("{!r} is missing {}: {!r}".format( obj, "hash_key" if key_column.hash_key else "range_key", key_column.name )) # noinspection PyProtectedMember key_value = engine._dump(key_column.typedef, key_value) key[key_column.dynamo_name] = key_value return key
python
def dump_key(engine, obj): """dump the hash (and range, if there is one) key(s) of an object into a dynamo-friendly format. returns {dynamo_name: {type: value} for dynamo_name in hash/range keys} """ key = {} for key_column in obj.Meta.keys: key_value = getattr(obj, key_column.name, missing) if key_value is missing: raise MissingKey("{!r} is missing {}: {!r}".format( obj, "hash_key" if key_column.hash_key else "range_key", key_column.name )) # noinspection PyProtectedMember key_value = engine._dump(key_column.typedef, key_value) key[key_column.dynamo_name] = key_value return key
[ "def", "dump_key", "(", "engine", ",", "obj", ")", ":", "key", "=", "{", "}", "for", "key_column", "in", "obj", ".", "Meta", ".", "keys", ":", "key_value", "=", "getattr", "(", "obj", ",", "key_column", ".", "name", ",", "missing", ")", "if", "key_value", "is", "missing", ":", "raise", "MissingKey", "(", "\"{!r} is missing {}: {!r}\"", ".", "format", "(", "obj", ",", "\"hash_key\"", "if", "key_column", ".", "hash_key", "else", "\"range_key\"", ",", "key_column", ".", "name", ")", ")", "# noinspection PyProtectedMember", "key_value", "=", "engine", ".", "_dump", "(", "key_column", ".", "typedef", ",", "key_value", ")", "key", "[", "key_column", ".", "dynamo_name", "]", "=", "key_value", "return", "key" ]
dump the hash (and range, if there is one) key(s) of an object into a dynamo-friendly format. returns {dynamo_name: {type: value} for dynamo_name in hash/range keys}
[ "dump", "the", "hash", "(", "and", "range", "if", "there", "is", "one", ")", "key", "(", "s", ")", "of", "an", "object", "into", "a", "dynamo", "-", "friendly", "format", "." ]
train
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/util.py#L125-L142
numberoverzero/bloop
examples/mixins.py
new_expiry
def new_expiry(days=DEFAULT_PASTE_LIFETIME_DAYS): """Return an expiration `days` in the future""" now = delorean.Delorean() return now + datetime.timedelta(days=days)
python
def new_expiry(days=DEFAULT_PASTE_LIFETIME_DAYS): """Return an expiration `days` in the future""" now = delorean.Delorean() return now + datetime.timedelta(days=days)
[ "def", "new_expiry", "(", "days", "=", "DEFAULT_PASTE_LIFETIME_DAYS", ")", ":", "now", "=", "delorean", ".", "Delorean", "(", ")", "return", "now", "+", "datetime", ".", "timedelta", "(", "days", "=", "days", ")" ]
Return an expiration `days` in the future
[ "Return", "an", "expiration", "days", "in", "the", "future" ]
train
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/examples/mixins.py#L14-L17
numberoverzero/bloop
bloop/conditions.py
sync
def sync(obj, engine): """Mark the object as having been persisted at least once. Store the latest snapshot of all marked values.""" snapshot = Condition() # Only expect values (or lack of a value) for columns that have been explicitly set for column in sorted(_obj_tracking[obj]["marked"], key=lambda col: col.dynamo_name): value = getattr(obj, column.name, None) value = engine._dump(column.typedef, value) condition = column == value # The renderer shouldn't try to dump the value again. # We're dumping immediately in case the value is mutable, # such as a set or (many) custom data types. condition.dumped = True snapshot &= condition _obj_tracking[obj]["snapshot"] = snapshot
python
def sync(obj, engine): """Mark the object as having been persisted at least once. Store the latest snapshot of all marked values.""" snapshot = Condition() # Only expect values (or lack of a value) for columns that have been explicitly set for column in sorted(_obj_tracking[obj]["marked"], key=lambda col: col.dynamo_name): value = getattr(obj, column.name, None) value = engine._dump(column.typedef, value) condition = column == value # The renderer shouldn't try to dump the value again. # We're dumping immediately in case the value is mutable, # such as a set or (many) custom data types. condition.dumped = True snapshot &= condition _obj_tracking[obj]["snapshot"] = snapshot
[ "def", "sync", "(", "obj", ",", "engine", ")", ":", "snapshot", "=", "Condition", "(", ")", "# Only expect values (or lack of a value) for columns that have been explicitly set", "for", "column", "in", "sorted", "(", "_obj_tracking", "[", "obj", "]", "[", "\"marked\"", "]", ",", "key", "=", "lambda", "col", ":", "col", ".", "dynamo_name", ")", ":", "value", "=", "getattr", "(", "obj", ",", "column", ".", "name", ",", "None", ")", "value", "=", "engine", ".", "_dump", "(", "column", ".", "typedef", ",", "value", ")", "condition", "=", "column", "==", "value", "# The renderer shouldn't try to dump the value again.", "# We're dumping immediately in case the value is mutable,", "# such as a set or (many) custom data types.", "condition", ".", "dumped", "=", "True", "snapshot", "&=", "condition", "_obj_tracking", "[", "obj", "]", "[", "\"snapshot\"", "]", "=", "snapshot" ]
Mark the object as having been persisted at least once. Store the latest snapshot of all marked values.
[ "Mark", "the", "object", "as", "having", "been", "persisted", "at", "least", "once", "." ]
train
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/conditions.py#L63-L78
numberoverzero/bloop
bloop/conditions.py
printable_name
def printable_name(column, path=None): """Provided for debug output when rendering conditions. User.name[3]["foo"][0]["bar"] -> name[3].foo[0].bar """ pieces = [column.name] path = path or path_of(column) for segment in path: if isinstance(segment, str): pieces.append(segment) else: pieces[-1] += "[{}]".format(segment) return ".".join(pieces)
python
def printable_name(column, path=None): """Provided for debug output when rendering conditions. User.name[3]["foo"][0]["bar"] -> name[3].foo[0].bar """ pieces = [column.name] path = path or path_of(column) for segment in path: if isinstance(segment, str): pieces.append(segment) else: pieces[-1] += "[{}]".format(segment) return ".".join(pieces)
[ "def", "printable_name", "(", "column", ",", "path", "=", "None", ")", ":", "pieces", "=", "[", "column", ".", "name", "]", "path", "=", "path", "or", "path_of", "(", "column", ")", "for", "segment", "in", "path", ":", "if", "isinstance", "(", "segment", ",", "str", ")", ":", "pieces", ".", "append", "(", "segment", ")", "else", ":", "pieces", "[", "-", "1", "]", "+=", "\"[{}]\"", ".", "format", "(", "segment", ")", "return", "\".\"", ".", "join", "(", "pieces", ")" ]
Provided for debug output when rendering conditions. User.name[3]["foo"][0]["bar"] -> name[3].foo[0].bar
[ "Provided", "for", "debug", "output", "when", "rendering", "conditions", "." ]
train
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/conditions.py#L887-L899
numberoverzero/bloop
bloop/conditions.py
iter_conditions
def iter_conditions(condition): """Yield all conditions within the given condition. If the root condition is and/or/not, it is not yielded (unless a cyclic reference to it is found).""" conditions = list() visited = set() # Has to be split out, since we don't want to visit the root (for cyclic conditions) # but we don't want to yield it (if it's non-cyclic) because this only yields inner conditions if condition.operation in {"and", "or"}: conditions.extend(reversed(condition.values)) elif condition.operation == "not": conditions.append(condition.values[0]) else: conditions.append(condition) while conditions: condition = conditions.pop() if condition in visited: continue visited.add(condition) yield condition if condition.operation in {"and", "or", "not"}: conditions.extend(reversed(condition.values))
python
def iter_conditions(condition): """Yield all conditions within the given condition. If the root condition is and/or/not, it is not yielded (unless a cyclic reference to it is found).""" conditions = list() visited = set() # Has to be split out, since we don't want to visit the root (for cyclic conditions) # but we don't want to yield it (if it's non-cyclic) because this only yields inner conditions if condition.operation in {"and", "or"}: conditions.extend(reversed(condition.values)) elif condition.operation == "not": conditions.append(condition.values[0]) else: conditions.append(condition) while conditions: condition = conditions.pop() if condition in visited: continue visited.add(condition) yield condition if condition.operation in {"and", "or", "not"}: conditions.extend(reversed(condition.values))
[ "def", "iter_conditions", "(", "condition", ")", ":", "conditions", "=", "list", "(", ")", "visited", "=", "set", "(", ")", "# Has to be split out, since we don't want to visit the root (for cyclic conditions)", "# but we don't want to yield it (if it's non-cyclic) because this only yields inner conditions", "if", "condition", ".", "operation", "in", "{", "\"and\"", ",", "\"or\"", "}", ":", "conditions", ".", "extend", "(", "reversed", "(", "condition", ".", "values", ")", ")", "elif", "condition", ".", "operation", "==", "\"not\"", ":", "conditions", ".", "append", "(", "condition", ".", "values", "[", "0", "]", ")", "else", ":", "conditions", ".", "append", "(", "condition", ")", "while", "conditions", ":", "condition", "=", "conditions", ".", "pop", "(", ")", "if", "condition", "in", "visited", ":", "continue", "visited", ".", "add", "(", "condition", ")", "yield", "condition", "if", "condition", ".", "operation", "in", "{", "\"and\"", ",", "\"or\"", ",", "\"not\"", "}", ":", "conditions", ".", "extend", "(", "reversed", "(", "condition", ".", "values", ")", ")" ]
Yield all conditions within the given condition. If the root condition is and/or/not, it is not yielded (unless a cyclic reference to it is found).
[ "Yield", "all", "conditions", "within", "the", "given", "condition", "." ]
train
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/conditions.py#L914-L935
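The traversal above is an iterative, cycle-safe depth-first walk. Its core can be mimicked on a plain adjacency mapping to see the visiting order and the cycle guard, independent of bloop's condition classes (an illustrative stand-in, not the real API; unlike iter_conditions, this sketch also yields the root):

def iter_nodes(root, children):
    """Walk nodes in the same cycle-safe style as iter_conditions."""
    stack, visited = [root], set()
    while stack:
        node = stack.pop()
        if node in visited:
            continue
        visited.add(node)
        yield node
        stack.extend(reversed(children.get(node, [])))

graph = {'and': ['a', 'not'], 'not': ['and']}   # 'not' points back at the root
print(list(iter_nodes('and', graph)))   # ['and', 'a', 'not'], no infinite loop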
numberoverzero/bloop
bloop/conditions.py
iter_columns
def iter_columns(condition): """ Yield all columns in the condition or its inner conditions. Unwraps proxies when the condition's column (or any of its values) include paths. """ # Like iter_conditions, this can't live in each condition without going possibly infinite on the # recursion, or passing the visited set through every call. That makes the signature ugly, so we # take care of it here. Luckily, it's pretty easy to leverage iter_conditions and just unpack the # actual columns. visited = set() for condition in iter_conditions(condition): if condition.operation in ("and", "or", "not"): continue # Non-meta conditions always have a column, and each of values has the potential to be a column. # Comparison will only have a list of len 1, but it's simpler to just iterate values and check each # unwrap proxies created for paths column = proxied(condition.column) # special case for None # this could also have skipped on isinstance(condition, Condition) # but this is slightly more flexible for users to create their own None-sentinel Conditions if column is None: continue if column not in visited: visited.add(column) yield column for value in condition.values: if isinstance(value, ComparisonMixin): if value not in visited: visited.add(value) yield value
python
def iter_columns(condition): """ Yield all columns in the condition or its inner conditions. Unwraps proxies when the condition's column (or any of its values) include paths. """ # Like iter_conditions, this can't live in each condition without going possibly infinite on the # recursion, or passing the visited set through every call. That makes the signature ugly, so we # take care of it here. Luckily, it's pretty easy to leverage iter_conditions and just unpack the # actual columns. visited = set() for condition in iter_conditions(condition): if condition.operation in ("and", "or", "not"): continue # Non-meta conditions always have a column, and each of values has the potential to be a column. # Comparison will only have a list of len 1, but it's simpler to just iterate values and check each # unwrap proxies created for paths column = proxied(condition.column) # special case for None # this could also have skipped on isinstance(condition, Condition) # but this is slightly more flexible for users to create their own None-sentinel Conditions if column is None: continue if column not in visited: visited.add(column) yield column for value in condition.values: if isinstance(value, ComparisonMixin): if value not in visited: visited.add(value) yield value
[ "def", "iter_columns", "(", "condition", ")", ":", "# Like iter_conditions, this can't live in each condition without going possibly infinite on the", "# recursion, or passing the visited set through every call. That makes the signature ugly, so we", "# take care of it here. Luckily, it's pretty easy to leverage iter_conditions and just unpack the", "# actual columns.", "visited", "=", "set", "(", ")", "for", "condition", "in", "iter_conditions", "(", "condition", ")", ":", "if", "condition", ".", "operation", "in", "(", "\"and\"", ",", "\"or\"", ",", "\"not\"", ")", ":", "continue", "# Non-meta conditions always have a column, and each of values has the potential to be a column.", "# Comparison will only have a list of len 1, but it's simpler to just iterate values and check each", "# unwrap proxies created for paths", "column", "=", "proxied", "(", "condition", ".", "column", ")", "# special case for None", "# this could also have skipped on isinstance(condition, Condition)", "# but this is slightly more flexible for users to create their own None-sentinel Conditions", "if", "column", "is", "None", ":", "continue", "if", "column", "not", "in", "visited", ":", "visited", ".", "add", "(", "column", ")", "yield", "column", "for", "value", "in", "condition", ".", "values", ":", "if", "isinstance", "(", "value", ",", "ComparisonMixin", ")", ":", "if", "value", "not", "in", "visited", ":", "visited", ".", "add", "(", "value", ")", "yield", "value" ]
Yield all columns in the condition or its inner conditions. Unwraps proxies when the condition's column (or any of its values) include paths.
[ "Yield", "all", "columns", "in", "the", "condition", "or", "its", "inner", "conditions", "." ]
train
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/conditions.py#L938-L970