| sentence1 (stringlengths 52–3.87M) | sentence2 (stringlengths 1–47.2k) | label (stringclasses 1, value: entailment) |
|---|---|---|
def dev_assistant_start(self):
"""
Thread executes devassistant API.
"""
#logger_gui.info("Thread run")
path = self.top_assistant.get_selected_subassistant_path(**self.kwargs)
kwargs_decoded = dict()
for k, v in self.kwargs.items():
kwargs_decoded[k] = \
v.decode(utils.defenc) if not six.PY3 and isinstance(v, str) else v
self.dev_assistant_runner = path_runner.PathRunner(path, kwargs_decoded)
try:
self.dev_assistant_runner.run()
Gdk.threads_enter()
if not self.project_canceled:
message = '<span color="#008000">Done</span>'
link = True
back = False
else:
message = '<span color="#FF0000">Failed</span>'
link = False
back = True
self.allow_buttons(message=message, link=link, back=back)
Gdk.threads_leave()
except exceptions.ClException as cle:
msg = replace_markup_chars(cle.message)
if not six.PY3:
msg = msg.encode(utils.defenc)
self.allow_buttons(back=True, link=False,
message='<span color="#FF0000">Failed: {0}</span>'.
format(msg))
except exceptions.ExecutionException as exe:
msg = replace_markup_chars(six.text_type(exe))
if not six.PY3:
msg = msg.encode(utils.defenc)
self.allow_buttons(back=True, link=False,
message='<span color="#FF0000">Failed: {0}</span>'.
format((msg[:80] + '...') if len(msg) > 80 else msg))
except IOError as ioe:
msg = str(ioe)  # IOError has no .message attribute on Python 3
self.allow_buttons(back=True, link=False,
message='<span color="#FF0000">Failed: {0}</span>'.
format((msg[:80] + '...') if len(msg) > 80 else msg))
|
Thread executes devassistant API.
|
entailment
|
def debug_btn_clicked(self, widget, data=None):
"""
Handler invoked when the debug button is pressed.
"""
self.store.clear()
self.thread = threading.Thread(target=self.logs_update)
self.thread.start()
|
Handler invoked when the debug button is pressed.
|
entailment
|
def logs_update(self):
"""
Function updates logs.
"""
Gdk.threads_enter()
if not self.debugging:
self.debugging = True
self.debug_btn.set_label('Info logs')
else:
self.debugging = False
self.debug_btn.set_label('Debug logs')
for record in self.debug_logs['logs']:
if self.debugging:
# Create a new root tree element
if getattr(record, 'event_type', '') != "cmd_retcode":
self.store.append([format_entry(record, show_level=True, colorize=True)])
else:
if int(record.levelno) > 10:
self.store.append([format_entry(record, colorize=True)])
Gdk.threads_leave()
|
Function updates logs.
|
entailment
|
def clipboard_btn_clicked(self, widget, data=None):
"""
Function copies logs to clipboard.
"""
_clipboard_text = []
for record in self.debug_logs['logs']:
if self.debugging:
_clipboard_text.append(format_entry(record, show_level=True))
else:
if int(record.levelno) > 10:
if getattr(record, 'event_type', ''):
if not record.event_type.startswith("dep_"):
_clipboard_text.append(format_entry(record))
else:
_clipboard_text.append(format_entry(record))
self.gui_helper.create_clipboard(_clipboard_text)
|
Function copies logs to clipboard.
|
entailment
|
def back_btn_clicked(self, widget, data=None):
"""
Handler for the back button.
This occurs when devassistant fails.
"""
self.remove_link_button()
self.run_window.hide()
self.parent.path_window.path_window.show()
|
Handler for the back button.
This occurs when devassistant fails.
|
entailment
|
def main_btn_clicked(self, widget, data=None):
"""
Button switches to Dev Assistant GUI main window
"""
self.remove_link_button()
data = dict()
data['debugging'] = self.debugging
self.run_window.hide()
self.parent.open_window(widget, data)
|
Button switches to Dev Assistant GUI main window
|
entailment
|
def list_view_row_clicked(self, list_view, path, view_column):
"""
Opens the default web browser with the relevant link.
"""
model = list_view.get_model()
text = model[path][0]
match = URL_FINDER.search(text)
if match is not None:
url = match.group(1)
import webbrowser
webbrowser.open(url)
|
Opens the default web browser with the relevant link.
|
entailment
|
def _github_create_twofactor_authorization(cls, ui):
"""Create an authorization for a GitHub user using two-factor
authentication. Unlike its non-two-factor counterpart, this method
does not traverse the available authentications as they are not
visible until the user logs in.
Please note: cls._user's attributes are not accessible until the
authorization is created due to the way (py)github works.
"""
try:
try: # This is necessary to trigger sending a 2FA key to the user
auth = cls._user.create_authorization()
except cls._gh_exceptions.GithubException:
onetime_pw = DialogHelper.ask_for_password(ui, prompt='Your one time password:')
auth = cls._user.create_authorization(scopes=['repo', 'user', 'admin:public_key'],
note="DevAssistant",
onetime_password=onetime_pw)
cls._user = cls._gh_module.Github(login_or_token=auth.token).get_user()
logger.debug('Two-factor authorization for user "{0}" created'.format(cls._user.login))
cls._github_store_authorization(cls._user, auth)
logger.debug('Two-factor authorization token stored')
except cls._gh_exceptions.GithubException as e:
logger.warning('Creating two-factor authorization failed: {0}'.format(e))
|
Create an authorization for a GitHub user using two-factor
authentication. Unlike its non-two-factor counterpart, this method
does not traverse the available authentications as they are not
visible until the user logs in.
Please note: cls._user's attributes are not accessible until the
authorization is created due to the way (py)github works.
|
entailment
|
def _github_create_simple_authorization(cls):
"""Create a GitHub authorization for the given user in case they don't
already have one.
"""
try:
auth = None
for a in cls._user.get_authorizations():
if a.note == 'DevAssistant':
auth = a
if not auth:
auth = cls._user.create_authorization(
scopes=['repo', 'user', 'admin:public_key'],
note="DevAssistant")
cls._github_store_authorization(cls._user, auth)
except cls._gh_exceptions.GithubException as e:
logger.warning('Creating authorization failed: {0}'.format(e))
|
Create a GitHub authorization for the given user in case they don't
already have one.
|
entailment
|
def _github_store_authorization(cls, user, auth):
"""Store an authorization token for the given GitHub user in the git
global config file.
"""
ClHelper.run_command("git config --global github.token.{login} {token}".format(
login=user.login, token=auth.token), log_secret=True)
ClHelper.run_command("git config --global github.user.{login} {login}".format(
login=user.login))
|
Store an authorization token for the given GitHub user in the git
global config file.
|
entailment
|
def _start_ssh_agent(cls):
"""Starts ssh-agent and returns the environment variables related to it"""
env = dict()
stdout = ClHelper.run_command('ssh-agent -s')
lines = stdout.split('\n')
for line in lines:
if not line or line.startswith('echo '):
continue
line = line.split(';')[0]
parts = line.split('=')
if len(parts) == 2:
env[parts[0]] = parts[1]
return env
|
Starts ssh-agent and returns the environment variables related to it
|
entailment
|
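For reference, a standalone sketch of the same parsing applied to typical `ssh-agent -s` output (the sample text below is illustrative):

```python
# Minimal standalone sketch of the parsing above; sample output is made up
# but follows the documented `ssh-agent -s` format.
sample = (
    "SSH_AUTH_SOCK=/tmp/ssh-XXXX/agent.123; export SSH_AUTH_SOCK;\n"
    "SSH_AGENT_PID=124; export SSH_AGENT_PID;\n"
    "echo Agent pid 124;\n"
)

env = {}
for line in sample.split('\n'):
    if not line or line.startswith('echo '):
        continue
    line = line.split(';')[0]          # drop the '; export VAR;' suffix
    parts = line.split('=')
    if len(parts) == 2:
        env[parts[0]] = parts[1]

assert env == {'SSH_AUTH_SOCK': '/tmp/ssh-XXXX/agent.123', 'SSH_AGENT_PID': '124'}
```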
def _github_create_ssh_key(cls):
"""Creates a local ssh key, if it doesn't exist already, and uploads it to Github."""
try:
login = cls._user.login
pkey_path = '{home}/.ssh/{keyname}'.format(
home=os.path.expanduser('~'),
keyname=settings.GITHUB_SSH_KEYNAME.format(login=login))
# generate ssh key only if it doesn't exist
if not os.path.exists(pkey_path):
ClHelper.run_command('ssh-keygen -t rsa -f {pkey_path} -N "" -C "DevAssistant"'.
format(pkey_path=pkey_path))
try:
ClHelper.run_command('ssh-add {pkey_path}'.format(pkey_path=pkey_path))
except exceptions.ClException:
# ssh agent might not be running
env = cls._start_ssh_agent()
ClHelper.run_command('ssh-add {pkey_path}'.format(pkey_path=pkey_path), env=env)
public_key = ClHelper.run_command('cat {pkey_path}.pub'.format(pkey_path=pkey_path))
cls._user.create_key("DevAssistant", public_key)
except exceptions.ClException as e:
msg = 'Couldn\'t create a new ssh key: {0}'.format(e)
raise exceptions.CommandException(msg)
|
Creates a local ssh key, if it doesn't exist already, and uploads it to Github.
|
entailment
|
def _github_ssh_key_exists(cls):
"""Returns True if any key on Github matches a local key, else False."""
remote_keys = map(lambda k: k._key, cls._user.get_keys())
found = False
pubkey_files = glob.glob(os.path.expanduser('~/.ssh/*.pub'))
for rk in remote_keys:
for pkf in pubkey_files:
with io.open(pkf, encoding='utf-8') as keyfile:
local_key = keyfile.read()
# in PyGithub 1.23.0, remote key is an object, not string
rkval = rk if isinstance(rk, six.string_types) else rk.value
# don't use "==" because we have comments etc added in public_key
if rkval in local_key:
found = True
break
return found
|
Returns True if any key on Github matches a local key, else False.
|
entailment
|
def github_authenticated(cls, func):
"""Does user authentication, creates SSH keys if needed and injects "_user" attribute
into class/object bound to the decorated function.
Don't call any other methods of this class manually, this should be everything you need.
"""
def inner(func_cls, *args, **kwargs):
if not cls._gh_module:
logger.warning('PyGithub not installed, skipping Github auth procedures.')
elif not func_cls._user:
# authenticate user, possibly also creating authentication for future use
login = kwargs['login'].encode(utils.defenc) if not six.PY3 else kwargs['login']
func_cls._user = cls._get_github_user(login, kwargs['ui'])
if func_cls._user is None:
msg = 'Github authentication failed, skipping Github command.'
logger.warning(msg)
return (False, msg)
# create an ssh key for pushing if we don't have one
if not cls._github_ssh_key_exists():
cls._github_create_ssh_key()
# next, create ~/.ssh/config entry for the key, if system username != GH login
if cls._ssh_key_needs_config_entry():
cls._create_ssh_config_entry()
return func(func_cls, *args, **kwargs)
return inner
|
Does user authentication, creates SSH keys if needed and injects "_user" attribute
into class/object bound to the decorated function.
Don't call any other methods of this class manually, this should be everything you need.
|
entailment
|
def get_assistants(cls, superassistants):
"""Returns list of assistants that are subassistants of given superassistants
(I love this docstring).
Args:
superassistants: list of top-level assistant instances
Returns:
list of YamlAssistant instances that are subassistants of the given superassistants
"""
_assistants = cls.load_all_assistants(superassistants)
result = []
for supa in superassistants:
result.extend(_assistants[supa.name])
return result
|
Returns list of assistants that are subassistants of given superassistants
(I love this docstring).
Args:
superassistants: list of top-level assistant instances
Returns:
list of YamlAssistant instances that are subassistants of the given superassistants
|
entailment
|
def load_all_assistants(cls, superassistants):
"""Fills self._assistants with loaded YamlAssistant instances of requested roles.
Tries to use cache (updated/created if needed). If cache is unusable, it
falls back to loading all assistants.
Args:
superassistants: list of top-level assistant instances whose subassistants to load
"""
# mapping of assistant roles to lists of top-level assistant instances
_assistants = {}
# {'crt': CreatorAssistant, ...}
superas_dict = dict(map(lambda a: (a.name, a), superassistants))
to_load = set(superas_dict.keys())
for tl in to_load:
dirs = [os.path.join(d, tl) for d in cls.assistants_dirs]
file_hierarchy = cls.get_assistants_file_hierarchy(dirs)
# load all if we're not using cache or if we fail to load it
load_all = not settings.USE_CACHE
if settings.USE_CACHE:
try:
cch = cache.Cache()
cch.refresh_role(tl, file_hierarchy)
_assistants[tl] = cls.get_assistants_from_cache_hierarchy(cch.cache[tl],
superas_dict[tl],
role=tl)
except BaseException as e:
logger.debug('Failed to use DevAssistant cachefile {0}: {1}'.format(
settings.CACHE_FILE, e))
load_all = True
if load_all:
_assistants[tl] = cls.get_assistants_from_file_hierarchy(file_hierarchy,
superas_dict[tl],
role=tl)
return _assistants
|
Returns a mapping of top-level assistant names to lists of loaded YamlAssistant instances.
Tries to use cache (updated/created if needed). If cache is unusable, it
falls back to loading all assistants.
Args:
superassistants: list of top-level assistant instances whose subassistants to load
|
entailment
|
def get_assistants_from_cache_hierarchy(cls, cache_hierarchy, superassistant,
role=settings.DEFAULT_ASSISTANT_ROLE):
"""Accepts cache_hierarch as described in devassistant.cache and returns
instances of YamlAssistant (only with cached attributes) for loaded files
Args:
cache_hierarchy: structure as described in devassistant.cache
role: role of all assistants in this hierarchy (we could find
this out dynamically but it's not worth the pain)
Returns:
list of top level assistants from given hierarchy; these assistants contain
references to instances of their subassistants (and their subassistants, ...)
Note, that the assistants are not fully loaded, but contain just cached attrs.
"""
result = []
for name, attrs in cache_hierarchy.items():
ass = cls.assistant_from_yaml(attrs['source'],
{name: attrs['attrs']},
superassistant,
fully_loaded=False,
role=role)
ass._subassistants = cls.get_assistants_from_cache_hierarchy(attrs['subhierarchy'],
ass,
role=role)
result.append(ass)
return result
|
Accepts cache_hierarchy as described in devassistant.cache and returns
instances of YamlAssistant (only with cached attributes) for loaded files
Args:
cache_hierarchy: structure as described in devassistant.cache
role: role of all assistants in this hierarchy (we could find
this out dynamically but it's not worth the pain)
Returns:
list of top level assistants from given hierarchy; these assistants contain
references to instances of their subassistants (and their subassistants, ...)
Note, that the assistants are not fully loaded, but contain just cached attrs.
|
entailment
|
def get_assistants_from_file_hierarchy(cls, file_hierarchy, superassistant,
role=settings.DEFAULT_ASSISTANT_ROLE):
"""Accepts file_hierarch as returned by cls.get_assistant_file_hierarchy and returns
instances of YamlAssistant for loaded files
Args:
file_hierarchy: structure as described in cls.get_assistants_file_hierarchy
role: role of all assistants in this hierarchy (we could find
this out dynamically but it's not worth the pain)
Returns:
list of top level assistants from given hierarchy; these assistants contain
references to instances of their subassistants (and their subassistants, ...)
"""
result = []
warn_msg = 'Failed to load assistant {source}, skipping subassistants.'
for name, attrs in file_hierarchy.items():
loaded_yaml = yaml_loader.YamlLoader.load_yaml_by_path(attrs['source'])
if loaded_yaml is None: # there was an error parsing yaml
logger.warning(warn_msg.format(source=attrs['source']))
continue
try:
ass = cls.assistant_from_yaml(attrs['source'],
loaded_yaml,
superassistant,
role=role)
except exceptions.YamlError as e:
logger.warning(e)
continue
ass._subassistants = cls.get_assistants_from_file_hierarchy(attrs['subhierarchy'],
ass,
role=role)
result.append(ass)
return result
|
Accepts file_hierarchy as returned by cls.get_assistants_file_hierarchy and returns
instances of YamlAssistant for loaded files
Args:
file_hierarchy: structure as described in cls.get_assistants_file_hierarchy
role: role of all assistants in this hierarchy (we could find
this out dynamically but it's not worth the pain)
Returns:
list of top level assistants from given hierarchy; these assistants contain
references to instances of their subassistants (and their subassistants, ...)
|
entailment
|
def get_assistants_file_hierarchy(cls, dirs):
"""Returns assistants file hierarchy structure (see below) representing assistant
hierarchy in given directories.
It works like this:
1. It goes through all *.yaml files in all given directories and adds them into
hierarchy (if there are two files with same name in more directories, the file
from first directory wins).
2. For each {name}.yaml file, it calls itself recursively for {name} subdirectories
of all given directories.
Args:
dirs: directories to search
Returns:
hierarchy structure that looks like this:
{'assistant1':
{'source': '/path/to/assistant1.yaml',
'subhierarchy': {<hierarchy of subassistants>}},
'assistant2':
{'source': '/path/to/assistant2.yaml',
'subhierarchy': {<another hierarchy of subassistants>}}
}
"""
result = {}
for d in filter(lambda d: os.path.exists(d), dirs):
for f in filter(lambda f: f.endswith('.yaml'), os.listdir(d)):
assistant_name = f[:-5]
if assistant_name not in result:
subas_dirs = [os.path.join(dr, assistant_name) for dr in dirs]
result[assistant_name] = {'source': os.path.join(d, f),
'subhierarchy':
cls.get_assistants_file_hierarchy(subas_dirs)}
return result
|
Returns assistants file hierarchy structure (see below) representing assistant
hierarchy in given directories.
It works like this:
1. It goes through all *.yaml files in all given directories and adds them into
hierarchy (if there are two files with same name in more directories, the file
from first directory wins).
2. For each {name}.yaml file, it calls itself recursively for {name} subdirectories
of all given directories.
Args:
dirs: directories to search
Returns:
hierarchy structure that looks like this:
{'assistant1':
{'source': '/path/to/assistant1.yaml',
'subhierarchy': {<hierarchy of subassistants>}},
'assistant2':
{'source': '/path/to/assistant2.yaml',
'subhierarchy': {<another hierarchy of subassistants>}}
}
|
entailment
|
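A self-contained sketch of the same walk over a throwaway directory tree (file names are hypothetical):

```python
import os
import tempfile

def file_hierarchy(dirs):
    # Mirrors the walk above: the first directory wins on name clashes,
    # then recurse into the {name}/ subdirectories of *all* given dirs.
    result = {}
    for d in filter(os.path.exists, dirs):
        for f in filter(lambda f: f.endswith('.yaml'), os.listdir(d)):
            name = f[:-5]
            if name not in result:
                subdirs = [os.path.join(dr, name) for dr in dirs]
                result[name] = {'source': os.path.join(d, f),
                                'subhierarchy': file_hierarchy(subdirs)}
    return result

root = tempfile.mkdtemp()
open(os.path.join(root, 'python.yaml'), 'w').close()
os.mkdir(os.path.join(root, 'python'))
open(os.path.join(root, 'python', 'django.yaml'), 'w').close()
print(file_hierarchy([root]))
# {'python': {'source': '.../python.yaml',
#             'subhierarchy': {'django': {'source': '.../django.yaml',
#                                         'subhierarchy': {}}}}}
```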
def assistant_from_yaml(cls, source, y, superassistant, fully_loaded=True,
role=settings.DEFAULT_ASSISTANT_ROLE):
"""Constructs instance of YamlAssistant loaded from given structure y, loaded
from source file source.
Args:
source: path to assistant source file
y: loaded yaml structure
superassistant: superassistant of this assistant
Returns:
YamlAssistant instance constructed from y with source file source
Raises:
YamlError: if the assistant is malformed
"""
# In pre-0.9.0, we required assistant to be a mapping of {name: assistant_attributes}
# now we allow that, but we also allow omitting the assistant name and putting
# the attributes to top_level, too.
name = os.path.splitext(os.path.basename(source))[0]
yaml_checker.check(source, y)
assistant = yaml_assistant.YamlAssistant(name, y, source, superassistant,
fully_loaded=fully_loaded, role=role)
return assistant
|
Constructs instance of YamlAssistant loaded from given structure y, loaded
from source file source.
Args:
source: path to assistant source file
y: loaded yaml structure
superassistant: superassistant of this assistant
Returns:
YamlAssistant instance constructed from y with source file source
Raises:
YamlError: if the assistant is malformed
|
entailment
|
def get_snippet_by_name(cls, name):
"""name is in dotted format, e.g. topsnippet.something.wantedsnippet"""
name_with_dir_separators = name.replace('.', os.path.sep)
loaded = yaml_loader.YamlLoader.load_yaml_by_relpath(cls.snippets_dirs,
name_with_dir_separators + '.yaml')
if loaded:
return cls._create_snippet(name, *loaded)
raise exceptions.SnippetNotFoundException('no such snippet: {name}'.
format(name=name_with_dir_separators))
|
name is in dotted format, e.g. topsnippet.something.wantedsnippet
|
entailment
|
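The dotted-name translation is a plain string substitution; a quick illustration:

```python
import os

name = 'topsnippet.something.wantedsnippet'        # hypothetical snippet name
relpath = name.replace('.', os.path.sep) + '.yaml'
print(relpath)   # topsnippet/something/wantedsnippet.yaml on POSIX
```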
def conforms(cntxt: Context, n: Node, S: ShExJ.Shape) -> bool:
""" `5.6.1 Schema Validation Requirement <http://shex.io/shex-semantics/#validation-requirement>`_
A graph G is said to conform with a schema S with a ShapeMap m when:
Every SemAct in the startActs of S has a successful evaluation of semActsSatisfied.
Every node n in m conforms to its associated shapeExprRefs sen where for each shapeExprRef sei in sen:
sei references a ShapeExpr in shapes, and
satisfies(n, sei, G, m) for each shape sei in sen.
:return:
"""
# TODO: validation is not yet implemented -- this stub always reports conformance.
# return semActsSatisfied(cntxt.schema.startActs, cntxt) and \
#     all(reference_of(cntxt.schema, sa.shapeLabel) is not None and
#
return True
|
`5.6.1 Schema Validation Requirement <http://shex.io/shex-semantics/#validation-requirement>`_
A graph G is said to conform with a schema S with a ShapeMap m when:
Every SemAct in the startActs of S has a successful evaluation of semActsSatisfied.
Every node n in m conforms to its associated shapeExprRefs sen where for each shapeExprRef sei in sen:
sei references a ShapeExpr in shapes, and
satisfies(n, sei, G, m) for each shape sei in sen.
:return:
|
entailment
|
def set_legend(self, legend):
"""legend needs to be a list, tuple or None"""
assert isinstance(legend, (list, tuple)) or legend is None
if legend:
self.legend = [quote(a) for a in legend]
else:
self.legend = None
|
legend needs to be a list, tuple or None
|
entailment
|
def set_legend_position(self, legend_position):
"""Sets legend position. Default is 'r'.
b - At the bottom of the chart, legend entries in a horizontal row.
bv - At the bottom of the chart, legend entries in a vertical column.
t - At the top of the chart, legend entries in a horizontal row.
tv - At the top of the chart, legend entries in a vertical column.
r - To the right of the chart, legend entries in a vertical column.
l - To the left of the chart, legend entries in a vertical column.
"""
if legend_position:
self.legend_position = quote(legend_position)
else:
self.legend_position = None
|
Sets legend position. Default is 'r'.
b - At the bottom of the chart, legend entries in a horizontal row.
bv - At the bottom of the chart, legend entries in a vertical column.
t - At the top of the chart, legend entries in a horizontal row.
tv - At the top of the chart, legend entries in a vertical column.
r - To the right of the chart, legend entries in a vertical column.
l - To the left of the chart, legend entries in a vertical column.
|
entailment
|
def data_class_detection(self, data):
"""Determines the appropriate data encoding type to give satisfactory
resolution (http://code.google.com/apis/chart/#chart_data).
"""
assert isinstance(data, (list, tuple))
if not isinstance(self, (LineChart, BarChart, ScatterChart)):
# From the link above:
# Simple encoding is suitable for all other types of chart
# regardless of size.
return SimpleData
elif self.height < 100:
# The link above indicates that line and bar charts less
# than 300px in size can be suitably represented with the
# simple encoding. I've found that this isn't sufficient,
# e.g. examples/line-xy-circle.png. Let's try 100px.
return SimpleData
else:
return ExtendedData
|
Determines the appropriate data encoding type to give satisfactory
resolution (http://code.google.com/apis/chart/#chart_data).
|
entailment
|
def data_x_range(self):
"""Return a 2-tuple giving the minimum and maximum x-axis
data range.
"""
try:
lower = min([min(self._filter_none(s))
for type, s in self.annotated_data()
if type == 'x'])
upper = max([max(self._filter_none(s))
for type, s in self.annotated_data()
if type == 'x'])
return (lower, upper)
except ValueError:
return None
|
Return a 2-tuple giving the minimum and maximum x-axis
data range.
|
entailment
|
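A standalone sketch of the reduction, simulating `annotated_data()` output and the None-filtering that `_filter_none` presumably performs:

```python
# Simulated (type, dataset) pairs as annotated_data() would yield them.
annotated = [('x', [None, 3, 7]), ('y', [10, 20]), ('x', [1, None, 5])]

def filter_none(seq):
    return [v for v in seq if v is not None]

lower = min(min(filter_none(s)) for typ, s in annotated if typ == 'x')
upper = max(max(filter_none(s)) for typ, s in annotated if typ == 'x')
print((lower, upper))   # (1, 7)
```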
def scaled_data(self, data_class, x_range=None, y_range=None):
"""Scale `self.data` as appropriate for the given data encoding
(data_class) and return it.
An optional `y_range` -- a 2-tuple (lower, upper) -- can be
given to specify the y-axis bounds. If not given, the range is
inferred from the data: (0, <max-value>) presuming no negative
values, or (<min-value>, <max-value>) if there are negative
values. `self.scaled_y_range` is set to the actual lower and
upper scaling range.
Ditto for `x_range`. Note that some chart types don't have x-axis
data.
"""
self.scaled_data_class = data_class
# Determine the x-axis range for scaling.
if x_range is None:
x_range = self.data_x_range()
if x_range and x_range[0] > 0:
x_range = (x_range[0], x_range[1])
self.scaled_x_range = x_range
# Determine the y-axis range for scaling.
if y_range is None:
y_range = self.data_y_range()
if y_range and y_range[0] > 0:
y_range = (y_range[0], y_range[1])
self.scaled_y_range = y_range
scaled_data = []
for type, dataset in self.annotated_data():
if type == 'x':
scale_range = x_range
elif type == 'y':
scale_range = y_range
elif type == 'marker-size':
scale_range = (0, max(dataset))
scaled_dataset = []
for v in dataset:
if v is None:
scaled_dataset.append(None)
else:
scaled_dataset.append(
data_class.scale_value(v, scale_range))
scaled_data.append(scaled_dataset)
return scaled_data
|
Scale `self.data` as appropriate for the given data encoding
(data_class) and return it.
An optional `y_range` -- a 2-tuple (lower, upper) -- can be
given to specify the y-axis bounds. If not given, the range is
inferred from the data: (0, <max-value>) presuming no negative
values, or (<min-value>, <max-value>) if there are negative
values. `self.scaled_y_range` is set to the actual lower and
upper scaling range.
Ditto for `x_range`. Note that some chart types don't have x-axis
data.
|
entailment
|
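For intuition, a hedged sketch of what a `data_class.scale_value` typically does: linearly map a value from the scale range onto the encoding's integer levels (Google's simple encoding has 62 levels, extended has 4096). The rounding here is illustrative, not pygooglechart's exact implementation:

```python
# Illustrative linear scaling into an encoding's integer range.
def scale_value(v, scale_range, max_encoded=61):
    lo, hi = scale_range
    return int(round(float(v - lo) / (hi - lo) * max_encoded))

print(scale_value(50, (0, 100)))    # 30 (about the middle of 0..61)
print(scale_value(0, (0, 100)))     # 0
print(scale_value(100, (0, 100)))   # 61
```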
def set_codes(self, codes):
'''Set the country code map for the data.
Codes given in a list.
i.e. DE - Germany
AT - Austria
US - United States
'''
codemap = ''
for cc in codes:
cc = cc.upper()
if cc in self.__ccodes:
codemap += cc
else:
raise UnknownCountryCodeException(cc)
self.codes = codemap
|
Set the country code map for the data.
Codes given in a list.
i.e. DE - Germany
AT - Austria
US - United States
|
entailment
|
def set_geo_area(self, area):
'''Sets the geo area for the map.
* africa
* asia
* europe
* middle_east
* south_america
* usa
* world
'''
if area in self.__areas:
self.geo_area = area
else:
raise UnknownChartType('Unknown chart type for maps: %s' % area)
|
Sets the geo area for the map.
* africa
* asia
* europe
* middle_east
* south_america
* usa
* world
|
entailment
|
def add_data_dict(self, datadict):
'''Sets the data and country codes via a dictionary.
i.e. {'DE': 50, 'GB': 30, 'AT': 70}
'''
self.set_codes(list(datadict.keys()))
self.add_data(list(datadict.values()))
|
Sets the data and country codes via a dictionary.
i.e. {'DE': 50, 'GB': 30, 'AT': 70}
|
entailment
|
def can_cast_to(v: Literal, dt: str) -> bool:
""" 5.4.3 Datatype Constraints
Determine whether "a value of the lexical form of n can be cast to the target type v per
XPath Functions 3.1 section 19 Casting[xpath-functions]."
"""
# TODO: rdflib doesn't appear to pay any attention to lengths (e.g. 257 is a valid XSD.byte)
return v.value is not None and Literal(str(v), datatype=dt).value is not None
|
5.4.3 Datatype Constraints
Determine whether "a value of the lexical form of n can be cast to the target type v per
XPath Functions 3.1 section 19 Casting[xpath-functions]."
|
entailment
|
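A quick demonstration of the rdflib behavior this test relies on: `.value` stays None when the lexical form is ill-typed for the datatype:

```python
from rdflib import Literal
from rdflib.namespace import XSD

# A well-typed literal gets a Python value; an ill-typed one gets None.
assert Literal('42', datatype=XSD.integer).value == 42
assert Literal('4x2', datatype=XSD.integer).value is None
# Recasting the lexical form, as can_cast_to does:
n = Literal('42')                                  # plain string literal
assert Literal(str(n), datatype=XSD.integer).value == 42
```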
def total_digits(n: Literal) -> Optional[int]:
""" 5.4.5 XML Schema Numberic Facet Constraints
totaldigits and fractiondigits constraints on values not derived from xsd:decimal fail.
"""
return len(str(abs(int(n.value)))) + fraction_digits(n) if is_numeric(n) and n.value is not None else None
|
5.4.5 XML Schema Numeric Facet Constraints
totaldigits and fractiondigits constraints on values not derived from xsd:decimal fail.
|
entailment
|
def fraction_digits(n: Literal) -> Optional[int]:
""" 5.4.5 XML Schema Numeric Facet Constraints
for "fractiondigits" constraints, v is less than or equals the number of digits to the right of the decimal place
in the XML Schema canonical form[xmlschema-2] of the value of n, ignoring trailing zeros.
"""
# Note - the last expression below isolates the fractional portion, reverses it (e.g. 017320 --> 023710) and
# converts it to an integer and back to a string
return None if not is_numeric(n) or n.value is None \
else 0 if is_integer(n) or '.' not in str(n.value) or str(n.value).split('.')[1] == '0' \
else len(str(int(str(n.value).split('.')[1][::-1])))
|
5.4.5 XML Schema Numeric Facet Constraints
for "fractiondigits" constraints, v is less than or equals the number of digits to the right of the decimal place
in the XML Schema canonical form[xmlschema-2] of the value of n, ignoring trailing zeros.
|
entailment
|
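The reverse-then-int trick strips trailing zeros from the fractional part; demonstrated on plain lexical strings:

```python
# Standalone copy of the fraction-digit logic for demonstration.
def fraction_digits_of(lexical):
    if '.' not in lexical or lexical.split('.')[1] == '0':
        return 0
    frac = lexical.split('.')[1]
    return len(str(int(frac[::-1])))   # '017320' -> '023710' -> 23710 -> 5 digits

assert fraction_digits_of('5') == 0
assert fraction_digits_of('1.500') == 1     # trailing zeros ignored
assert fraction_digits_of('2.017320') == 5
```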
def _map_xpath_flags_to_re(expr: str, xpath_flags: str) -> Tuple[int, str]:
""" Map `5.6.2 Flags <https://www.w3.org/TR/xpath-functions-31/#flags>`_ to python
:param expr: match pattern
:param xpath_flags: xpath flags
:returns: python flags / modified match pattern
"""
python_flags: int = 0
modified_expr = expr
if xpath_flags is None:
xpath_flags = ""
if 's' in xpath_flags:
python_flags |= re.DOTALL
if 'm' in xpath_flags:
python_flags |= re.MULTILINE
if 'i' in xpath_flags:
python_flags |= re.IGNORECASE
if 'x' in xpath_flags:
modified_expr = re.sub(r'[\t\n\r ]|\[[^\]]*\]', _char_class_escape, modified_expr)
if 'q' in xpath_flags:
modified_expr = re.escape(modified_expr)
return python_flags, modified_expr
|
Map `5.6.2 Flags <https://www.w3.org/TR/xpath-functions-31/#flags>`_ to python
:param expr: match pattern
:param xpath_flags: xpath flags
:returns: python flags / modified match pattern
|
entailment
|
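A standalone check of the flag mapping (the 'x' whitespace handling is omitted here):

```python
import re

# Under XPath's 'q' flag the pattern is treated literally, and 'i'
# maps to re.IGNORECASE, as in the mapping above.
def to_python(expr, xpath_flags):
    flags = 0
    if 's' in xpath_flags:
        flags |= re.DOTALL
    if 'm' in xpath_flags:
        flags |= re.MULTILINE
    if 'i' in xpath_flags:
        flags |= re.IGNORECASE
    if 'q' in xpath_flags:
        expr = re.escape(expr)
    return flags, expr

flags, pat = to_python('a.b', 'qi')
assert re.fullmatch(pat, 'A.B', flags)        # '.' matches only a literal dot
assert not re.fullmatch(pat, 'AxB', flags)
```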
def map_object_literal(v: Union[str, jsonasobj.JsonObj]) -> ShExJ.ObjectLiteral:
""" `PyShEx.jsg <https://github.com/hsolbrig/ShExJSG/ShExJSG/ShExJ.jsg>`_ does not add identifying
types to ObjectLiterals. This routine re-identifies the types
"""
# TODO: isinstance(v, JSGString) should work here, but it doesn't with IRIREF(http://a.example/v1)
return v if issubclass(type(v), JSGString) or (isinstance(v, JSGObject) and 'type' in v) else \
ShExJ.IRIREF(v) if isinstance(v, str) else ShExJ.ObjectLiteral(**v._as_dict)
|
`PyShEx.jsg <https://github.com/hsolbrig/ShExJSG/ShExJSG/ShExJ.jsg>`_ does not add identifying
types to ObjectLiterals. This routine re-identifies the types
|
entailment
|
def do_enable():
"""
Uncomment any lines that start with #import in the .pth file
"""
try:
_lines = []
with open(vext_pth, mode='r') as f:
for line in f.readlines():
if line.startswith('#') and line[1:].lstrip().startswith('import '):
_lines.append(line[1:].lstrip())
else:
_lines.append(line)
try:
os.unlink('%s.tmp' % vext_pth)
except OSError:
pass
with open('%s.tmp' % vext_pth, mode='w+') as f:
f.writelines(_lines)
try:
os.unlink('%s~' % vext_pth)
except OSError:
pass
os.rename(vext_pth, '%s~' % vext_pth)
os.rename('%s.tmp' % vext_pth, vext_pth)
except IOError as e:
if e.errno == 2:
# vext file doesn't exist, recreate it.
create_pth()
|
Uncomment any lines that start with #import in the .pth file
|
entailment
|
def do_disable():
"""
Comment any lines that start with import in the .pth file
"""
from vext import vext_pth
try:
_lines = []
with open(vext_pth, mode='r') as f:
for line in f.readlines():
if not line.startswith('#') and line.startswith('import '):
_lines.append('# %s' % line)
else:
_lines.append(line)
try:
os.unlink('%s.tmp' % vext_pth)
except OSError:
pass
with open('%s.tmp' % vext_pth, mode='w+') as f:
f.writelines(_lines)
try:
os.unlink('%s~' % vext_pth)
except OSError:
pass
os.rename(vext_pth, '%s~' % vext_pth)
os.rename('%s.tmp' % vext_pth, vext_pth)
except IOError as e:
if e.errno == 2: # file didn't exist == disabled
return
|
Comment any lines that start with import in the .pth file
|
entailment
|
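Both functions are line filters wrapped in a temp-file rename dance; the filter logic in isolation, on illustrative .pth content:

```python
# Sample .pth lines (content is illustrative; the real file is vext_pth).
lines = ['# import vext.gatekeeper; vext.gatekeeper.install_importer()\n',
         'some/path/entry\n']

# do_enable: strip the leading '#' from commented import lines.
enabled = [l[1:].lstrip() if l.startswith('#') and l[1:].lstrip().startswith('import ')
           else l for l in lines]
# do_disable: comment out active import lines.
disabled = ['# %s' % l if not l.startswith('#') and l.startswith('import ')
            else l for l in enabled]

assert enabled[0].startswith('import vext.gatekeeper')
assert disabled[0].startswith('# import vext.gatekeeper')
```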
def do_check(vext_files):
"""
Attempt to import everything in the 'test-imports' section of specified
vext_files
:param vext_files: list of vext filenames (without paths); '*' matches all.
:return: True if test_imports was successful from all files
"""
import vext
# not efficient ... but then there shouldn't be many of these
all_specs = set(vext.gatekeeper.spec_files_flat())
if vext_files == ['*']:
vext_files = all_specs
unknown_specs = set(vext_files) - all_specs
for fn in unknown_specs:
print("%s is not an installed vext file." % fn, file=sys.stderr)
if unknown_specs:
return False
check_passed = True
for fn in [join(vext.gatekeeper.spec_dir(), fn) for fn in vext_files]:
with open(fn) as spec_file:
f = open_spec(spec_file)
modules = f.get('test_import', [])
for success, module in vext.gatekeeper.test_imports(modules):
if not success:
check_passed = False
line = "import %s: %s" % (module, '[success]' if success else '[failed]')
print(line)
print('')
return check_passed
|
Attempt to import everything in the 'test-imports' section of specified
vext_files
:param vext_files: list of vext filenames (without paths); '*' matches all.
:return: True if test_imports was successful from all files
|
entailment
|
def fix_path(p):
"""
Convert a path pointing to a subdirectory of the virtualenv
site-packages to the corresponding system site-packages path.
Destination directory must exist for this to work.
>>> fix_path('C:\\some-venv\\Lib\\site-packages\\gnome')
'C:\\Python27\\lib\\site-packages\\gnome'
"""
venv_lib = get_python_lib()
if p.startswith(venv_lib):
subdir = p[len(venv_lib) + 1:]
for sitedir in getsyssitepackages():
fixed_path = join(sitedir, subdir)
if isdir(fixed_path):
return fixed_path
return p
|
Convert a path pointing to a subdirectory of the virtualenv
site-packages to the corresponding system site-packages path.
Destination directory must exist for this to work.
>>> fix_path('C:\\some-venv\\Lib\\site-packages\\gnome')
'C:\\Python27\\lib\\site-packages\\gnome'
|
entailment
|
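An illustration of the path surgery with made-up directories:

```python
from os.path import join

# Strip the virtualenv site-packages prefix, then re-root the subdir
# under a system sitedir (all paths here are hypothetical).
venv_lib = '/home/user/.venv/lib/python3.8/site-packages'
p = join(venv_lib, 'gnome')
subdir = p[len(venv_lib) + 1:]          # 'gnome'
sys_sitedir = '/usr/lib/python3/dist-packages'
print(join(sys_sitedir, subdir))        # /usr/lib/python3/dist-packages/gnome
```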
def fixup_paths():
"""
Fixup paths added in .pth file that point to the virtualenv
instead of the system site packages.
In depth: .PTH can execute arbitrary code, which might
manipulate the PATH or sys.path
:return:
"""
original_paths = os.environ.get('PATH', "").split(os.path.pathsep)
original_dirs = set(added_dirs)
yield
# Fix PATH environment variable
current_paths = os.environ.get('PATH', "").split(os.path.pathsep)
if original_paths != current_paths:
changed_paths = set(current_paths).difference(set(original_paths))
# rebuild PATH env var
fixed_paths = []
for path in current_paths:
if path in changed_paths:
fixed_paths.append(env_t(fix_path(path)))
else:
fixed_paths.append(env_t(path))
os.environ['PATH'] = os.pathsep.join(fixed_paths)
# Fix added_dirs
if added_dirs != original_dirs:
for path in set(added_dirs.difference(original_dirs)):
fixed_path = fix_path(path)
if fixed_path != path:
print("Fix %s >> %s" % (path, fixed_path))
added_dirs.remove(path)
added_dirs.add(fixed_path)
i = sys.path.index(path) # not efficient... but shouldn't happen often
sys.path[i] = fixed_path
if env_t(fixed_path) not in os.environ['PATH']:
os.environ['PATH'] += os.pathsep + env_t(fixed_path)  # strings have no .append
|
Fixup paths added in .pth file that point to the virtualenv
instead of the system site packages.
In depth: .PTH can execute arbitrary code, which might
manipulate the PATH or sys.path
:return:
|
entailment
|
def addpackage(sys_sitedir, pthfile, known_dirs):
"""
Wrapper for site.addpackage
Try and work out which directories are added by
the .pth and add them to the known_dirs set
:param sys_sitedir: system site-packages directory
:param pthfile: path file to add
:param known_dirs: set of known directories
"""
with open(join(sys_sitedir, pthfile)) as f:
for n, line in enumerate(f):
if line.startswith("#"):
continue
line = line.rstrip()
if line:
if line.startswith(("import ", "import\t")):
exec(line, globals(), locals())
continue
else:
p_rel = join(sys_sitedir, line)
p_abs = abspath(line)
if isdir(p_rel):
os.environ['PATH'] += env_t(os.pathsep + p_rel)
sys.path.append(p_rel)
added_dirs.add(p_rel)
elif isdir(p_abs):
os.environ['PATH'] += env_t(os.pathsep + p_abs)
sys.path.append(p_abs)
added_dirs.add(p_abs)
if isfile(pthfile):
site.addpackage(sys_sitedir, pthfile, known_dirs)
else:
logging.debug("pth file '%s' not found")
|
Wrapper for site.addpackage
Try and work out which directories are added by
the .pth and add them to the known_dirs set
:param sys_sitedir: system site-packages directory
:param pthfile: path file to add
:param known_dirs: set of known directories
|
entailment
|
def filename_to_module(filename):
"""
convert a filename like html5lib-0.999.egg-info to html5lib
"""
find = re.compile(r"^[^.|-]*")
name = re.search(find, filename).group(0)
return name
|
convert a filename like html5lib-0.999.egg-info to html5lib
|
entailment
|
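The regex stops at the first '.', '|' or '-', which is enough to strip version and packaging suffixes:

```python
import re

find = re.compile(r"^[^.|-]*")
assert re.search(find, "html5lib-0.999.egg-info").group(0) == "html5lib"
assert re.search(find, "six.py").group(0) == "six"
```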
def init_path():
"""
Add any new modules that are directories to the PATH
"""
sitedirs = getsyssitepackages()
for sitedir in sitedirs:
env_path = os.environ['PATH'].split(os.pathsep)
for module in allowed_modules:
p = join(sitedir, module)
if isdir(p) and not p in env_path:
os.environ['PATH'] += env_t(os.pathsep + p)
|
Add any new modules that are directories to the PATH
|
entailment
|
def install_importer():
"""
If in a virtualenv then load spec files to decide which
modules can be imported from system site-packages and
install path hook.
"""
logging.debug('install_importer')
if not in_venv():
logging.debug('No virtualenv active py:[%s]', sys.executable)
return False
if disable_vext:
logging.debug('Vext disabled by environment variable')
return False
if GatekeeperFinder.PATH_TRIGGER not in sys.path:
try:
load_specs()
sys.path.append(GatekeeperFinder.PATH_TRIGGER)
sys.path_hooks.append(GatekeeperFinder)
except Exception as e:
"""
Dont kill other programmes because of a vext error
"""
logger.info(str(e))
if logger.getEffectiveLevel() == logging.DEBUG:
raise
logging.debug("importer installed")
return True
|
If in a virtualenv then load spec files to decide which
modules can be imported from system site-packages and
install path hook.
|
entailment
|
def load_module(self, name):
"""
Only lets modules in allowed_modules be loaded, others
will get an ImportError
"""
# Get the name relative to SITEDIR ..
filepath = self.module_info[1]
fullname = splitext( \
relpath(filepath, self.sitedir) \
)[0].replace(os.sep, '.')
modulename = filename_to_module(fullname)
if modulename not in allowed_modules:
if remember_blocks:
blocked_imports.add(fullname)
if log_blocks:
raise ImportError("Vext blocked import of '%s'" % modulename)
else:
# Standard error message
raise ImportError("No module named %s" % modulename)
if name not in sys.modules:
try:
logger.debug("load_module %s %s", name, self.module_info)
module = imp.load_module(name, *self.module_info)
except Exception as e:
logger.debug(e)
raise
sys.modules[fullname] = module
return sys.modules[fullname]
|
Only lets modules in allowed_modules be loaded, others
will get an ImportError
|
entailment
|
def extra_paths():
"""
:return: extra paths
"""
# TODO - this is only tested on Ubuntu for now
# there must be a better way of getting
# the sip directory.
dirs = {}
try:
@vext.env.run_in_syspy
def run(*args):
import sipconfig
config = sipconfig.Configuration()
dirs = {
"sip.default_sip_dir": config.default_sip_dir,
}
return dirs
dirs = run()
return dirs
except ImportError:
return dirs
|
:return: extra paths
|
entailment
|
def str_to_bytes(value):
"""
Convert a string to bytes if the value is an instance of
six.string_types but not of six.binary_type.
In Python 2, struct.pack("<Q") is both string_types and binary_type;
in Python 3, struct.pack("<Q") is binary_type but not string_types.
:param value: value to convert
:return: the value, encoded to bytes if it was a text string
"""
if not isinstance(value, six.binary_type) and isinstance(value, six.string_types):
return value.encode()
return value
|
Convert a string to bytes if the value is an instance of
six.string_types but not of six.binary_type.
In Python 2, struct.pack("<Q") is both string_types and binary_type;
in Python 3, struct.pack("<Q") is binary_type but not string_types.
:param value: value to convert
:return: the value, encoded to bytes if it was a text string
|
entailment
|
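A standalone copy for demonstration (named `to_bytes` here to avoid clashing with the original):

```python
import struct
import six

def to_bytes(value):
    # Same test as above: encode only text strings, pass bytes through.
    if not isinstance(value, six.binary_type) and isinstance(value, six.string_types):
        return value.encode()
    return value

assert to_bytes(u'key') == b'key'
packed = struct.pack("<Q", 1)        # already binary on Python 3 ...
assert to_bytes(packed) == packed    # ... so it passes through untouched
```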
def evaluate(g: Graph,
schema: Union[str, ShExJ.Schema],
focus: Optional[Union[str, URIRef, IRIREF]],
start: Optional[Union[str, URIRef, IRIREF, START, START_TYPE]]=None,
debug_trace: bool = False) -> Tuple[bool, Optional[str]]:
""" Evaluate focus node `focus` in graph `g` against shape `shape` in ShEx schema `schema`
:param g: Graph containing RDF
:param schema: ShEx Schema -- if str, it will be parsed
:param focus: focus node in g. If not specified, all URI subjects in G will be evaluated.
:param start: Starting shape. If omitted, the Schema start shape is used
:param debug_trace: Turn on debug tracing
:return: (True, None) on success, otherwise (False, failure reason)
"""
if isinstance(schema, str):
schema = SchemaLoader().loads(schema)
if schema is None:
return False, "Error parsing schema"
if not isinstance(focus, URIRef):
focus = URIRef(str(focus))
if start is None:
start = str(schema.start) if schema.start else None
if start is None:
return False, "No starting shape"
if not isinstance(start, IRIREF) and start is not START and start is not START_TYPE:
start = IRIREF(str(start))
cntxt = Context(g, schema)
cntxt.debug_context.debug = debug_trace
map_ = FixedShapeMap()
map_.add(ShapeAssociation(focus, start))
test_result, reasons = isValid(cntxt, map_)
return test_result, '\n'.join(reasons)
|
Evaluate focus node `focus` in graph `g` against shape `start` in ShEx schema `schema`
:param g: Graph containing RDF
:param schema: ShEx Schema -- if str, it will be parsed
:param focus: focus node in g. If not specified, all URI subjects in G will be evaluated.
:param start: Starting shape. If omitted, the Schema start shape is used
:param debug_trace: Turn on debug tracing
:return: (True, None) on success, otherwise (False, failure reason)
|
entailment
|
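A hedged usage sketch, assuming this module is importable as `pyshex.evaluate` (as in PyShEx) and that rdflib is installed; the schema and data are illustrative:

```python
from rdflib import Graph
from pyshex.evaluate import evaluate   # assumes this module lives at pyshex.evaluate

shex = """
PREFIX ex: <http://example.org/>
start = @<ShapeS>
<ShapeS> { ex:name . }
"""
rdf = '@prefix ex: <http://example.org/> . ex:alice ex:name "Alice" .'

g = Graph()
g.parse(data=rdf, format='turtle')
conforms, reason = evaluate(g, shex, focus='http://example.org/alice')
print(conforms, reason)
```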
def isValid(cntxt: Context, m: FixedShapeMap) -> Tuple[bool, List[str]]:
"""`5.2 Validation Definition <http://shex.io/shex-semantics/#validation>`_
The expression isValid(G, m) indicates that for every nodeSelector/shapeLabel pair (n, s) in m, s has a
corresponding shape expression se and satisfies(n, se, G, m). satisfies is defined below for each form
of shape expression
:param cntxt: evaluation context - includes graph and schema
:param m: list of NodeShape pairs to test
:return: Success/failure indicator and, if fail, a list of failure reasons
"""
if not cntxt.is_valid:
return False, cntxt.error_list
parse_nodes = []
for nodeshapepair in m:
n = nodeshapepair.nodeSelector
if not isinstance_(n, Node):
return False, [f"{n}: Triple patterns are not implemented"]
# The third test below is because the spec asserts that completely empty graphs pass in certain circumstances
elif not (next(cntxt.graph.predicate_objects(nodeshapepair.nodeSelector), None) or
next(cntxt.graph.subject_predicates(nodeshapepair.nodeSelector), None) or
not next(cntxt.graph.triples((None, None, None)), None)):
return False, [f"Focus: {nodeshapepair.nodeSelector} not in graph"]
else:
s = cntxt.shapeExprFor(START if nodeshapepair.shapeLabel is None or nodeshapepair.shapeLabel is START
else nodeshapepair.shapeLabel)
cntxt.current_node = ParseNode(satisfies, s, n, cntxt)
if not s:
if nodeshapepair.shapeLabel is START or nodeshapepair.shapeLabel is None:
cntxt.fail_reason = "START node is not specified or is invalid"
else:
cntxt.fail_reason = f"Shape: {nodeshapepair.shapeLabel} not found in Schema"
return False, cntxt.process_reasons()
parse_nodes.append(cntxt.current_node)
if not satisfies(cntxt, n, s):
cntxt.current_node.result = False
return False, cntxt.process_reasons()
else:
cntxt.current_node.result = True
return True, []
|
`5.2 Validation Definition <http://shex.io/shex-semantics/#validation>`_
The expression isValid(G, m) indicates that for every nodeSelector/shapeLabel pair (n, s) in m, s has a
corresponding shape expression se and satisfies(n, se, G, m). satisfies is defined below for each form
of shape expression
:param cntxt: evaluation context - includes graph and schema
:param m: list of NodeShape pairs to test
:return: Success/failure indicator and, if fail, a list of failure reasons
|
entailment
|
def check_sysdeps(vext_files):
"""
Check that imports in 'test_imports' succeed
otherwise display message in 'install_hints'
"""
@run_in_syspy
def run(*modules):
result = {}
for m in modules:
if m:
try:
__import__(m)
result[m] = True
except ImportError:
result[m] = False
return result
success = True
for vext_file in vext_files:
with open(vext_file) as f:
vext = open_spec(f)
install_hint = " ".join(vext.get('install_hints', ['System dependencies not found']))
modules = vext.get('test_import', '')
logger.debug("%s test imports of: %s", vext_file, modules)
result = run(*modules)
if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
for k, v in result.items():
logger.debug("%s: %s", k, v)
if not all(result.values()):
success = False
print(install_hint)
return success
|
Check that imports in 'test_imports' succeed
otherwise display message in 'install_hints'
|
entailment
|
def install_vexts(vext_files, verify=True):
"""
copy vext_file to sys.prefix + '/share/vext/specs'
(PIP7 seems to remove data_files so we recreate something similar here)
"""
if verify and not check_sysdeps(vext_files):
return
spec_dir = join(prefix, 'share/vext/specs')
try:
makedirs(spec_dir)
except OSError as e:
if not isdir(spec_dir):
logger.error("Error making spec directory [%s]: %r" % (spec_dir, e))
for vext_file in vext_files:
dest = normpath(join(spec_dir, basename(vext_file)))
try:
logger.debug("%s > %s" % (vext_file, dest))
copyfile(vext_file, dest)
yield vext_file, dest
except IOError as e:
logger.error("Could not copy %s %r" % (vext_file, e))
|
copy vext_file to sys.prefix + '/share/vext/specs'
(PIP7 seems to remove data_files so we recreate something similar here)
|
entailment
|
def create_pth():
"""
Create the default PTH file
:return:
"""
if prefix == '/usr':
print("Not creating PTH in real prefix: %s" % prefix)
return False
with open(vext_pth, 'w') as f:
f.write(DEFAULT_PTH_CONTENT)
return True
|
Create the default PTH file
:return:
|
entailment
|
def format_collection(g: Graph, subj: Union[URIRef, BNode], max_entries: int = None, nentries: int = 0) -> Optional[List[str]]:
"""
Return the turtle representation of subj as a collection
:param g: Graph containing subj
:param subj: subject of list
:param max_entries: maximum number of list elements to return, None means all
:param nentries: used for recursion
:return: List of formatted entries if subj heads a well formed collection else None
"""
if subj == RDF.nil:
return [')']
if max_entries is not None and nentries >= max_entries:
return [' ...', ')']
cadr = cdr = None
for p, o in g.predicate_objects(subj):
if p == RDF.first and cadr is None:
cadr = o
elif p == RDF.rest and cdr is None:
cdr = o
else:
return None
# technically this can't happen but it doesn't hurt to address it
if cadr == RDF.nil and cdr is None:
return []
elif cadr is not None and cdr is not None:
return [(' ' if nentries else '(') + cadr.n3(g.namespace_manager)] + format_collection(g, cdr, max_entries,
nentries+1)
else:
return None
|
Return the turtle representation of subj as a collection
:param g: Graph containing subj
:param subj: subject of list
:param max_entries: maximum number of list elements to return, None means all
:param nentries: used for recursion
:return: List of formatted entries if subj heads a well formed collection else None
|
entailment
|
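A sketch of the rdf:first/rdf:rest walk the function performs, on a tiny graph:

```python
from rdflib import Graph, URIRef, RDF

g = Graph()
g.parse(data='@prefix : <http://example.org/> . :s :p (1 2 3) .',
        format='turtle')
# Fetch the collection head, then follow rdf:first/rdf:rest to rdf:nil.
head = g.value(URIRef('http://example.org/s'), URIRef('http://example.org/p'))
items = []
while head != RDF.nil:
    items.append(g.value(head, RDF.first))
    head = g.value(head, RDF.rest)
print([i.toPython() for i in items])   # [1, 2, 3]
```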
def get_extra_path(name):
"""
:param name: name in format helper.path_name
sip.default_sip_dir
"""
# Paths are cached in path_cache
helper_name, _, key = name.partition(".")
helper = path_helpers.get(helper_name)
if not helper:
raise ValueError("Helper '{0}' not found.".format(helper))
if name not in path_cache:
extra_paths = helper.extra_paths()
path_cache.update(extra_paths)
extra_path = path_cache.get(name)
if not extra_path:
raise ValueError("Helper '{0}' has no path called {1}".format(helper_name, name))
return extra_path
|
:param name: name in format helper.path_name
sip.default_sip_dir
|
entailment
|
def upgrade_setuptools():
"""
setuptools 12.2 can trigger a really nasty bug
that eats all memory, so upgrade it to
18.8, which is known to be good.
"""
# Note - I tried including the higher version in
# setup_requires, but was still able to trigger
# the bug. - stu.axon
global MIN_SETUPTOOLS
r = None
try:
r = pkg_resources.require(["setuptools"])[0]
except DistributionNotFound:
# ok, setuptools will be installed later
return
if StrictVersion(r.version) >= StrictVersion(MIN_SETUPTOOLS):
return
else:
print("Upgrading setuptools...")
subprocess.call("%s -mpip install 'setuptools>=%s'" % (sys.executable, MIN_SETUPTOOLS), shell=True)
|
setuptools 12.2 can trigger a really nasty bug
that eats all memory, so upgrade it to
18.8, which is known to be good.
|
entailment
|
def installed_packages(self):
""" :return: list of installed packages """
packages = []
CMDLINE = [sys.executable, "-mpip", "freeze"]
try:
for package in subprocess.check_output(CMDLINE) \
.decode('utf-8'). \
splitlines():
for comparator in ["==", ">=", "<=", "<", ">"]:
if comparator in package:
# installed package names usually look like Pillow==2.8.1
# ignore others, like external packages that pip show
# won't understand
name = package.partition(comparator)[0]
packages.append(name)
break  # without a break, 'pkg>=1.0' matches both '>=' and '>' and is appended twice
except subprocess.CalledProcessError as e:
if logger.isEnabledFor(logging.DEBUG):
logger.debug("Exception checking existing packages.")
logger.debug("cmdline: %s", CMDLINE)
ex_type, ex, tb = sys.exc_info()
traceback.print_tb(tb)
logger.debug("%r", e)
return packages
|
:return: list of installed packages
|
entailment
|
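The parsing step in isolation, on illustrative `pip freeze` output (with the `break` applied, as above):

```python
freeze_lines = ["Pillow==2.8.1", "vext.gi>=0.5.6",
                "-e git+https://example.org/repo#egg=demo"]
names = []
for line in freeze_lines:
    for comparator in ("==", ">=", "<=", "<", ">"):
        if comparator in line:
            names.append(line.partition(comparator)[0])
            break
print(names)   # ['Pillow', 'vext.gi']
```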
def package_info(self):
"""
:return: list of package info on installed packages
"""
import subprocess
# create a commandline like pip show Pillow show
package_names = self.installed_packages()
if not package_names:
# No installed packages yet, so nothing to do here...
return []
cmdline = [sys.executable, "-mpip"]
for name in package_names:
cmdline.extend(["show", name])
output = subprocess.check_output(cmdline)
# Python 3 fix
if not isinstance(output, str):
# Some package info is encoded in Latin-1 or something other than
# UTF8. Replace non-UTF characters with '?' instead of crashing.
output = str(output, encoding='UTF-8', errors='replace')
# parse output that looks like this example
"""
---
Name: Pillow
Version: 2.8.1
Location: /mnt/data/home/stu/.virtualenvs/shoebot-setup/lib/python2.7/site-packages/Pillow-2.8.1-py2.7-linux-x86_64.egg
Requires:
---
Name: vext.gi
Version: 0.5.6.25
Location: /mnt/data/home/stu/.virtualenvs/shoebot-setup/lib/python2.7/site-packages/vext.gi-0.5.6.25-py2.7.egg
Requires: vext
"""
results = []
for info in output[3:].split("---"):
d = {}
for line in info[1:].splitlines():
arg, _, value = line.partition(': ')
arg = arg.lower()
if arg == 'requires':
value = value.split(', ')
d[arg] = value
results.append(d)
return results
|
:return: list of package info on installed packages
|
entailment
|
def depends_on(self, dependency):
"""
List of packages that depend on dependency
:param dependency: package name, e.g. 'vext' or 'Pillow'
"""
packages = self.package_info()
return [package for package in packages if dependency in package.get("requires", "")]
|
List of packages that depend on dependency
:param dependency: package name, e.g. 'vext' or 'Pillow'
|
entailment
|
def find_vext_files(self):
"""
:return: Absolute paths to any provided vext files
"""
packages = self.depends_on("vext")
vext_files = []
for location in [package.get("location") for package in packages]:
if not location:
continue
vext_files.extend(glob(join(location, "*.vext")))
return vext_files
|
:return: Absolute paths to any provided vext files
|
entailment
|
def run(self):
"""
Need to find any pre-existing vext contained in dependent packages
and install them
example:
you create a setup.py with install_requires["vext.gi"]:
- vext.gi gets installed using bdist_egg
- vext itself is now called with bdist_egg and we end up here
Vext now needs to find and install .vext files in vext.gi
[or any other files that depend on vext]
:return:
"""
logger.debug("vext InstallLib [started]")
# Find packages that depend on vext and check for .vext files...
logger.debug("find_vext_files")
vext_files = self.find_vext_files()
logger.debug("manually_install_vext: ", vext_files)
self.manually_install_vext(vext_files)
logger.debug("enable vext")
self.enable_vext()
logger.debug("install_lib.run")
install_lib.run(self)
logger.debug("vext InstallLib [finished]")
|
Need to find any pre-existing vext contained in dependent packages
and install them
example:
you create a setup.py with install_requires["vext.gi"]:
- vext.gi gets installed using bdist_egg
- vext itself is now called with bdist_egg and we end up here
Vext now needs to find and install .vext files in vext.gi
[or any other files that depend on vext]
:return:
|
entailment
|
def set_servers(self, servers):
"""
Iterate over a list of servers and instantiate a Protocol class for each.
:param servers: A list of servers
:type servers: list
:return: Returns nothing
:rtype: None
"""
if isinstance(servers, six.string_types):
servers = [servers]
assert servers, "No memcached servers supplied"
self._servers = [Protocol(
server=server,
username=self.username,
password=self.password,
compression=self.compression,
socket_timeout=self.socket_timeout,
pickle_protocol=self.pickle_protocol,
pickler=self.pickler,
unpickler=self.unpickler,
) for server in servers]
|
Iterate over a list of servers and instantiate a Protocol class for each.
:param servers: A list of servers
:type servers: list
:return: Returns nothing
:rtype: None
|
entailment
|
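A hypothetical usage sketch, assuming the surrounding class is python-binary-memcached's `bmemcached.Client` and a memcached server is reachable at the address shown:

```python
import bmemcached

# Server address and credentials are illustrative.
client = bmemcached.Client(['127.0.0.1:11211'],
                           username='user',
                           password='secret')
client.set('greeting', 'hello')
print(client.get('greeting'))   # 'hello'
```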
def flush_all(self, time=0):
"""
Send a command to each server to flush (delete) all keys.
:param time: Time to wait until flush in seconds.
:type time: int
:return: True in case of success, False in case of failure
:rtype: bool
"""
returns = []
for server in self.servers:
returns.append(server.flush_all(time))
return any(returns)
|
Send a command to each server to flush (delete) all keys.
:param time: Time to wait until flush in seconds.
:type time: int
:return: True in case of success, False in case of failure
:rtype: bool
|
entailment
|
def stats(self, key=None):
"""
Return server stats.
:param key: Optional key to return status for.
:type key: six.string_types
:return: A dict with server stats
:rtype: dict
"""
# TODO: Stats with key is not working.
returns = {}
for server in self.servers:
returns[server.server] = server.stats(key)
return returns
|
Return server stats.
:param key: Optional key to return status for.
:type key: six.string_types
:return: A dict with server stats
:rtype: dict
|
entailment
|
def house_explosions():
"""
Data from http://indexed.blogspot.com/2007/12/meltdown-indeed.html
"""
chart = PieChart2D(int(settings.width * 1.7), settings.height)
chart.add_data([10, 10, 30, 200])
chart.set_pie_labels([
'Budding Chemists',
'Propane issues',
'Meth Labs',
'Attempts to escape mortgage',
])
chart.download('pie-house-explosions.png')
|
Data from http://indexed.blogspot.com/2007/12/meltdown-indeed.html
|
entailment
|
def objectValueMatches(n: Node, vsv: ShExJ.objectValue) -> bool:
""" http://shex.io/shex-semantics/#values
Implements "n = vsv" where vsv is an objectValue and n is a Node
Note that IRIREF is a string pattern, so the matching type is str
"""
return \
(isinstance(vsv, IRIREF) and isinstance(n, URIRef) and uriref_matches_iriref(n, vsv)) or \
(isinstance(vsv, ShExJ.ObjectLiteral) and isinstance(n, Literal) and literal_matches_objectliteral(n, vsv))
|
http://shex.io/shex-semantics/#values
Implements "n = vsv" where vsv is an objectValue and n is a Node
Note that IRIREF is a string pattern, so the matching type is str
|
entailment
|
def uriref_matches_iriref(v1: URIRef, v2: Union[str, ShExJ.IRIREF]) -> bool:
""" Compare :py:class:`rdflib.URIRef` value with :py:class:`ShExJ.IRIREF` value """
return str(v1) == str(v2)
|
Compare :py:class:`rdflib.URIRef` value with :py:class:`ShExJ.IRIREF` value
|
entailment
|
def uriref_startswith_iriref(v1: URIRef, v2: Union[str, ShExJ.IRIREF]) -> bool:
""" Determine whether a :py:class:`rdflib.URIRef` value starts with the text of a :py:class:`ShExJ.IRIREF` value """
return str(v1).startswith(str(v2))
|
Determine whether a :py:class:`rdflib.URIRef` value starts with the text of a :py:class:`ShExJ.IRIREF` value
|
entailment
|
def literal_matches_objectliteral(v1: Literal, v2: ShExJ.ObjectLiteral) -> bool:
""" Compare :py:class:`rdflib.Literal` with :py:class:`ShExJ.objectLiteral` """
v2_lit = Literal(str(v2.value), datatype=iriref_to_uriref(v2.type), lang=str(v2.language) if v2.language else None)
return v1 == v2_lit
|
Compare :py:class:`rdflib.Literal` with :py:class:`ShExJ.objectLiteral`
|
entailment
|
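The comparison relies on rdflib's datatype- and language-aware Literal equality:

```python
from rdflib import Literal
from rdflib.namespace import XSD

assert Literal('42', datatype=XSD.integer) == Literal('42', datatype=XSD.integer)
assert Literal('chat', lang='fr') != Literal('chat', lang='en')
assert Literal('42') != Literal('42', datatype=XSD.integer)  # datatype matters
```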
def satisfiesNodeConstraint(cntxt: Context, n: Node, nc: ShExJ.NodeConstraint, _: DebugContext) -> bool:
""" `5.4.1 Semantics <http://shex.io/shex-semantics/#node-constraint-semantics>`_
For a node n and constraint nc, satisfies2(n, nc) if and only if for every nodeKind, datatype, xsFacet and
values constraint value v present in nc nodeSatisfies(n, v). The following sections define nodeSatisfies for
each of these types of constraints:
"""
return nodeSatisfiesNodeKind(cntxt, n, nc) and nodeSatisfiesDataType(cntxt, n, nc) and \
nodeSatisfiesStringFacet(cntxt, n, nc) and nodeSatisfiesNumericFacet(cntxt, n, nc) and \
nodeSatisfiesValues(cntxt, n, nc)
|
`5.4.1 Semantics <http://shex.io/shex-semantics/#node-constraint-semantics>`_
For a node n and constraint nc, satisfies2(n, nc) if and only if for every nodeKind, datatype, xsFacet and
values constraint value v present in nc nodeSatisfies(n, v). The following sections define nodeSatisfies for
each of these types of constraints:
|
entailment
|
def nodeSatisfiesNodeKind(cntxt: Context, n: Node, nc: ShExJ.NodeConstraint, c: DebugContext) -> bool:
""" `5.4.2 Node Kind Constraints <http://shex.io/shex-semantics/#nodeKind>`_
For a node n and constraint value v, nodeSatisfies(n, v) if:
* v = "iri" and n is an IRI.
* v = "bnode" and n is a blank node.
* v = "literal" and n is a Literal.
* v = "nonliteral" and n is an IRI or blank node.
"""
if c.debug and nc.nodeKind is not None:
print(f" Kind: {nc.nodeKind}")
if nc.nodeKind is None or \
(nc.nodeKind == 'iri' and isinstance(n, URIRef)) or \
(nc.nodeKind == 'bnode' and isinstance(n, BNode)) or \
(nc.nodeKind == 'literal' and isinstance(n, Literal)) or \
(nc.nodeKind == 'nonliteral' and isinstance(n, (URIRef, BNode))):
return True
cntxt.fail_reason = f"Node kind mismatch have: {type(n).__name__} expected: {nc.nodeKind}"
return False
|
`5.4.2 Node Kind Constraints <http://shex.io/shex-semantics/#nodeKind>`_
For a node n and constraint value v, nodeSatisfies(n, v) if:
* v = "iri" and n is an IRI.
* v = "bnode" and n is a blank node.
* v = "literal" and n is a Literal.
* v = "nonliteral" and n is an IRI or blank node.
|
entailment
|
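The node-kind tests above reduce to rdflib isinstance checks; a small sketch of the same logic:

from rdflib import URIRef, BNode, Literal

n = Literal("chat", lang="fr")
print(isinstance(n, Literal))          # True  -> satisfies nodeKind "literal"
print(isinstance(n, (URIRef, BNode)))  # False -> fails nodeKind "nonliteral"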
def nodeSatisfiesDataType(cntxt: Context, n: Node, nc: ShExJ.NodeConstraint, c: DebugContext) -> bool:
""" `5.4.3 Datatype Constraints <http://shex.io/shex-semantics/#datatype>`_
    For a node n and constraint value v, nodeSatisfies(n, v) if n is a Literal with the datatype v and, if v is in
the set of SPARQL operand data types[sparql11-query], an XML schema string with a value of the lexical form of
n can be cast to the target type v per XPath Functions 3.1 section 19 Casting[xpath-functions]. Only datatypes
supported by SPARQL MUST be tested but ShEx extensions MAY add support for other datatypes.
"""
if nc.datatype is None:
return True
if c.debug:
print(f" Datatype: {nc.datatype}")
if not isinstance(n, Literal):
cntxt.fail_reason = f"Datatype constraint ({nc.datatype}) " \
f"does not match {type(n).__name__} {cntxt.n3_mapper.n3(n)}"
cntxt.dump_bnode(n)
return False
actual_datatype = _datatype(n)
if actual_datatype == str(nc.datatype) or \
(is_sparql_operand_datatype(nc.datatype) and can_cast_to(n, nc.datatype)):
return True
cntxt.fail_reason = f"Datatype mismatch - expected: {nc.datatype} actual: {actual_datatype}"
return False
|
`5.4.3 Datatype Constraints <http://shex.io/shex-semantics/#datatype>`_
For a node n and constraint value v, nodeSatisfies(n, v) if n is a Literal with the datatype v and, if v is in
the set of SPARQL operand data types[sparql11-query], an XML schema string with a value of the lexical form of
n can be cast to the target type v per XPath Functions 3.1 section 19 Casting[xpath-functions]. Only datatypes
supported by SPARQL MUST be tested but ShEx extensions MAY add support for other datatypes.
|
entailment
|
def nodeSatisfiesStringFacet(cntxt: Context, n: Node, nc: ShExJ.NodeConstraint, _c: DebugContext) -> bool:
""" `5.4.5 XML Schema String Facet Constraints <ttp://shex.io/shex-semantics/#xs-string>`_
String facet constraints apply to the lexical form of the RDF Literals and IRIs and blank node
identifiers (see note below regarding access to blank node identifiers).
"""
# Let lex =
#
# * if the value n is an RDF Literal, the lexical form of the literal (see[rdf11-concepts] section 3.3 Literals).
# * if the value n is an IRI, the IRI string (see[rdf11-concepts] section 3.2 IRIs).
# * if the value n is a blank node, the blank node identifier (see[rdf11-concepts] section 3.4 Blank Nodes).
if nc.length is not None or nc.minlength is not None or nc.maxlength is not None \
or nc.pattern is not None:
lex = str(n)
# Let len = the number of unicode codepoints in lex
# For a node n and constraint value v, nodeSatisfies(n, v):
#
# * for "length" constraints, v = len,
# * for "minlength" constraints, v >= len,
# * for "maxlength" constraints, v <= len,
# * for "pattern" constraints, v is unescaped into a valid XPath 3.1 regular expression[xpath-functions-31]
# re and invoking fn:matches(lex, re) returns fn:true. If the flags parameter is present, it is passed
# as a third argument to fn:matches. The pattern may have XPath 3.1 regular expression escape sequences
# per the modified production [10] in section 5.6.1.1 as well as numeric escape sequences of the
# form 'u' HEX HEX HEX HEX or 'U' HEX HEX HEX HEX HEX HEX HEX HEX. Unescaping replaces numeric escape
# sequences with the corresponding unicode codepoint
# TODO: Figure out whether we need to connect this to the lxml exslt functions
# TODO: Map flags if not
if (nc.length is None or len(lex) == nc.length) and \
(nc.minlength is None or len(lex) >= nc.minlength) and \
(nc.maxlength is None or len(lex) <= nc.maxlength) and \
(nc.pattern is None or pattern_match(nc.pattern, nc.flags, lex)):
return True
elif nc.length is not None and len(lex) != nc.length:
cntxt.fail_reason = f"String length mismatch - expected: {nc.length} actual: {len(lex)}"
elif nc.minlength is not None and len(lex) < nc.minlength:
cntxt.fail_reason = f"String length violation - minimum: {nc.minlength} actual: {len(lex)}"
elif nc.maxlength is not None and len(lex) > nc.maxlength:
cntxt.fail_reason = f"String length violation - maximum: {nc.maxlength} actual: {len(lex)}"
elif nc.pattern is not None and not pattern_match(nc.pattern, nc.flags, lex):
cntxt.fail_reason = f"Pattern match failure - pattern: {nc.pattern} flags:{nc.flags}" \
f" string: {lex}"
else:
cntxt.fail_reason = "Programming error - flame the programmer"
return False
else:
return True
|
`5.4.4 XML Schema String Facet Constraints <http://shex.io/shex-semantics/#xs-string>`_
String facet constraints apply to the lexical form of the RDF Literals and IRIs and blank node
identifiers (see note below regarding access to blank node identifiers).
|
entailment
|
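A sketch of the facet arithmetic above, using Python's re module as a rough stand-in for XPath fn:matches (the real code routes patterns through pattern_match):

import re
from rdflib import Literal

lex = str(Literal("abcd"))                 # the lexical form of the node
print(len(lex) == 4)                       # length facet
print(2 <= len(lex) <= 10)                 # minlength / maxlength facets
print(re.search("^ab", lex) is not None)   # approximate pattern facet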
def nodeSatisfiesNumericFacet(cntxt: Context, n: Node, nc: ShExJ.NodeConstraint, _c: DebugContext) -> bool:
""" `5.4.5 XML Schema Numeric Facet Constraints <http://shex.io/shex-semantics/#xs-numeric>`_
Numeric facet constraints apply to the numeric value of RDF Literals with datatypes listed in SPARQL 1.1
Operand Data Types[sparql11-query]. Numeric constraints on non-numeric values fail. totaldigits and
fractiondigits constraints on values not derived from xsd:decimal fail.
"""
if nc.mininclusive is not None or nc.minexclusive is not None or nc.maxinclusive is not None \
or nc.maxexclusive is not None or nc.totaldigits is not None or nc.fractiondigits is not None:
if is_numeric(n):
v = n.value
if isinstance(v, numbers.Number):
if (nc.mininclusive is None or v >= nc.mininclusive) and \
(nc.minexclusive is None or v > nc.minexclusive) and \
(nc.maxinclusive is None or v <= nc.maxinclusive) and \
(nc.maxexclusive is None or v < nc.maxexclusive) and \
(nc.totaldigits is None or (total_digits(n) is not None and
total_digits(n) <= nc.totaldigits)) and \
(nc.fractiondigits is None or (fraction_digits(n) is not None and
fraction_digits(n) <= nc.fractiondigits)):
return True
else:
                    if nc.mininclusive is not None and v < nc.mininclusive:
                        cntxt.fail_reason = f"Numeric value violation - minimum inclusive: " \
                                            f"{nc.mininclusive} actual: {v}"
                    elif nc.minexclusive is not None and v <= nc.minexclusive:
                        cntxt.fail_reason = f"Numeric value violation - minimum exclusive: " \
                                            f"{nc.minexclusive} actual: {v}"
                    elif nc.maxinclusive is not None and v > nc.maxinclusive:
                        cntxt.fail_reason = f"Numeric value violation - maximum inclusive: " \
                                            f"{nc.maxinclusive} actual: {v}"
                    elif nc.maxexclusive is not None and v >= nc.maxexclusive:
                        cntxt.fail_reason = f"Numeric value violation - maximum exclusive: " \
                                            f"{nc.maxexclusive} actual: {v}"
                    elif nc.totaldigits is not None and (total_digits(n) is None or
                                                         total_digits(n) > nc.totaldigits):
                        cntxt.fail_reason = f"Numeric value violation - max total digits: " \
                                            f"{nc.totaldigits} value: {v}"
                    elif nc.fractiondigits is not None and (fraction_digits(n) is None or
                                                            fraction_digits(n) > nc.fractiondigits):
                        cntxt.fail_reason = f"Numeric value violation - max fractional digits: " \
                                            f"{nc.fractiondigits} value: {v}"
                    else:
                        cntxt.fail_reason = "Impossible error - kick the programmer"
                    return False
            else:
                cntxt.fail_reason = f"Numeric test on non-number: {v}"
                return False
        else:
            cntxt.fail_reason = f"Numeric test on non-number: {n}"
return False
return True
|
`5.4.5 XML Schema Numeric Facet Constraints <http://shex.io/shex-semantics/#xs-numeric>`_
Numeric facet constraints apply to the numeric value of RDF Literals with datatypes listed in SPARQL 1.1
Operand Data Types[sparql11-query]. Numeric constraints on non-numeric values fail. totaldigits and
fractiondigits constraints on values not derived from xsd:decimal fail.
|
entailment
|
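A sketch of the numeric comparisons above: rdflib parses an xsd:decimal literal into a Python Decimal, which supports the ordering operators directly.

from rdflib import Literal, XSD

v = Literal("3.14", datatype=XSD.decimal).value  # Decimal('3.14')
print(2 <= v <= 4)   # mininclusive=2 and maxinclusive=4 both hold
print(2 < v)         # minexclusive=2 holds as well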
def nodeSatisfiesValues(cntxt: Context, n: Node, nc: ShExJ.NodeConstraint, _c: DebugContext) -> bool:
""" `5.4.5 Values Constraint <http://shex.io/shex-semantics/#values>`_
For a node n and constraint value v, nodeSatisfies(n, v) if n matches some valueSetValue vsv in v.
"""
if nc.values is None:
return True
else:
if any(_nodeSatisfiesValue(cntxt, n, vsv) for vsv in nc.values):
return True
else:
cntxt.fail_reason = f"Node: {cntxt.n3_mapper.n3(n)} not in value set:\n\t " \
f"{as_json(cntxt.type_last(nc), indent=None)[:60]}..."
return False
|
`5.4.6 Values Constraint <http://shex.io/shex-semantics/#values>`_
For a node n and constraint value v, nodeSatisfies(n, v) if n matches some valueSetValue vsv in v.
|
entailment
|
def _nodeSatisfiesValue(cntxt: Context, n: Node, vsv: ShExJ.valueSetValue) -> bool:
"""
A term matches a valueSetValue if:
* vsv is an objectValue and n = vsv.
* vsv is a Language with langTag lt and n is a language-tagged string with a language tag l and l = lt.
* vsv is a IriStem, LiteralStem or LanguageStem with stem st and nodeIn(n, st).
* vsv is a IriStemRange, LiteralStemRange or LanguageStemRange with stem st and exclusions excls and
nodeIn(n, st) and there is no x in excls such that nodeIn(n, excl).
* vsv is a Wildcard with exclusions excls and there is no x in excls such that nodeIn(n, excl).
Note that ObjectLiteral is *not* typed in ShExJ.jsg, so we identify it by a lack of a 'type' variable
.. note:: Mismatch with spec
This won't work correctly if the stem value is passed in to nodeIn, as there will be no way to know whether
we're matching an IRI or other type
    .. note:: Language issue
The stem range spec shouldn't have the first element in the exclusions
"""
vsv = map_object_literal(vsv)
if isinstance_(vsv, ShExJ.objectValue):
return objectValueMatches(n, vsv)
if isinstance(vsv, ShExJ.Language):
if vsv.languageTag is not None and isinstance(n, Literal) and n.language is not None:
return n.language == vsv.languageTag
else:
return False
if isinstance(vsv, ShExJ.IriStem):
return nodeInIriStem(cntxt, n, vsv.stem)
if isinstance(vsv, ShExJ.IriStemRange):
exclusions = vsv.exclusions if vsv.exclusions is not None else []
return nodeInIriStem(cntxt, n, vsv.stem) and not any(
(uriref_matches_iriref(n, excl) if isinstance(excl, ShExJ.IRIREF) else
uriref_startswith_iriref(n, excl.stem)) for excl in exclusions)
if isinstance(vsv, ShExJ.LiteralStem):
return nodeInLiteralStem(cntxt, n, vsv.stem)
if isinstance(vsv, ShExJ.LiteralStemRange):
exclusions = vsv.exclusions if vsv.exclusions is not None else []
return nodeInLiteralStem(cntxt, n, vsv.stem) and not any(str(n) == excl for excl in exclusions)
if isinstance(vsv, ShExJ.LanguageStem):
return nodeInLanguageStem(cntxt, n, vsv.stem)
if isinstance(vsv, ShExJ.LanguageStemRange):
exclusions = vsv.exclusions if vsv.exclusions is not None else []
return nodeInLanguageStem(cntxt, n, vsv.stem) and not any(str(n) == str(excl) for excl in exclusions)
return False
|
A term matches a valueSetValue if:
* vsv is an objectValue and n = vsv.
* vsv is a Language with langTag lt and n is a language-tagged string with a language tag l and l = lt.
* vsv is a IriStem, LiteralStem or LanguageStem with stem st and nodeIn(n, st).
* vsv is a IriStemRange, LiteralStemRange or LanguageStemRange with stem st and exclusions excls and
nodeIn(n, st) and there is no x in excls such that nodeIn(n, excl).
* vsv is a Wildcard with exclusions excls and there is no x in excls such that nodeIn(n, excl).
Note that ObjectLiteral is *not* typed in ShExJ.jsg, so we identify it by a lack of a 'type' variable
.. note:: Mismatch with spec
This won't work correctly if the stem value is passed in to nodeIn, as there will be no way to know whether
we're matching an IRI or other type
.. note:: Language issue
The stem range spec shouldn't have the first element in the exclusions
|
entailment
|
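A plain-Python sketch of the IriStemRange branch above: the node must fall inside the stem and outside every exclusion.

from rdflib import URIRef

n = URIRef("http://example.org/people/alice")
stem = "http://example.org/people/"
exclusions = ["http://example.org/people/bob"]
print(str(n).startswith(stem) and all(str(n) != x for x in exclusions))  # True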
def nodeInIriStem(_: Context, n: Node, s: ShExJ.IriStem) -> bool:
"""
**nodeIn**: asserts that an RDF node n is equal to an RDF term s or is in a set defined by a
:py:class:`ShExJ.IriStem`, :py:class:`LiteralStem` or :py:class:`LanguageStem`.
The expression `nodeInIriStem(n, s)` is satisfied iff:
    #) `s` is a :py:class:`ShExJ.Wildcard` or
#) `n` is an :py:class:`rdflib.URIRef` and fn:starts-with(`n`, `s`)
"""
return isinstance(s, ShExJ.Wildcard) or \
(isinstance(n, URIRef) and uriref_startswith_iriref(n, str(s)))
|
**nodeIn**: asserts that an RDF node n is equal to an RDF term s or is in a set defined by a
:py:class:`ShExJ.IriStem`, :py:class:`LiteralStem` or :py:class:`LanguageStem`.
The expression `nodeInIriStem(n, s)` is satisfied iff:
#) `s` is a :py:class:`ShExJ.Wildcard` or
#) `n` is an :py:class:`rdflib.URIRef` and fn:starts-with(`n`, `s`)
|
entailment
|
def nodeInLiteralStem(_: Context, n: Node, s: ShExJ.LiteralStem) -> bool:
""" http://shex.io/shex-semantics/#values
**nodeIn**: asserts that an RDF node n is equal to an RDF term s or is in a set defined by a
:py:class:`ShExJ.IriStem`, :py:class:`LiteralStem` or :py:class:`LanguageStem`.
The expression `nodeInLiteralStem(n, s)` is satisfied iff:
    #) `s` is a :py:class:`ShExJ.Wildcard` or
#) `n` is an :py:class:`rdflib.Literal` and fn:starts-with(`n`, `s`)
"""
return isinstance(s, ShExJ.Wildcard) or \
(isinstance(n, Literal) and str(n.value).startswith(str(s)))
|
http://shex.io/shex-semantics/#values
**nodeIn**: asserts that an RDF node n is equal to an RDF term s or is in a set defined by a
:py:class:`ShExJ.IriStem`, :py:class:`LiteralStem` or :py:class:`LanguageStem`.
The expression `nodeInLiteralStem(n, s)` is satisfied iff:
#) `s` is a :py:class:`ShExJ.Wildcard` or
#) `n` is an :py:class:`rdflib.Literal` and fn:starts-with(`n`, `s`)
|
entailment
|
def nodeInLanguageStem(_: Context, n: Node, s: ShExJ.LanguageStem) -> bool:
""" http://shex.io/shex-semantics/#values
**nodeIn**: asserts that an RDF node n is equal to an RDF term s or is in a set defined by a
:py:class:`ShExJ.IriStem`, :py:class:`LiteralStem` or :py:class:`LanguageStem`.
The expression `nodeInLanguageStem(n, s)` is satisfied iff:
    #) `s` is a :py:class:`ShExJ.Wildcard` or
#) `n` is a language-tagged string and fn:starts-with(`n.language`, `s`)
"""
return isinstance(s, ShExJ.Wildcard) or \
(isinstance(n, Literal) and n.language is not None and str(n.language).startswith(str(s)))
|
http://shex.io/shex-semantics/#values
**nodeIn**: asserts that an RDF node n is equal to an RDF term s or is in a set defined by a
:py:class:`ShExJ.IriStem`, :py:class:`LiteralStem` or :py:class:`LanguageStem`.
The expression `nodeInLanguageStem(n, s)` is satisfied iff:
#) `s` is a :py:class:`ShExJ.Wildcard` or
#) `n` is a language-tagged string and fn:starts-with(`n.language`, `s`)
|
entailment
|
def nodeInBnodeStem(_cntxt: Context, _n: Node, _s: Union[str, ShExJ.Wildcard]) -> bool:
""" http://shex.io/shex-semantics/#values
**nodeIn**: asserts that an RDF node n is equal to an RDF term s or is in a set defined by a
:py:class:`ShExJ.IriStem`, :py:class:`LiteralStem` or :py:class:`LanguageStem`.
The expression `nodeInBnodeStem(n, s)` is satisfied iff:
    #) `s` is a :py:class:`ShExJ.Wildcard` or
    #) `n` is a blank node and fn:starts-with(`n`'s blank node identifier, `s`)
"""
# TODO: resolve issue #79 to figure out how to do this
return False
|
http://shex.io/shex-semantics/#values
**nodeIn**: asserts that an RDF node n is equal to an RDF term s or is in a set defined by a
:py:class:`ShExJ.IriStem`, :py:class:`LiteralStem` or :py:class:`LanguageStem`.
The expression `nodeInBnodeStem(n, s)` is satisfied iff:
#) `s` is a :py:class:`ShExJ.Wildcard` or
#) `n` is a blank node and fn:starts-with(`n`'s blank node identifier, `s`)
|
entailment
|
def run_in_syspy(f):
"""
Decorator to run a function in the system python
:param f:
:return:
"""
fname = f.__name__
code_lines = inspect.getsource(f).splitlines()
code = dedent("\n".join(code_lines[1:])) # strip this decorator
    # add call to the function and print its result
code += dedent("""\n
import sys
args = sys.argv[1:]
result = {fname}(*args)
print("%r" % result)
""").format(fname=fname)
env = os.environ
python = findsyspy()
logger.debug("Create function for system python\n%s" % code)
def call_f(*args):
cmd = [python, '-c', code] + list(args)
output = subprocess.check_output(cmd, env=env).decode('utf-8')
result = ast.literal_eval(output)
return result
return call_f
|
Decorator to run a function in the system python
:param f:
:return:
|
entailment
|
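Usage sketch for the decorator above: the function body is re-executed under the system interpreter, and the repr it prints is parsed back with ast.literal_eval.

@run_in_syspy
def syspy_version():
    import sys
    return sys.version

print(syspy_version())  # version string of the system python, not the venv's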
def in_venv():
"""
    :return: True if running from a virtualenv
Has to detect the case where the python binary is run
directly, so VIRTUAL_ENV may not be set
"""
global _in_venv
if _in_venv is not None:
return _in_venv
if not (os.path.isfile(ORIG_PREFIX_TXT) or os.path.isfile(PY_VENV_CFG)):
logger.debug("in_venv no orig_prefix_txt [%s]", ORIG_PREFIX_TXT)
logger.debug("in_venv no py_venv_cfg [%s]", PY_VENV_CFG)
# TODO - check this is actually valid !
_in_venv = False
return _in_venv
if 'VIRTUAL_ENV' in os.environ:
logger.debug("in_venv VIRTUAL_ENV set.")
_in_venv = True
else:
# Find first python in path ... if its not this one,
# ...we are in a different python
python = basename(sys.executable)
for p in os.environ['PATH'].split(os.pathsep):
py_path = join(p, python)
if isfile(py_path):
logger.debug("in_venv py_at [%s] return: %s", (py_path, sys.executable != py_path))
_in_venv = sys.executable != py_path
break
return _in_venv
|
:return: True if running from a virtualenv
Has to detect the case where the python binary is run
directly, so VIRTUAL_ENV may not be set
|
entailment
|
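For comparison, a simpler stdlib check (it does not cover everything the code above handles, such as a venv python invoked directly; legacy virtualenv also exposes sys.real_prefix instead of base_prefix):

import sys

# True inside a PEP 405 venv: the effective prefix differs from the base one
print(sys.prefix != getattr(sys, "base_prefix", sys.prefix))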
def getsyssitepackages():
"""
:return: list of site-packages from system python
"""
global _syssitepackages
if not _syssitepackages:
if not in_venv():
_syssitepackages = get_python_lib()
return _syssitepackages
@run_in_syspy
def run(*args):
import site
return site.getsitepackages()
output = run()
_syssitepackages = output
logger.debug("system site packages: %s", _syssitepackages)
return _syssitepackages
|
:return: list of site-packages from system python
|
entailment
|
def findsyspy():
"""
:return: system python executable
"""
if not in_venv():
return sys.executable
python = basename(realpath(sys.executable))
prefix = None
if HAS_ORIG_PREFIX_TXT:
with open(ORIG_PREFIX_TXT) as op:
prefix = op.read()
elif HAS_PY_VENV_CFG:
prefix = getattr(sys, "_home")
if not prefix:
return None
for folder in os.environ['PATH'].split(os.pathsep):
if folder and \
normpath(normcase(folder)).startswith(normcase(normpath(prefix))) and \
isfile(join(folder, python)):
return join(folder, python)
# OSX: Homebrew doesn't leave python in the PATH
if isfile(join(prefix, "bin", python)):
return join(prefix, "bin", python)
|
:return: system python executable
|
entailment
|
def delete(self, key, cas=0):
"""
Delete a key/value from server. If key does not exist, it returns True.
:param key: Key's name to be deleted
:param cas: CAS of the key
        :return: True in case of success and False in case of failure.
"""
server = self._get_server(key)
return server.delete(key, cas)
|
Delete a key/value from server. If key does not exist, it returns True.
:param key: Key's name to be deleted
:param cas: CAS of the key
:return: True in case of success and False in case of failure.
|
entailment
|
def set(self, key, value, time=0, compress_level=-1):
"""
Set a value for a key on server.
:param key: Key's name
:type key: str
:param value: A value to be stored on server.
:type value: object
:param time: Time in seconds that your key will expire.
:type time: int
:param compress_level: How much to compress.
0 = no compression, 1 = fastest, 9 = slowest but best,
-1 = default compression level.
:type compress_level: int
:return: True in case of success and False in case of failure
:rtype: bool
"""
server = self._get_server(key)
return server.set(key, value, time, compress_level)
|
Set a value for a key on server.
:param key: Key's name
:type key: str
:param value: A value to be stored on server.
:type value: object
:param time: Time in seconds that your key will expire.
:type time: int
:param compress_level: How much to compress.
0 = no compression, 1 = fastest, 9 = slowest but best,
-1 = default compression level.
:type compress_level: int
:return: True in case of success and False in case of failure
:rtype: bool
|
entailment
|
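A usage sketch for set, continuing the hypothetical client instance and reading the value back through the get_multi method documented below:

data = 'x' * 10000
client.set('greeting', 'hello', time=60)    # expires after 60 seconds
client.set('blob', data, compress_level=9)  # slowest but best compression
print(client.get_multi(['greeting'])['greeting'])  # 'hello'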
def set_multi(self, mappings, time=0, compress_level=-1):
"""
        Set multiple keys with their values on the server.
:param mappings: A dict with keys/values
:type mappings: dict
:param time: Time in seconds that your key will expire.
:type time: int
:param compress_level: How much to compress.
0 = no compression, 1 = fastest, 9 = slowest but best,
-1 = default compression level.
:type compress_level: int
:return: True in case of success and False in case of failure
:rtype: bool
"""
returns = []
if not mappings:
return False
server_mappings = defaultdict(dict)
for key, value in mappings.items():
server_key = self._get_server(key)
server_mappings[server_key].update([(key, value)])
for server, m in server_mappings.items():
returns.append(server.set_multi(m, time, compress_level))
return all(returns)
|
Set multiple keys with their values on the server.
:param mappings: A dict with keys/values
:type mappings: dict
:param time: Time in seconds that your key will expire.
:type time: int
:param compress_level: How much to compress.
0 = no compression, 1 = fastest, 9 = slowest but best,
-1 = default compression level.
:type compress_level: int
:return: True in case of success and False in case of failure
:rtype: bool
|
entailment
|
def add(self, key, value, time=0, compress_level=-1):
"""
        Add a key/value to the server only if it does not exist.
:param key: Key's name
:type key: six.string_types
:param value: A value to be stored on server.
:type value: object
:param time: Time in seconds that your key will expire.
:type time: int
:param compress_level: How much to compress.
0 = no compression, 1 = fastest, 9 = slowest but best,
-1 = default compression level.
:type compress_level: int
:return: True if key is added False if key already exists
:rtype: bool
"""
server = self._get_server(key)
return server.add(key, value, time, compress_level)
|
Add a key/value to the server only if it does not exist.
:param key: Key's name
:type key: six.string_types
:param value: A value to be stored on server.
:type value: object
:param time: Time in seconds that your key will expire.
:type time: int
:param compress_level: How much to compress.
0 = no compression, 1 = fastest, 9 = slowest but best,
-1 = default compression level.
:type compress_level: int
:return: True if key is added False if key already exists
:rtype: bool
|
entailment
|
def replace(self, key, value, time=0, compress_level=-1):
"""
        Replace a key/value on the server only if it already exists.
:param key: Key's name
:type key: six.string_types
:param value: A value to be stored on server.
:type value: object
:param time: Time in seconds that your key will expire.
:type time: int
:param compress_level: How much to compress.
0 = no compression, 1 = fastest, 9 = slowest but best,
-1 = default compression level.
:type compress_level: int
        :return: True if key is replaced, False if key does not exist
:rtype: bool
"""
server = self._get_server(key)
return server.replace(key, value, time, compress_level)
|
Replace a key/value on the server only if it already exists.
:param key: Key's name
:type key: six.string_types
:param value: A value to be stored on server.
:type value: object
:param time: Time in seconds that your key will expire.
:type time: int
:param compress_level: How much to compress.
0 = no compression, 1 = fastest, 9 = slowest but best,
-1 = default compression level.
:type compress_level: int
:return: True if key is replaced, False if key does not exist
:rtype: bool
|
entailment
|
def get_multi(self, keys, get_cas=False):
"""
Get multiple keys from server.
        :param keys: A list of keys to fetch from the server.
:type keys: list
:param get_cas: If get_cas is true, each value is (data, cas), with each result's CAS value.
:type get_cas: boolean
:return: A dict with all requested keys.
:rtype: dict
"""
servers = defaultdict(list)
d = {}
for key in keys:
server_key = self._get_server(key)
servers[server_key].append(key)
for server, keys in servers.items():
results = server.get_multi(keys)
if not get_cas:
# Remove CAS data
for key, (value, cas) in results.items():
results[key] = value
d.update(results)
return d
|
Get multiple keys from server.
:param keys: A list of keys to fetch from the server.
:type keys: list
:param get_cas: If get_cas is true, each value is (data, cas), with each result's CAS value.
:type get_cas: boolean
:return: A dict with all requested keys.
:rtype: dict
|
entailment
|
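Continuing the hypothetical client: set_multi fans the mappings out by key, and get_multi reassembles them, optionally with CAS values.

client.set_multi({'a': 1, 'b': 2}, time=120)
print(client.get_multi(['a', 'b']))           # {'a': 1, 'b': 2}
print(client.get_multi(['a'], get_cas=True))  # {'a': (1, <cas value>)}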
def cas(self, key, value, cas, time=0, compress_level=-1):
"""
Set a value for a key on server if its CAS value matches cas.
:param key: Key's name
:type key: six.string_types
:param value: A value to be stored on server.
:type value: object
:param cas: The CAS value previously obtained from a call to get*.
:type cas: int
:param time: Time in seconds that your key will expire.
:type time: int
:param compress_level: How much to compress.
0 = no compression, 1 = fastest, 9 = slowest but best,
-1 = default compression level.
:type compress_level: int
:return: True in case of success and False in case of failure
:rtype: bool
"""
server = self._get_server(key)
return server.cas(key, value, cas, time, compress_level)
|
Set a value for a key on server if its CAS value matches cas.
:param key: Key's name
:type key: six.string_types
:param value: A value to be stored on server.
:type value: object
:param cas: The CAS value previously obtained from a call to get*.
:type cas: int
:param time: Time in seconds that your key will expire.
:type time: int
:param compress_level: How much to compress.
0 = no compression, 1 = fastest, 9 = slowest but best,
-1 = default compression level.
:type compress_level: int
:return: True in case of success and False in case of failure
:rtype: bool
|
entailment
|
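An optimistic-update sketch built from the methods above, assuming an integer was stored under 'counter': read the value with its CAS token via get_multi, then write back only if nothing changed in between.

value, cas_id = client.get_multi(['counter'], get_cas=True)['counter']
if not client.cas('counter', value + 1, cas_id):
    print('someone else updated counter first; retry')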
def incr(self, key, value):
"""
        Increment a key; if it exists, return its actual value, otherwise return 0.
:param key: Key's name
:type key: six.string_types
:param value: Number to be incremented
:type value: int
:return: Actual value of the key on server
:rtype: int
"""
server = self._get_server(key)
return server.incr(key, value)
|
Increment a key; if it exists, return its actual value, otherwise return 0.
:param key: Key's name
:type key: six.string_types
:param value: Number to be incremented
:type value: int
:return: Actual value of the key on server
:rtype: int
|
entailment
|
def decr(self, key, value):
"""
        Decrement a key; if it exists, return its actual value, otherwise return 0.
        The returned value never goes below 0.
:param key: Key's name
:type key: six.string_types
:param value: Number to be decremented
:type value: int
:return: Actual value of the key on server
:rtype: int
"""
server = self._get_server(key)
return server.decr(key, value)
|
Decrement a key; if it exists, return its actual value, otherwise return 0.
The returned value never goes below 0.
:param key: Key's name
:type key: six.string_types
:param value: Number to be decremented
:type value: int
:return: Actual value of the key on server
:rtype: int
|
entailment
|
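A short sketch of the counter semantics described above:

print(client.incr('hits', 1))   # value after increment (0 if the key did not exist)
print(client.decr('hits', 5))   # decrement clamps at 0 rather than going negative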
def satisfiesShape(cntxt: Context, n: Node, S: ShExJ.Shape, c: DebugContext) -> bool:
""" `5.5.2 Semantics <http://shex.io/shex-semantics/#triple-expressions-semantics>`_
For a node `n`, shape `S`, graph `G`, and shapeMap `m`, `satisfies(n, S, G, m)` if and only if:
* `neigh(G, n)` can be partitioned into two sets matched and remainder such that
`matches(matched, expression, m)`. If expression is absent, remainder = `neigh(G, n)`.
:param n: focus node
:param S: Shape to be satisfied
:param cntxt: Evaluation context
:param c: Debug context
:return: true iff `satisfies(n, S, cntxt)`
"""
# Recursion detection. If start_evaluating returns a boolean value, this is the assumed result of the shape
# evaluation. If it returns None, then an initial evaluation is needed
rslt = cntxt.start_evaluating(n, S)
if rslt is None:
cntxt.evaluate_stack.append((n, S.id))
predicates = directed_predicates_in_expression(S, cntxt)
matchables = RDFGraph()
# Note: The code below does an "over-slurp" for the sake of expediency. If you are interested in
# getting EXACTLY the needed triples, set cntxt.over_slurp to false
if isinstance(cntxt.graph, SlurpyGraph) and cntxt.over_slurp:
with slurper(cntxt, n, S) as g:
_ = g.triples((n, None, None))
for predicate, direction in predicates.items():
with slurper(cntxt, n, S) as g:
matchables.add_triples(g.triples((n if direction.is_fwd else None,
iriref_to_uriref(predicate),
n if direction.is_rev else None)))
if c.debug:
print(c.i(1, "predicates:", sorted(cntxt.n3_mapper.n3(p) for p in predicates.keys())))
print(c.i(1, "matchables:", sorted(cntxt.n3_mapper.n3(m) for m in matchables)))
print()
if S.closed:
# TODO: Is this working correctly on reverse items?
non_matchables = RDFGraph([t for t in arcsOut(cntxt.graph, n) if t not in matchables])
if len(non_matchables):
cntxt.fail_reason = "Unmatched triples in CLOSED shape:"
cntxt.fail_reason = '\n'.join(f"\t{t}" for t in non_matchables)
if c.debug:
print(c.i(0,
f"<--- Satisfies shape {c.d()} FAIL - "
f"{len(non_matchables)} non-matching triples on a closed shape"))
print(c.i(1, "", list(non_matchables)))
print()
return False
# Evaluate the actual expression. Start assuming everything matches...
if S.expression:
if matches(cntxt, matchables, S.expression):
rslt = True
else:
                extras = {iriref_to_uriref(e) for e in S.extra} if S.extra is not None else set()
if len(extras):
permutable_matchables = RDFGraph([t for t in matchables if t.p in extras])
non_permutable_matchables = RDFGraph([t for t in matchables if t not in permutable_matchables])
if c.debug:
print(c.i(1,
f"Complete match failed -- evaluating extras", list(extras)))
for matched, remainder in partition_2(permutable_matchables):
permutation = non_permutable_matchables.union(matched)
if matches(cntxt, permutation, S.expression):
rslt = True
break
rslt = rslt or False
else:
rslt = True # Empty shape
# If an assumption was made and the result doesn't match the assumption, switch directions and try again
done, consistent = cntxt.done_evaluating(n, S, rslt)
if not done:
rslt = satisfiesShape(cntxt, n, S)
rslt = rslt and consistent
cntxt.evaluate_stack.pop()
return rslt
|
`5.5.2 Semantics <http://shex.io/shex-semantics/#triple-expressions-semantics>`_
For a node `n`, shape `S`, graph `G`, and shapeMap `m`, `satisfies(n, S, G, m)` if and only if:
* `neigh(G, n)` can be partitioned into two sets matched and remainder such that
`matches(matched, expression, m)`. If expression is absent, remainder = `neigh(G, n)`.
:param n: focus node
:param S: Shape to be satisfied
:param cntxt: Evaluation context
:param c: Debug context
:return: true iff `satisfies(n, S, cntxt)`
|
entailment
|
def valid_remainder(cntxt: Context, n: Node, matchables: RDFGraph, S: ShExJ.Shape) -> bool:
"""
Let **outs** be the arcsOut in remainder: `outs = remainder ∩ arcsOut(G, n)`.
Let **matchables** be the triples in outs whose predicate appears in a TripleConstraint in `expression`. If
`expression` is absent, matchables = Ø (the empty set).
* There is no triple in **matchables** which matches a TripleConstraint in expression
* There is no triple in **matchables** whose predicate does not appear in extra.
* closed is false or unmatchables is empty
:param cntxt: evaluation context
:param n: focus node
:param matchables: non-matched triples
:param S: Shape being evaluated
:return: True if remainder is valid
"""
# TODO: Update this and satisfies to address the new algorithm
# Let **outs** be the arcsOut in remainder: `outs = remainder ∩ arcsOut(G, n)`.
outs = arcsOut(cntxt.graph, n).intersection(matchables)
# predicates that in a TripleConstraint in `expression`
predicates = predicates_in_expression(S, cntxt)
# Let **matchables** be the triples in outs whose predicate appears in predicates. If
# `expression` is absent, matchables = Ø (the empty set).
matchables = RDFGraph(t for t in outs if str(t.p) in predicates)
# There is no triple in **matchables** which matches a TripleConstraint in expression
if matchables and S.expression is not None:
tes = triple_constraints_in_expression(S.expression, cntxt)
for m in matchables:
if any(matchesTripleConstraint(cntxt, m, te) for te in tes):
return False
# There is no triple in **matchables** whose predicate does not appear in extra.
    extras = {iriref_to_uriref(e) for e in S.extra} if S.extra is not None else set()
if any(t.p not in extras for t in matchables):
return False
# closed is false or unmatchables is empty.
return not S.closed.val or not bool(outs - matchables)
|
Let **outs** be the arcsOut in remainder: `outs = remainder ∩ arcsOut(G, n)`.
Let **matchables** be the triples in outs whose predicate appears in a TripleConstraint in `expression`. If
`expression` is absent, matchables = Ø (the empty set).
* There is no triple in **matchables** which matches a TripleConstraint in expression
* There is no triple in **matchables** whose predicate does not appear in extra.
* closed is false or unmatchables is empty
:param cntxt: evaluation context
:param n: focus node
:param matchables: non-matched triples
:param S: Shape being evaluated
:return: True if remainder is valid
|
entailment
|
def matches(cntxt: Context, T: RDFGraph, expr: ShExJ.tripleExpr) -> bool:
"""
**matches**: asserts that a triple expression is matched by a set of triples that come from the neighbourhood of a
node in an RDF graph. The expression `matches(T, expr, m)` indicates that a set of triples `T` can satisfy these
rules:
* expr has semActs and `matches(T, expr, m)` by the remaining rules in this list and the evaluation
of semActs succeeds according to the section below on Semantic Actions.
* expr has a cardinality of min and/or max not equal to 1, where a max of -1 is treated as unbounded, and T
can be partitioned into k subsets T1, T2,…Tk such that min ≤ k ≤ max and for each Tn,
`matches(Tn, expr, m)` by the remaining rules in this list.
        * expr is a OneOf and there is some shape expression se2 in shapeExprs such that matches(T, se2, m).
* expr is an EachOf and there is some partition of T into T1, T2,… such that for every expression
expr1, expr2,… in shapeExprs, matches(Tn, exprn, m).
* expr is a TripleConstraint and:
            * T is a set of one triple. Let t be the sole triple in T.
* t's predicate equals expr's predicate. Let value be t's subject if inverse is true, else t's object.
* if inverse is true, t is in arcsIn, else t is in `arcsOut`.
* either
* expr has no valueExpr
            * or `satisfies(value, valueExpr, G, m)`.
"""
if isinstance_(expr, ShExJ.tripleExprLabel):
return matchesExpr(cntxt, T, expr)
else:
return matchesCardinality(cntxt, T, expr) and (expr.semActs is None or semActsSatisfied(expr.semActs, cntxt))
|
**matches**: asserts that a triple expression is matched by a set of triples that come from the neighbourhood of a
node in an RDF graph. The expression `matches(T, expr, m)` indicates that a set of triples `T` can satisfy these
rules:
* expr has semActs and `matches(T, expr, m)` by the remaining rules in this list and the evaluation
of semActs succeeds according to the section below on Semantic Actions.
* expr has a cardinality of min and/or max not equal to 1, where a max of -1 is treated as unbounded, and T
can be partitioned into k subsets T1, T2,…Tk such that min ≤ k ≤ max and for each Tn,
`matches(Tn, expr, m)` by the remaining rules in this list.
* expr is a OneOf and there is some shape expression se2 in shapeExprs such that matches(T, se2, m).
* expr is an EachOf and there is some partition of T into T1, T2,… such that for every expression
expr1, expr2,… in shapeExprs, matches(Tn, exprn, m).
* expr is a TripleConstraint and:
    * T is a set of one triple. Let t be the sole triple in T.
* t's predicate equals expr's predicate. Let value be t's subject if inverse is true, else t's object.
* if inverse is true, t is in arcsIn, else t is in `arcsOut`.
* either
* expr has no valueExpr
    * or `satisfies(value, valueExpr, G, m)`.
|
entailment
|
def matchesCardinality(cntxt: Context, T: RDFGraph, expr: Union[ShExJ.tripleExpr, ShExJ.tripleExprLabel],
c: DebugContext) -> bool:
""" Evaluate cardinality expression
expr has a cardinality of min and/or max not equal to 1, where a max of -1 is treated as unbounded, and
T can be partitioned into k subsets T1, T2,…Tk such that min ≤ k ≤ max and for each Tn,
matches(Tn, expr, m) by the remaining rules in this list.
"""
# TODO: Cardinality defaults into spec
min_ = expr.min if expr.min is not None else 1
max_ = expr.max if expr.max is not None else 1
cardinality_text = f"{{{min_},{'*' if max_ == -1 else max_}}}"
if c.debug and (min_ != 0 or len(T) != 0):
print(f"{cardinality_text} matching {len(T)} triples")
if min_ == 0 and len(T) == 0:
return True
if isinstance(expr, ShExJ.TripleConstraint):
if len(T) < min_:
if len(T) > 0:
_fail_triples(cntxt, T)
cntxt.fail_reason = f" {len(T)} triples less than {cardinality_text}"
else:
cntxt.fail_reason = f" No matching triples found for predicate {cntxt.n3_mapper.n3(expr.predicate)}"
return False
elif 0 <= max_ < len(T):
_fail_triples(cntxt, T)
cntxt.fail_reason = f" {len(T)} triples exceeds max {cardinality_text}"
return False
else:
return all(matchesTripleConstraint(cntxt, t, expr) for t in T)
else:
for partition in _partitions(T, min_, max_):
if all(matchesExpr(cntxt, part, expr) for part in partition):
return True
if min_ != 1 or max_ != 1:
_fail_triples(cntxt, T)
cntxt.fail_reason = f" {len(T)} triples cannot be partitioned into {cardinality_text} passing groups"
return False
|
Evaluate cardinality expression
expr has a cardinality of min and/or max not equal to 1, where a max of -1 is treated as unbounded, and
T can be partitioned into k subsets T1, T2,…Tk such that min ≤ k ≤ max and for each Tn,
matches(Tn, expr, m) by the remaining rules in this list.
|
entailment
|
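A small sketch of the defaulting and rendering done at the top of matchesCardinality: absent min/max default to 1, and a max of -1 renders as '*'.

def cardinality_text(min_=None, max_=None):
    min_ = 1 if min_ is None else min_
    max_ = 1 if max_ is None else max_
    return "{%s,%s}" % (min_, '*' if max_ == -1 else max_)

print(cardinality_text())       # {1,1}
print(cardinality_text(0, -1))  # {0,*}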
def matchesExpr(cntxt: Context, T: RDFGraph, expr: ShExJ.tripleExpr, _: DebugContext) -> bool:
""" Evaluate the expression
"""
if isinstance(expr, ShExJ.OneOf):
return matchesOneOf(cntxt, T, expr)
elif isinstance(expr, ShExJ.EachOf):
return matchesEachOf(cntxt, T, expr)
elif isinstance(expr, ShExJ.TripleConstraint):
return matchesCardinality(cntxt, T, expr)
elif isinstance_(expr, ShExJ.tripleExprLabel):
return matchesTripleExprRef(cntxt, T, expr)
else:
raise Exception("Unknown expression")
|
Evaluate the expression
|
entailment
|
def matchesOneOf(cntxt: Context, T: RDFGraph, expr: ShExJ.OneOf, _: DebugContext) -> bool:
"""
    expr is a OneOf and there is some shape expression se2 in shapeExprs such that matches(T, se2, m).
"""
return any(matches(cntxt, T, e) for e in expr.expressions)
|
expr is a OneOf and there is some shape expression se2 in shapeExprs such that matches(T, se2, m).
|
entailment
|
def matchesEachOf(cntxt: Context, T: RDFGraph, expr: ShExJ.EachOf, _: DebugContext) -> bool:
""" expr is an EachOf and there is some partition of T into T1, T2,… such that for every expression
expr1, expr2,… in shapeExprs, matches(Tn, exprn, m).
"""
return EachOfEvaluator(cntxt, T, expr).evaluate(cntxt)
|
expr is an EachOf and there is some partition of T into T1, T2,… such that for every expression
expr1, expr2,… in shapeExprs, matches(Tn, exprn, m).
|
entailment
|
def matchesTripleConstraint(cntxt: Context, t: RDFTriple, expr: ShExJ.TripleConstraint, c: DebugContext) -> bool:
"""
expr is a TripleConstraint and:
* t is a triple
* t's predicate equals expr's predicate.
Let value be t's subject if inverse is true, else t's object.
* if inverse is true, t is in arcsIn, else t is in arcsOut.
"""
from pyshex.shape_expressions_language.p5_3_shape_expressions import satisfies
if c.debug:
print(c.i(1, f" triple: {t}"))
print(c.i(1, '', expr._as_json_dumps().split('\n')))
if uriref_matches_iriref(t.p, expr.predicate):
value = t.s if expr.inverse else t.o
return expr.valueExpr is None or satisfies(cntxt, value, expr.valueExpr)
else:
cntxt.fail_reason = f"Predicate mismatch: {t.p} ≠ {expr.predicate}"
return False
|
expr is a TripleConstraint and:
* t is a triple
* t's predicate equals expr's predicate.
Let value be t's subject if inverse is true, else t's object.
* if inverse is true, t is in arcsIn, else t is in arcsOut.
|
entailment
|