| sentence1 (string, lengths 52 to 3.87M) | sentence2 (string, lengths 1 to 47.2k) | label (1 class) |
|---|---|---|
def add_panel_to_edit_handler(model, panel_cls, heading, index=None):
"""
Adds specified panel class to model class.
:param model: the model class.
:param panel_cls: the panel class.
:param heading: the panel heading.
:param index: the index position to insert at.
"""
from wagtail.wagtailadmin.views.pages import get_page_edit_handler
edit_handler = get_page_edit_handler(model)
panel_instance = ObjectList(
[panel_cls(),],
heading = heading
).bind_to_model(model)
# Test against None explicitly so an index of 0 still inserts first.
if index is not None:
edit_handler.children.insert(index, panel_instance)
else:
edit_handler.children.append(panel_instance)
|
Adds specified panel class to model class.
:param model: the model class.
:param panel_cls: the panel class.
:param heading: the panel heading.
:param index: the index position to insert at.
|
entailment
|
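A hedged usage sketch of the helper above, mirroring the call made later in this section by add_relationship_panels (the BlogPage import is hypothetical):
from wagtailplus.utils.edit_handlers import add_panel_to_edit_handler
from wagtailplus.wagtailrelations.edit_handlers import RelatedPanel
from blog.models import BlogPage  # hypothetical Page subclass
# Append a "Related" tab to BlogPage's edit handler; pass index=0 to
# insert it before the existing tabs instead.
add_panel_to_edit_handler(BlogPage, RelatedPanel, 'Related')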
def get_context_data(self, **kwargs):
"""
Returns context dictionary for view.
:rtype: dict.
"""
#noinspection PyUnresolvedReferences
query_str = self.request.GET.get('q', None)
queryset = kwargs.pop('object_list', self.object_list)
context_object_name = self.get_context_object_name(queryset)
# Build the context dictionary.
context = {
'ordering': self.get_ordering(),
'query_string': query_str,
'is_searching': bool(query_str),
}
# Add extra variables to context for non-AJAX requests.
#noinspection PyUnresolvedReferences
if not self.request.is_ajax() or kwargs.get('force_search', False):
context.update({
'search_form': self.get_search_form(),
'popular_tags': self.model.popular_tags()
})
if context_object_name is not None:
context[context_object_name] = queryset
# Update context with any additional keyword arguments.
context.update(kwargs)
return super(IndexView, self).get_context_data(**context)
|
Returns context dictionary for view.
:rtype: dict.
|
entailment
|
def get_ordering(self):
"""
Returns ordering value for list.
:rtype: str.
"""
#noinspection PyUnresolvedReferences
ordering = self.request.GET.get('ordering', None)
if ordering not in ['title', '-created_at']:
ordering = '-created_at'
return ordering
|
Returns ordering value for list.
:rtype: str.
|
entailment
|
def get_queryset(self):
"""
Returns queryset instance.
:rtype: django.db.models.query.QuerySet.
"""
queryset = super(IndexView, self).get_queryset()
search_form = self.get_search_form()
if search_form.is_valid():
query_str = search_form.cleaned_data.get('q', '').strip()
queryset = self.model.objects.search(query_str)
return queryset
|
Returns queryset instance.
:rtype: django.db.models.query.QuerySet.
|
entailment
|
def get_search_form(self):
"""
Returns search form instance.
:rtype: django.forms.ModelForm.
"""
#noinspection PyUnresolvedReferences
if 'q' in self.request.GET:
#noinspection PyUnresolvedReferences
return self.search_form_class(self.request.GET)
else:
return self.search_form_class(placeholder=_(u'Search'))
|
Returns search form instance.
:rtype: django.forms.ModelForm.
|
entailment
|
def get_template_names(self):
"""
Returns a list of template names for the view.
:rtype: list.
"""
#noinspection PyUnresolvedReferences
if self.request.is_ajax():
template_name = '/results.html'
else:
template_name = '/index.html'
return ['{0}{1}'.format(self.template_dir, template_name)]
|
Returns a list of template names for the view.
:rtype: list.
|
entailment
|
def paginate_queryset(self, queryset, page_size):
"""
Returns tuple containing paginator instance, page instance,
object list, and whether there are other pages.
:param queryset: the queryset instance to paginate.
:param page_size: the number of instances per page.
:rtype: tuple.
"""
paginator = self.get_paginator(
queryset,
page_size,
orphans = self.get_paginate_orphans(),
allow_empty_first_page = self.get_allow_empty()
)
page_kwarg = self.page_kwarg
#noinspection PyUnresolvedReferences
page_num = self.kwargs.get(page_kwarg) or self.request.GET.get(page_kwarg) or 1
# Default to a valid page.
try:
page = paginator.page(page_num)
except PageNotAnInteger:
page = paginator.page(1)
except EmptyPage:
page = paginator.page(paginator.num_pages)
#noinspection PyRedundantParentheses
return (paginator, page, page.object_list, page.has_other_pages())
|
Returns tuple containing paginator instance, page instance,
object list, and whether there are other pages.
:param queryset: the queryset instance to paginate.
:param page_size: the number of instances per page.
:rtype: tuple.
|
entailment
|
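The try/except fallback above is standard Django pagination; a minimal standalone sketch of the same "default to a valid page" pattern (the item list and page size are illustrative):
from django.core.paginator import EmptyPage, PageNotAnInteger, Paginator
paginator = Paginator(list(range(10)), 4)
page_num = 'garbage'  # e.g. a mangled ?page= value
try:
    page = paginator.page(page_num)
except PageNotAnInteger:
    page = paginator.page(1)  # default to the first page
except EmptyPage:
    page = paginator.page(paginator.num_pages)  # clamp to the last page
print(page.number, page.has_other_pages())  # 1 True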
def form_invalid(self, form):
"""
Processes an invalid form submittal.
:param form: the form instance.
:rtype: django.http.HttpResponse.
"""
meta = getattr(self.model, '_meta')
#noinspection PyUnresolvedReferences
messages.error(
self.request,
_(u'The {0} could not be saved due to errors.').format(
meta.verbose_name.lower()
)
)
return super(BaseEditView, self).form_invalid(form)
|
Processes an invalid form submittal.
:param form: the form instance.
:rtype: django.http.HttpResponse.
|
entailment
|
def form_valid(self, form):
"""
Processes a valid form submittal.
:param form: the form instance.
:rtype: django.http.HttpResponse.
"""
#noinspection PyAttributeOutsideInit
self.object = form.save()
meta = getattr(self.object, '_meta')
# Index the object.
for backend in get_search_backends():
backend.add(self.object)
#noinspection PyUnresolvedReferences
messages.success(
self.request,
_(u'{0} "{1}" saved.').format(
meta.verbose_name,
str(self.object)
),
buttons=[messages.button(
reverse(
'{0}:edit'.format(self.url_namespace),
args=(self.object.id,)
),
_(u'Edit')
)]
)
return redirect(self.get_success_url())
|
Processes a valid form submittal.
:param form: the form instance.
:rtype: django.http.HttpResponse.
|
entailment
|
def get_success_url(self):
"""
Returns redirect URL for valid form submittal.
:rtype: str.
"""
if self.success_url:
url = force_text(self.success_url)
else:
url = reverse('{0}:index'.format(self.url_namespace))
return url
|
Returns redirect URL for valid form submittal.
:rtype: str.
|
entailment
|
def delete(self, request, *args, **kwargs):
"""
Processes deletion of the specified instance.
:param request: the request instance.
:rtype: django.http.HttpResponse.
"""
#noinspection PyAttributeOutsideInit
self.object = self.get_object()
success_url = self.get_success_url()
meta = getattr(self.object, '_meta')
self.object.delete()
messages.success(
request,
_(u'{0} "{1}" deleted.').format(
meta.verbose_name.lower(),
str(self.object)
)
)
return redirect(success_url)
|
Processes deletion of the specified instance.
:param request: the request instance.
:rtype: django.http.HttpResponse.
|
entailment
|
def chunked(iterable, n):
"""Returns chunks of length n from iterable
If len(iterable) % n != 0, then the last chunk will have length
less than n.
Example:
>>> list(chunked([1, 2, 3, 4, 5], 2))
[(1, 2), (3, 4), (5,)]
"""
iterable = iter(iterable)
while True:
t = tuple(islice(iterable, n))
if t:
yield t
else:
return
|
Returns chunks of length n from iterable
If len(iterable) % n != 0, then the last chunk will have length
less than n.
Example:
>>> list(chunked([1, 2, 3, 4, 5], 2))
[(1, 2), (3, 4), (5,)]
|
entailment
|
def format_explanation(explanation, indent=' ', indent_level=0):
"""Return explanation in an easier to read format
Easier to read for me, at least.
"""
if not explanation:
return ''
# Note: This is probably a crap implementation, but it's an
# interesting starting point for a better formatter.
line = ('%s%s %2.4f' % ((indent * indent_level),
explanation['description'],
explanation['value']))
if 'details' in explanation:
details = '\n'.join(
[format_explanation(subtree, indent, indent_level + 1)
for subtree in explanation['details']])
return line + '\n' + details
return line
|
Return explanation in an easier to read format
Easier to read for me, at least.
|
entailment
|
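A small illustration of the input shape and the indented output produced by the function above (the description strings and values are made up):
explanation = {
    'description': 'product of:',
    'value': 0.5,
    'details': [
        {'description': 'weight(title:truck)', 'value': 0.25},
        {'description': 'queryNorm', 'value': 2.0},
    ],
}
# format_explanation(explanation) then returns:
# product of: 0.5000
#   weight(title:truck) 0.2500
#   queryNorm 2.0000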
def get_es(**overrides):
"""Return a elasticsearch Elasticsearch object using settings
from ``settings.py``.
:arg overrides: Allows you to override defaults to create the
ElasticSearch object. You can override any of the arguments
isted in :py:func:`elasticutils.get_es`.
For example, if you wanted to create an ElasticSearch with a
longer timeout to a different cluster, you'd do:
>>> from elasticutils.contrib.django import get_es
>>> es = get_es(urls=['http://some_other_cluster:9200'], timeout=30)
"""
defaults = {
'urls': settings.ES_URLS,
'timeout': getattr(settings, 'ES_TIMEOUT', 5)
}
defaults.update(overrides)
return base_get_es(**defaults)
|
Return an elasticsearch Elasticsearch object using settings
from ``settings.py``.
:arg overrides: Allows you to override defaults to create the
ElasticSearch object. You can override any of the arguments
listed in :py:func:`elasticutils.get_es`.
For example, if you wanted to create an ElasticSearch with a
longer timeout to a different cluster, you'd do:
>>> from elasticutils.contrib.django import get_es
>>> es = get_es(urls=['http://some_other_cluster:9200'], timeout=30)
|
entailment
|
def es_required(fun):
"""Wrap a callable and return None if ES_DISABLED is False.
This also adds an additional `es` argument to the callable
giving you an ElasticSearch instance to use.
"""
@wraps(fun)
def wrapper(*args, **kw):
if getattr(settings, 'ES_DISABLED', False):
log.debug('Search disabled for %s.' % fun)
return
return fun(*args, es=get_es(), **kw)
return wrapper
|
Wrap a callable and return None if ES_DISABLED is True.
This also adds an additional `es` argument to the callable
giving you an ElasticSearch instance to use.
|
entailment
|
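A hedged usage sketch of the decorator above (the task function and the index/doctype names are illustrative):
from elasticutils.contrib.django import es_required

@es_required
def index_post(post_body, es=None):
    # `es` is injected by the decorator; with ES_DISABLED=True the
    # body is skipped entirely and None is returned.
    es.index(index='blog', doc_type='post', body=post_body)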
def get_es(self, default_builder=get_es):
"""Returns the elasticsearch Elasticsearch object to use.
This uses the django get_es builder by default which takes
into account settings in ``settings.py``.
"""
return super(S, self).get_es(default_builder=default_builder)
|
Returns the elasticsearch Elasticsearch object to use.
This uses the django get_es builder by default which takes
into account settings in ``settings.py``.
|
entailment
|
def get_indexes(self, default_indexes=None):
"""Returns the list of indexes to act on based on ES_INDEXES setting
"""
doctype = self.type.get_mapping_type_name()
indexes = (settings.ES_INDEXES.get(doctype) or
settings.ES_INDEXES['default'])
if isinstance(indexes, six.string_types):
indexes = [indexes]
return super(S, self).get_indexes(default_indexes=indexes)
|
Returns the list of indexes to act on based on ES_INDEXES setting
|
entailment
|
def get_doctypes(self, default_doctypes=None):
"""Returns the doctypes (or mapping type names) to use."""
doctypes = self.type.get_mapping_type_name()
if isinstance(doctypes, six.string_types):
doctypes = [doctypes]
return super(S, self).get_doctypes(default_doctypes=doctypes)
|
Returns the doctypes (or mapping type names) to use.
|
entailment
|
def get_index(cls):
"""Gets the index for this model.
The index for this model is specified in `settings.ES_INDEXES`
which is a dict of mapping type -> index name.
By default, this uses `.get_mapping_type()` to determine the
mapping and returns the value in `settings.ES_INDEXES` for that
or ``settings.ES_INDEXES['default']``.
Override this to compute it differently.
:returns: index name to use
"""
indexes = settings.ES_INDEXES
index = indexes.get(cls.get_mapping_type_name()) or indexes['default']
if not isinstance(index, six.string_types):
# FIXME - not sure what to do here, but we only want one
# index and somehow this isn't one index.
index = index[0]
return index
|
Gets the index for this model.
The index for this model is specified in `settings.ES_INDEXES`
which is a dict of mapping type -> index name.
By default, this uses `.get_mapping_type()` to determine the
mapping and returns the value in `settings.ES_INDEXES` for that
or ``settings.ES_INDEXES['default']``.
Override this to compute it differently.
:returns: index name to use
|
entailment
|
def get_indexable(cls):
"""Returns the queryset of ids of all things to be indexed.
Defaults to::
cls.get_model().objects.order_by('id').values_list(
'id', flat=True)
:returns: iterable of ids of objects to be indexed
"""
model = cls.get_model()
return model.objects.order_by('id').values_list('id', flat=True)
|
Returns the queryset of ids of all things to be indexed.
Defaults to::
cls.get_model().objects.order_by('id').values_list(
'id', flat=True)
:returns: iterable of ids of objects to be indexed
|
entailment
|
def get_es(urls=None, timeout=DEFAULT_TIMEOUT, force_new=False, **settings):
"""Create an elasticsearch `Elasticsearch` object and return it.
This will aggressively re-use `Elasticsearch` objects with the
following rules:
1. if you pass the same argument values to `get_es()`, then it
will return the same `Elasticsearch` object
2. if you pass different argument values to `get_es()`, then it
will return a different `Elasticsearch` object
3. it caches each `Elasticsearch` object that gets created
4. if you pass in `force_new=True`, then you are guaranteed to get
a fresh `Elasticsearch` object AND that object will not be
cached
:arg urls: list of uris; Elasticsearch hosts to connect to,
defaults to ``['http://localhost:9200']``
:arg timeout: int; the timeout in seconds, defaults to 5
:arg force_new: Forces get_es() to generate a new Elasticsearch
object rather than pulling it from cache.
:arg settings: other settings to pass into Elasticsearch
constructor; See
`<http://elasticsearch-py.readthedocs.org/>`_ for more details.
Examples::
# Returns cached Elasticsearch object
es = get_es()
# Returns a new Elasticsearch object
es = get_es(force_new=True)
es = get_es(urls=['localhost'])
es = get_es(urls=['localhost:9200'], timeout=10,
max_retries=3)
"""
# Cheap way of de-None-ifying things
urls = urls or DEFAULT_URLS
# v0.7: Check for 'hosts' instead of 'urls'. Take this out in v1.0.
if 'hosts' in settings:
raise DeprecationWarning('"hosts" is deprecated in favor of "urls".')
if not force_new:
key = _build_key(urls, timeout, **settings)
if key in _cached_elasticsearch:
return _cached_elasticsearch[key]
es = Elasticsearch(urls, timeout=timeout, **settings)
if not force_new:
# We don't need to rebuild the key here since we built it in
# the previous if block, so it's in the namespace. Having said
# that, this is a little ew.
_cached_elasticsearch[key] = es
return es
|
Create an elasticsearch `Elasticsearch` object and return it.
This will aggressively re-use `Elasticsearch` objects with the
following rules:
1. if you pass the same argument values to `get_es()`, then it
will return the same `Elasticsearch` object
2. if you pass different argument values to `get_es()`, then it
will return a different `Elasticsearch` object
3. it caches each `Elasticsearch` object that gets created
4. if you pass in `force_new=True`, then you are guaranteed to get
a fresh `Elasticsearch` object AND that object will not be
cached
:arg urls: list of uris; Elasticsearch hosts to connect to,
defaults to ``['http://localhost:9200']``
:arg timeout: int; the timeout in seconds, defaults to 5
:arg force_new: Forces get_es() to generate a new Elasticsearch
object rather than pulling it from cache.
:arg settings: other settings to pass into Elasticsearch
constructor; See
`<http://elasticsearch-py.readthedocs.org/>`_ for more details.
Examples::
# Returns cached Elasticsearch object
es = get_es()
# Returns a new Elasticsearch object
es = get_es(force_new=True)
es = get_es(urls=['localhost'])
es = get_es(urls=['localhost:9200'], timeout=10,
max_retries=3)
|
entailment
|
def _facet_counts(items):
"""Returns facet counts as dict.
Given the `items()` on the raw dictionary from Elasticsearch this processes
it and returns the counts keyed on the facet name provided in the original
query.
"""
facets = {}
for name, data in items:
facets[name] = FacetResult(name, data)
return facets
|
Returns facet counts as dict.
Given the `items()` on the raw dictionary from Elasticsearch this processes
it and returns the counts keyed on the facet name provided in the original
query.
|
entailment
|
def _boosted_value(name, action, key, value, boost):
"""Boost a value if we should in _process_queries"""
if boost is not None:
# Note: Most queries use 'value' for the key name except
# Match queries which use 'query'. So we have to do some
# switcheroo for that.
value_key = 'query' if action in MATCH_ACTIONS else 'value'
return {name: {'boost': boost, value_key: value}}
return {name: value}
|
Boost a value if we should in _process_queries
|
entailment
|
def decorate_with_metadata(obj, result):
"""Return obj decorated with es_meta object"""
# Create es_meta object with Elasticsearch metadata about this
# search result
obj.es_meta = Metadata(
# Elasticsearch id
id=result.get('_id', 0),
# Source data
source=result.get('_source', {}),
# The search result score
score=result.get('_score', None),
# The document type
type=result.get('_type', None),
# Explanation of score
explanation=result.get('_explanation', {}),
# Highlight bits
highlight=result.get('highlight', {})
)
# Put the id on the object for convenience
obj._id = result.get('_id', 0)
return obj
|
Return obj decorated with es_meta object
|
entailment
|
def _combine(self, other, conn='and'):
"""
OR and AND will create a new F, with the filters from both F
objects combined with the connector `conn`.
"""
f = F()
self_filters = copy.deepcopy(self.filters)
other_filters = copy.deepcopy(other.filters)
if not self.filters:
f.filters = other_filters
elif not other.filters:
f.filters = self_filters
elif conn in self.filters[0]:
f.filters = self_filters
f.filters[0][conn].extend(other_filters)
elif conn in other.filters[0]:
f.filters = other_filters
f.filters[0][conn].extend(self_filters)
else:
f.filters = [{conn: self_filters + other_filters}]
return f
|
OR and AND will create a new F, with the filters from both F
objects combined with the connector `conn`.
|
entailment
|
def to_python(self, obj):
"""Converts strings in a data structure to Python types
It converts datetime-ish things to Python datetimes.
Override if you want something different.
:arg obj: Python datastructure
:returns: Python datastructure with strings converted to
Python types
.. Note::
This does the conversion in-place!
"""
if isinstance(obj, string_types):
if len(obj) == 26:
try:
return datetime.strptime(obj, '%Y-%m-%dT%H:%M:%S.%f')
except (TypeError, ValueError):
pass
elif len(obj) == 19:
try:
return datetime.strptime(obj, '%Y-%m-%dT%H:%M:%S')
except (TypeError, ValueError):
pass
elif len(obj) == 10:
try:
return datetime.strptime(obj, '%Y-%m-%d')
except (TypeError, ValueError):
pass
elif isinstance(obj, dict):
for key, val in obj.items():
obj[key] = self.to_python(val)
elif isinstance(obj, list):
return [self.to_python(item) for item in obj]
return obj
|
Converts strings in a data structure to Python types
It converts datetime-ish things to Python datetimes.
Override if you want something different.
:arg obj: Python datastructure
:returns: Python datastructure with strings converted to
Python types
.. Note::
This does the conversion in-place!
|
entailment
|
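A standalone sketch of the length-based dispatch used above: ISO-ish strings of length 26, 19, and 10 map to the three strptime formats.
from datetime import datetime

FORMATS = {
    26: '%Y-%m-%dT%H:%M:%S.%f',
    19: '%Y-%m-%dT%H:%M:%S',
    10: '%Y-%m-%d',
}
for s in ('2013-02-14T09:30:00.000123', '2013-02-14T09:30:00', '2013-02-14'):
    # Pick the format by string length, as to_python() does.
    print(datetime.strptime(s, FORMATS[len(s)]))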
def query(self, *queries, **kw):
"""
Return a new S instance with query args combined with existing
set in a must boolean query.
:arg queries: instances of Q
:arg kw: queries in the form of ``field__action=value``
There are three special flags you can use:
* ``must=True``: Specifies that the queries and kw queries
**must match** in order for a document to be in the result.
If you don't specify a special flag, this is the default.
* ``should=True``: Specifies that the queries and kw queries
**should match** in order for a document to be in the result.
* ``must_not=True``: Specifies the queries and kw queries
**must not match** in order for a document to be in the result.
These flags work by putting those queries in the appropriate
clause of an Elasticsearch boolean query.
Examples:
>>> s = S().query(foo='bar')
>>> s = S().query(Q(foo='bar'))
>>> s = S().query(foo='bar', bat__match='baz')
>>> s = S().query(foo='bar', should=True)
>>> s = S().query(foo='bar', should=True).query(baz='bat', must=True)
Notes:
1. Don't specify multiple special flags, but if you did, `should`
takes precedence.
2. If you don't specify any, it defaults to `must`.
3. You can specify special flags in the
:py:class:`elasticutils.Q`, too. If you're building your
query incrementally, using :py:class:`elasticutils.Q` helps
a lot.
See the documentation on :py:class:`elasticutils.Q` for more
details on composing queries with Q.
See the documentation on :py:class:`elasticutils.S` for more
details on adding support for more query types.
"""
q = Q()
for query in queries:
q += query
if 'or_' in kw:
# Backwards compatible with pre-0.7 versions.
or_query = kw.pop('or_')
# or_query here is a dict of key/val pairs. or_ indicates
# they're in a should clause, so we generate the
# equivalent Q and then add it in.
or_query['should'] = True
q += Q(**or_query)
q += Q(**kw)
return self._clone(next_step=('query', q))
|
Return a new S instance with query args combined with existing
set in a must boolean query.
:arg queries: instances of Q
:arg kw: queries in the form of ``field__action=value``
There are three special flags you can use:
* ``must=True``: Specifies that the queries and kw queries
**must match** in order for a document to be in the result.
If you don't specify a special flag, this is the default.
* ``should=True``: Specifies that the queries and kw queries
**should match** in order for a document to be in the result.
* ``must_not=True``: Specifies the queries and kw queries
**must not match** in order for a document to be in the result.
These flags work by putting those queries in the appropriate
clause of an Elasticsearch boolean query.
Examples:
>>> s = S().query(foo='bar')
>>> s = S().query(Q(foo='bar'))
>>> s = S().query(foo='bar', bat__match='baz')
>>> s = S().query(foo='bar', should=True)
>>> s = S().query(foo='bar', should=True).query(baz='bat', must=True)
Notes:
1. Don't specify multiple special flags, but if you did, `should`
takes precedence.
2. If you don't specify any, it defaults to `must`.
3. You can specify special flags in the
:py:class:`elasticutils.Q`, too. If you're building your
query incrementally, using :py:class:`elasticutils.Q` helps
a lot.
See the documentation on :py:class:`elasticutils.Q` for more
details on composing queries with Q.
See the documentation on :py:class:`elasticutils.S` for more
details on adding support for more query types.
|
entailment
|
def filter(self, *filters, **kw):
"""
Return a new S instance with filter args combined with
existing set with AND.
:arg filters: this will be instances of F
:arg kw: this will be in the form of ``field__action=value``
Examples:
>>> s = S().filter(foo='bar')
>>> s = S().filter(F(foo='bar'))
>>> s = S().filter(foo='bar', bat='baz')
>>> s = S().filter(foo='bar').filter(bat='baz')
By default, everything is combined using AND. If you provide
multiple filters in a single filter call, those are ANDed
together. If you provide multiple filters in multiple filter
calls, those are ANDed together.
If you want something different, use the F class which supports
``&`` (and), ``|`` (or) and ``~`` (not) operators. Then call
filter once with the resulting F instance.
See the documentation on :py:class:`elasticutils.F` for more
details on composing filters with F.
See the documentation on :py:class:`elasticutils.S` for more
details on adding support for new filter types.
"""
items = kw.items()
if six.PY3:
items = list(items)
return self._clone(
next_step=('filter', list(filters) + items))
|
Return a new S instance with filter args combined with
existing set with AND.
:arg filters: this will be instances of F
:arg kw: this will be in the form of ``field__action=value``
Examples:
>>> s = S().filter(foo='bar')
>>> s = S().filter(F(foo='bar'))
>>> s = S().filter(foo='bar', bat='baz')
>>> s = S().filter(foo='bar').filter(bat='baz')
By default, everything is combined using AND. If you provide
multiple filters in a single filter call, those are ANDed
together. If you provide multiple filters in multiple filter
calls, those are ANDed together.
If you want something different, use the F class which supports
``&`` (and), ``|`` (or) and ``~`` (not) operators. Then call
filter once with the resulting F instance.
See the documentation on :py:class:`elasticutils.F` for more
details on composing filters with F.
See the documentation on :py:class:`elasticutils.S` for more
details on adding support for new filter types.
|
entailment
|
def boost(self, **kw):
"""
Return a new S instance with field boosts.
ElasticUtils allows you to specify query-time field boosts
with ``.boost()``. It takes a set of arguments where the keys
are either field names or field name + ``__`` + field action.
Examples::
q = (S().query(title='taco trucks',
description__match='awesome')
.boost(title=4.0, description__match=2.0))
If the key is a field name, then the boost will apply to all
query bits that have that field name. For example::
q = (S().query(title='trucks',
title__prefix='trucks',
title__fuzzy='trucks')
.boost(title=4.0))
applies a 4.0 boost to all three query bits because all three
query bits are for the title field name.
If the key is a field name and field action, then the boost
will apply only to that field name and field action. For
example::
q = (S().query(title='trucks',
title__prefix='trucks',
title__fuzzy='trucks')
.boost(title__prefix=4.0))
will only apply the 4.0 boost to title__prefix.
Boosts are relative to one another and all boosts default to
1.0.
For example, if you had::
qs = (S().boost(title=4.0, summary=2.0)
.query(title__match=value,
summary__match=value,
content__match=value,
should=True))
``title__match`` would be boosted twice as much as
``summary__match`` and ``summary__match`` twice as much as
``content__match``.
"""
new = self._clone()
new.field_boosts.update(kw)
return new
|
Return a new S instance with field boosts.
ElasticUtils allows you to specify query-time field boosts
with ``.boost()``. It takes a set of arguments where the keys
are either field names or field name + ``__`` + field action.
Examples::
q = (S().query(title='taco trucks',
description__match='awesome')
.boost(title=4.0, description__match=2.0))
If the key is a field name, then the boost will apply to all
query bits that have that field name. For example::
q = (S().query(title='trucks',
title__prefix='trucks',
title__fuzzy='trucks')
.boost(title=4.0))
applies a 4.0 boost to all three query bits because all three
query bits are for the title field name.
If the key is a field name and field action, then the boost
will apply only to that field name and field action. For
example::
q = (S().query(title='trucks',
title__prefix='trucks',
title__fuzzy='trucks')
.boost(title__prefix=4.0))
will only apply the 4.0 boost to title__prefix.
Boosts are relative to one another and all boosts default to
1.0.
For example, if you had::
qs = (S().boost(title=4.0, summary=2.0)
.query(title__match=value,
summary__match=value,
content__match=value,
should=True))
``title__match`` would be boosted twice as much as
``summary__match`` and ``summary__match`` twice as much as
``content__match``.
|
entailment
|
def demote(self, amount_, *queries, **kw):
"""
Returns a new S instance with boosting query and demotion.
You can demote documents that match query criteria::
q = (S().query(title='trucks')
.demote(0.5, description__match='gross'))
q = (S().query(title='trucks')
.demote(0.5, Q(description__match='gross')))
This is implemented using the boosting query in
Elasticsearch. Anything you specify with ``.query()`` goes
into the positive section. The negative query and negative
boost portions are specified as the first and second arguments
to ``.demote()``.
.. Note::
Calling this again will overwrite previous ``.demote()``
calls.
"""
q = Q()
for query in queries:
q += query
q += Q(**kw)
return self._clone(next_step=('demote', (amount_, q)))
|
Returns a new S instance with boosting query and demotion.
You can demote documents that match query criteria::
q = (S().query(title='trucks')
.demote(0.5, description__match='gross'))
q = (S().query(title='trucks')
.demote(0.5, Q(description__match='gross')))
This is implemented using the boosting query in
Elasticsearch. Anything you specify with ``.query()`` goes
into the positive section. The negative query and negative
boost portions are specified as the first and second arguments
to ``.demote()``.
.. Note::
Calling this again will overwrite previous ``.demote()``
calls.
|
entailment
|
def facet_raw(self, **kw):
"""
Return a new S instance with raw facet args combined with
existing set.
"""
items = kw.items()
if six.PY3:
items = list(items)
return self._clone(next_step=('facet_raw', items))
|
Return a new S instance with raw facet args combined with
existing set.
|
entailment
|
def suggest(self, name, term, **kwargs):
"""Set suggestion options.
:arg name: The name to use for the suggestions.
:arg term: The term to suggest similar looking terms for.
Additional keyword options:
* ``field`` -- The field to base suggestions upon, defaults to _all
Results will have a ``_suggestions`` property containing the
suggestions for all terms.
.. Note::
Suggestions are only supported since Elasticsearch 0.90.
Calling this multiple times will add multiple suggest clauses to
the query.
"""
return self._clone(next_step=('suggest', (name, term, kwargs)))
|
Set suggestion options.
:arg name: The name to use for the suggestions.
:arg term: The term to suggest similar looking terms for.
Additional keyword options:
* ``field`` -- The field to base suggestions upon, defaults to _all
Results will have a ``_suggestions`` property containing the
suggestions for all terms.
.. Note::
Suggestions are only supported since Elasticsearch 0.90.
Calling this multiple times will add multiple suggest clauses to
the query.
|
entailment
|
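Per build_search() later in this section, each suggest() call contributes a clause like the following to the search body (the names here are illustrative):
# s = S().suggest('mistyped', 'tracks', field='title') produces:
body_fragment = {
    'suggest': {
        'mistyped': {
            'text': 'tracks',
            'term': {'field': 'title'},
        },
    },
}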
def extra(self, **kw):
"""
Return a new S instance with extra args combined with existing
set.
"""
new = self._clone()
actions = ['values_list', 'values_dict', 'order_by', 'query',
'filter', 'facet']
for key, vals in kw.items():
assert key in actions
if hasattr(vals, 'items'):
new.steps.append((key, vals.items()))
else:
new.steps.append((key, vals))
return new
|
Return a new S instance with extra args combined with existing
set.
|
entailment
|
def build_search(self):
"""Builds the Elasticsearch search body represented by this S.
Loop over self.steps to build the search body that will be
sent to Elasticsearch. This returns a Python dict.
If you want the JSON that actually gets sent, then pass the return
value through :py:func:`elasticutils.utils.to_json`.
:returns: a Python dict
"""
filters = []
filters_raw = None
queries = []
query_raw = None
sort = []
dict_fields = set()
list_fields = set()
facets = {}
facets_raw = {}
demote = None
highlight_fields = set()
highlight_options = {}
suggestions = {}
explain = False
as_list = as_dict = False
search_type = None
for action, value in self.steps:
if action == 'order_by':
sort = []
for key in value:
if isinstance(key, string_types) and key.startswith('-'):
sort.append({key[1:]: 'desc'})
else:
sort.append(key)
elif action == 'values_list':
if not value:
list_fields = set()
else:
list_fields |= set(value)
as_list, as_dict = True, False
elif action == 'values_dict':
if not value:
dict_fields = set()
else:
dict_fields |= set(value)
as_list, as_dict = False, True
elif action == 'explain':
explain = value
elif action == 'query':
queries.append(value)
elif action == 'query_raw':
query_raw = value
elif action == 'demote':
# value here is a tuple of (negative_boost, query)
demote = value
elif action == 'filter':
filters.extend(self._process_filters(value))
elif action == 'filter_raw':
filters_raw = value
elif action == 'facet':
# value here is a (args, kwargs) tuple
facets.update(_process_facets(*value))
elif action == 'facet_raw':
facets_raw.update(dict(value))
elif action == 'highlight':
if value[0] == (None,):
highlight_fields = set()
else:
highlight_fields |= set(value[0])
highlight_options.update(value[1])
elif action == 'search_type':
search_type = value
elif action == 'suggest':
suggestions[value[0]] = (value[1], value[2])
elif action in ('es', 'indexes', 'doctypes', 'boost'):
# Ignore these--we use these elsewhere, but want to
# make sure lack of handling it here doesn't throw an
# error.
pass
else:
raise NotImplementedError(action)
qs = {}
# If there's a filters_raw, we use that.
if filters_raw:
qs['filter'] = filters_raw
else:
if len(filters) > 1:
qs['filter'] = {'and': filters}
elif filters:
qs['filter'] = filters[0]
# If there's a query_raw, we use that. Otherwise we use
# whatever we got from query and demote.
if query_raw:
qs['query'] = query_raw
else:
pq = self._process_queries(queries)
if demote is not None:
qs['query'] = {
'boosting': {
'negative': self._process_queries([demote[1]]),
'negative_boost': demote[0]
}
}
if pq:
qs['query']['boosting']['positive'] = pq
elif pq:
qs['query'] = pq
if as_list:
fields = qs['fields'] = list(list_fields) if list_fields else ['*']
elif as_dict:
fields = qs['fields'] = list(dict_fields) if dict_fields else ['*']
else:
fields = set()
if facets:
qs['facets'] = facets
# Hunt for `facet_filter` shells and update those. We use
# None as a shell, so if it's explicitly set to None, then
# we update it.
for facet in facets.values():
if facet.get('facet_filter', 1) is None and 'filter' in qs:
facet['facet_filter'] = qs['filter']
if facets_raw:
qs.setdefault('facets', {}).update(facets_raw)
if sort:
qs['sort'] = sort
if self.start:
qs['from'] = self.start
if self.stop is not None:
qs['size'] = self.stop - self.start
if highlight_fields:
qs['highlight'] = self._build_highlight(
highlight_fields, highlight_options)
if explain:
qs['explain'] = True
for suggestion, (term, kwargs) in six.iteritems(suggestions):
qs.setdefault('suggest', {})[suggestion] = {
'text': term,
'term': {
'field': kwargs.get('field', '_all'),
},
}
self.fields, self.as_list, self.as_dict = fields, as_list, as_dict
self.search_type = search_type
return qs
|
Builds the Elasticsearch search body represented by this S.
Loop over self.steps to build the search body that will be
sent to Elasticsearch. This returns a Python dict.
If you want the JSON that actually gets sent, then pass the return
value through :py:func:`elasticutils.utils.to_json`.
:returns: a Python dict
|
entailment
|
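A hedged sketch of a body this method might build, assuming the default (action-less) query maps to a term query as in _process_query below:
# For something like: S().query(title='taco').filter(tag='food')[:10]
body = {
    'query': {'term': {'title': 'taco'}},
    'filter': {'term': {'tag': 'food'}},   # single filter, so no 'and' wrapper
    'size': 10,                            # from the slice; 'from' omitted at 0
}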
def _build_highlight(self, fields, options):
"""Return the portion of the query that controls highlighting."""
ret = {'fields': dict((f, {}) for f in fields),
'order': 'score'}
ret.update(options)
return ret
|
Return the portion of the query that controls highlighting.
|
entailment
|
def _process_filters(self, filters):
"""Takes a list of filters and returns ES JSON API
:arg filters: list of F, (key, val) tuples, or dicts
:returns: list of ES JSON API filters
"""
rv = []
for f in filters:
if isinstance(f, F):
if f.filters:
rv.extend(self._process_filters(f.filters))
continue
elif isinstance(f, dict):
if six.PY3:
key = list(f.keys())[0]
else:
key = f.keys()[0]
val = f[key]
key = key.strip('_')
if key not in ('or', 'and', 'not', 'filter'):
raise InvalidFieldActionError(
'%s is not a valid connector' % list(f.keys())[0])
if 'filter' in val:
filter_filters = self._process_filters(val['filter'])
if len(filter_filters) == 1:
filter_filters = filter_filters[0]
rv.append({key: {'filter': filter_filters}})
else:
rv.append({key: self._process_filters(val)})
else:
key, val = f
key, field_action = split_field_action(key)
handler_name = 'process_filter_{0}'.format(field_action)
if field_action and hasattr(self, handler_name):
rv.append(getattr(self, handler_name)(
key, val, field_action))
elif key.strip('_') in ('or', 'and', 'not'):
connector = key.strip('_')
rv.append({connector: self._process_filters(val.items())})
elif field_action is None:
if val is None:
rv.append({'missing': {
'field': key, "null_value": True}})
else:
rv.append({'term': {key: val}})
elif field_action in ('startswith', 'prefix'):
rv.append({'prefix': {key: val}})
elif field_action == 'in':
rv.append({'in': {key: val}})
elif field_action in RANGE_ACTIONS:
rv.append({'range': {key: {field_action: val}}})
elif field_action == 'range':
lower, upper = val
rv.append({'range': {key: {'gte': lower, 'lte': upper}}})
elif field_action == 'distance':
distance, latitude, longitude = val
rv.append({
'geo_distance': {
'distance': distance,
key: [longitude, latitude]
}
})
else:
raise InvalidFieldActionError(
'%s is not a valid field action' % field_action)
return rv
|
Takes a list of filters and returns ES JSON API
:arg filters: list of F, (key, val) tuples, or dicts
:returns: list of ES JSON API filters
|
entailment
|
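For reference, a few hedged input-to-output pairs following the branches above (this assumes 'gte' is among RANGE_ACTIONS):
filter_examples = [
    (('title', 'foo'),        {'term': {'title': 'foo'}}),
    (('title__prefix', 'fo'), {'prefix': {'title': 'fo'}}),
    (('age__gte', 21),        {'range': {'age': {'gte': 21}}}),
    (('author', None),        {'missing': {'field': 'author', 'null_value': True}}),
]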
def _process_query(self, query):
"""Takes a key/val pair and returns the Elasticsearch code for it"""
key, val = query
field_name, field_action = split_field_action(key)
# Boost by name__action overrides boost by name.
boost = self.field_boosts.get(key)
if boost is None:
boost = self.field_boosts.get(field_name)
handler_name = 'process_query_{0}'.format(field_action)
if field_action and hasattr(self, handler_name):
return getattr(self, handler_name)(field_name, val, field_action)
elif field_action in QUERY_ACTION_MAP:
return {
QUERY_ACTION_MAP[field_action]: _boosted_value(
field_name, field_action, key, val, boost)
}
elif field_action == 'query_string':
# query_string has different syntax, so it's handled
# differently.
#
# Note: query_string queries are not boosted with
# .boost()---they're boosted in the query text itself.
return {
'query_string': {'default_field': field_name, 'query': val}
}
elif field_action in RANGE_ACTIONS:
# Ranges are special and have a different syntax, so
# we handle them separately.
return {
'range': {field_name: _boosted_value(
field_action, field_action, key, val, boost)}
}
elif field_action == 'range':
lower, upper = val
value = {
'gte': lower,
'lte': upper,
}
if boost:
value['boost'] = boost
return {'range': {field_name: value}}
raise InvalidFieldActionError(
'%s is not a valid field action' % field_action)
|
Takes a key/val pair and returns the Elasticsearch code for it
|
entailment
|
def _process_queries(self, queries):
"""Takes a list of queries and returns query clause value
:arg queries: list of Q instances
:returns: dict which is the query clause value
"""
# First, let's mush everything into a single Q. Then we can
# parse that into bits.
new_q = Q()
for query in queries:
new_q += query
# Now we have a single Q that needs to be processed.
should_q = [self._process_query(query) for query in new_q.should_q]
must_q = [self._process_query(query) for query in new_q.must_q]
must_not_q = [self._process_query(query) for query in new_q.must_not_q]
if len(must_q) > 1 or (len(should_q) + len(must_not_q) > 0):
# If there's more than one must_q or there are must_not_q
# or should_q, then we need to wrap the whole thing in a
# boolean query.
bool_query = {}
if must_q:
bool_query['must'] = must_q
if should_q:
bool_query['should'] = should_q
if must_not_q:
bool_query['must_not'] = must_not_q
return {'bool': bool_query}
if must_q:
# There's only one must_q query and that's it, so we hoist
# that.
return must_q[0]
return {}
|
Takes a list of queries and returns query clause value
:arg queries: list of Q instances
:returns: dict which is the query clause value
|
entailment
|
def _do_search(self):
"""
Perform the search, then convert that raw format into a
SearchResults instance and return it.
"""
if self._results_cache is None:
response = self.raw()
ResultsClass = self.get_results_class()
results = self.to_python(response.get('hits', {}).get('hits', []))
self._results_cache = ResultsClass(
self.type, response, results, self.fields)
return self._results_cache
|
Perform the search, then convert that raw format into a
SearchResults instance and return it.
|
entailment
|
def get_es(self, default_builder=get_es):
"""Returns the Elasticsearch object to use.
:arg default_builder: The function that takes a bunch of
arguments and generates an elasticsearch Elasticsearch
object.
.. Note::
If you desire special behavior regarding building the
Elasticsearch object for this S, subclass S and override
this method.
"""
# .es() calls are incremental, so we go through them all and
# update bits that are specified.
args = {}
for action, value in self.steps:
if action == 'es':
args.update(**value)
# TODO: store the Elasticsearch on the S if we've already
# created one since we don't need to do it multiple times.
return default_builder(**args)
|
Returns the Elasticsearch object to use.
:arg default_builder: The function that takes a bunch of
arguments and generates an elasticsearch Elasticsearch
object.
.. Note::
If you desire special behavior regarding building the
Elasticsearch object for this S, subclass S and override
this method.
|
entailment
|
def get_indexes(self, default_indexes=DEFAULT_INDEXES):
"""Returns the list of indexes to act on."""
for action, value in reversed(self.steps):
if action == 'indexes':
return list(value)
if self.type is not None:
indexes = self.type.get_index()
if isinstance(indexes, string_types):
indexes = [indexes]
return indexes
return default_indexes
|
Returns the list of indexes to act on.
|
entailment
|
def get_doctypes(self, default_doctypes=DEFAULT_DOCTYPES):
"""Returns the list of doctypes to use."""
for action, value in reversed(self.steps):
if action == 'doctypes':
return list(value)
if self.type is not None:
return [self.type.get_mapping_type_name()]
return default_doctypes
|
Returns the list of doctypes to use.
|
entailment
|
def raw(self):
"""
Builds the query, passes it to Elasticsearch, and returns the
raw response.
"""
qs = self.build_search()
es = self.get_es()
index = self.get_indexes()
doc_type = self.get_doctypes()
if doc_type and not index:
raise BadSearch(
'You must specify an index if you are specifying doctypes.')
extra_search_kwargs = {}
if self.search_type:
extra_search_kwargs['search_type'] = self.search_type
hits = es.search(body=qs,
index=index,
doc_type=doc_type,
**extra_search_kwargs)
log.debug('[%s] %s' % (hits['took'], qs))
return hits
|
Builds the query, passes it to Elasticsearch, and returns the
raw response.
|
entailment
|
def get_es(self):
"""Returns an `Elasticsearch`.
* If there's an s, then it returns that `Elasticsearch`.
* If the es was provided in the constructor, then it returns
that `Elasticsearch`.
* Otherwise, it creates a new `Elasticsearch` and returns
that.
Override this if that behavior isn't correct for you.
"""
if self.s:
return self.s.get_es()
return self.es or get_es()
|
Returns an `Elasticsearch`.
* If there's an s, then it returns that `Elasticsearch`.
* If the es was provided in the constructor, then it returns
that `Elasticsearch`.
* Otherwise, it creates a new `Elasticsearch` and returns
that.
Override this if that behavior isn't correct for you.
|
entailment
|
def raw(self):
"""
Builds the query, passes it to `Elasticsearch`, and returns the
raw response.
"""
es = self.get_es()
params = dict(self.query_params)
mlt_fields = self.mlt_fields or params.pop('mlt_fields', [])
body = self.s.build_search() if self.s else ''
hits = es.mlt(
index=self.index, doc_type=self.doctype, id=self.id,
mlt_fields=mlt_fields, body=body, **params)
log.debug(hits)
return hits
|
Builds the query, passes it to `Elasticsearch`, and returns the
raw response.
|
entailment
|
def _do_search(self):
"""
Perform the mlt call, then convert that raw format into a
SearchResults instance and return it.
"""
if self._results_cache is None:
response = self.raw()
results = self.to_python(response.get('hits', {}).get('hits', []))
self._results_cache = DictSearchResults(
self.type, response, results, None)
return self._results_cache
|
Perform the mlt call, then convert that raw format into a
SearchResults instance and return it.
|
entailment
|
def index(cls, document, id_=None, overwrite_existing=True, es=None,
index=None):
"""Adds or updates a document to the index
:arg document: Python dict of key/value pairs representing
the document
.. Note::
This must be serializable into JSON.
:arg id_: the id of the document
.. Note::
If you don't provide an ``id_``, then Elasticsearch
will make up an id for your document and it'll look
like a character name from a Lovecraft novel.
:arg overwrite_existing: if ``True`` overwrites existing documents
of the same ID and doctype
:arg es: The `Elasticsearch` to use. If you don't specify an
`Elasticsearch`, it'll use `cls.get_es()`.
:arg index: The name of the index to use. If you don't specify one
it'll use `cls.get_index()`.
.. Note::
If you need the documents available for searches
immediately, make sure to refresh the index by calling
``refresh_index()``.
"""
if es is None:
es = cls.get_es()
if index is None:
index = cls.get_index()
kw = {}
if not overwrite_existing:
kw['op_type'] = 'create'
es.index(index=index, doc_type=cls.get_mapping_type_name(),
body=document, id=id_, **kw)
|
Adds or updates a document in the index
:arg document: Python dict of key/value pairs representing
the document
.. Note::
This must be serializable into JSON.
:arg id_: the id of the document
.. Note::
If you don't provide an ``id_``, then Elasticsearch
will make up an id for your document and it'll look
like a character name from a Lovecraft novel.
:arg overwrite_existing: if ``True`` overwrites existing documents
of the same ID and doctype
:arg es: The `Elasticsearch` to use. If you don't specify an
`Elasticsearch`, it'll use `cls.get_es()`.
:arg index: The name of the index to use. If you don't specify one
it'll use `cls.get_index()`.
.. Note::
If you need the documents available for searches
immediately, make sure to refresh the index by calling
``refresh_index()``.
|
entailment
|
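A hedged usage sketch (BlogPostMappingType is a hypothetical MappingType subclass; refresh_index is defined later in this section):
# Index one document under an explicit id.
BlogPostMappingType.index(
    {'id': 12, 'title': 'First post'},
    id_=12,
)
# Make it searchable immediately (useful in tests).
BlogPostMappingType.refresh_index()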
def bulk_index(cls, documents, id_field='id', es=None, index=None):
"""Adds or updates a batch of documents.
:arg documents: List of Python dicts representing individual
documents to be added to the index
.. Note::
This must be serializable into JSON.
:arg id_field: The name of the field to use as the document
id. This defaults to 'id'.
:arg es: The `Elasticsearch` to use. If you don't specify an
`Elasticsearch`, it'll use `cls.get_es()`.
:arg index: The name of the index to use. If you don't specify one
it'll use `cls.get_index()`.
.. Note::
If you need the documents available for searches
immediately, make sure to refresh the index by calling
``refresh_index()``.
"""
if es is None:
es = cls.get_es()
if index is None:
index = cls.get_index()
documents = (dict(d, _id=d[id_field]) for d in documents)
bulk_index(
es,
documents,
index=index,
doc_type=cls.get_mapping_type_name(),
raise_on_error=True
)
|
Adds or updates a batch of documents.
:arg documents: List of Python dicts representing individual
documents to be added to the index
.. Note::
This must be serializable into JSON.
:arg id_field: The name of the field to use as the document
id. This defaults to 'id'.
:arg es: The `Elasticsearch` to use. If you don't specify an
`Elasticsearch`, it'll use `cls.get_es()`.
:arg index: The name of the index to use. If you don't specify one
it'll use `cls.get_index()`.
.. Note::
If you need the documents available for searches
immediately, make sure to refresh the index by calling
``refresh_index()``.
|
entailment
|
def unindex(cls, id_, es=None, index=None):
"""Removes a particular item from the search index.
:arg id_: The Elasticsearch id for the document to remove from
the index.
:arg es: The `Elasticsearch` to use. If you don't specify an
`Elasticsearch`, it'll use `cls.get_es()`.
:arg index: The name of the index to use. If you don't specify one
it'll use `cls.get_index()`.
"""
if es is None:
es = cls.get_es()
if index is None:
index = cls.get_index()
es.delete(index=index, doc_type=cls.get_mapping_type_name(), id=id_)
|
Removes a particular item from the search index.
:arg id_: The Elasticsearch id for the document to remove from
the index.
:arg es: The `Elasticsearch` to use. If you don't specify an
`Elasticsearch`, it'll use `cls.get_es()`.
:arg index: The name of the index to use. If you don't specify one
it'll use `cls.get_index()`.
|
entailment
|
def refresh_index(cls, es=None, index=None):
"""Refreshes the index.
Elasticsearch will update the index periodically
automatically. If you need to see the documents you just
indexed in your search results right now, you should call
`refresh_index` as soon as you're done indexing. This is
particularly helpful for unit tests.
:arg es: The `Elasticsearch` to use. If you don't specify an
`Elasticsearch`, it'll use `cls.get_es()`.
:arg index: The name of the index to use. If you don't specify one
it'll use `cls.get_index()`.
"""
if es is None:
es = cls.get_es()
if index is None:
index = cls.get_index()
es.indices.refresh(index=index)
|
Refreshes the index.
Elasticsearch will update the index periodically
automatically. If you need to see the documents you just
indexed in your search results right now, you should call
`refresh_index` as soon as you're done indexing. This is
particularly helpful for unit tests.
:arg es: The `Elasticsearch` to use. If you don't specify an
`Elasticsearch`, it'll use `cls.get_es()`.
:arg index: The name of the index to use. If you don't specify one
it'll use `cls.get_index()`.
|
entailment
|
def monkeypatch_es():
"""Monkey patch for elasticsearch-py 1.0+ to make it work with ES 0.90
1. tweaks elasticsearch.client.bulk to normalize return status codes
.. Note::
We can nix this when we drop support for ES 0.90.
"""
if _monkeypatched_es:
return
def normalize_bulk_return(fun):
"""Set's "ok" based on "status" if "status" exists"""
@wraps(fun)
def _fixed_bulk(self, *args, **kwargs):
def fix_item(item):
# Go through all the possible sections of item looking
# for 'ok' and adding an additional 'status'.
for key, val in item.items():
if 'ok' in val:
val['status'] = 201
return item
ret = fun(self, *args, **kwargs)
if 'items' in ret:
ret['items'] = [fix_item(item) for item in ret['items']]
return ret
return _fixed_bulk
Elasticsearch.bulk = normalize_bulk_return(Elasticsearch.bulk)
|
Monkey patch for elasticsearch-py 1.0+ to make it work with ES 0.90
1. tweaks elasticsearch.client.bulk to normalize return status codes
.. Note::
We can nix this when we drop support for ES 0.90.
|
entailment
|
def get_context_data(self, **kwargs):
"""
Returns view context dictionary.
:rtype: dict.
"""
kwargs.update({
'entries': Entry.objects.get_for_tag(
self.kwargs.get('slug', 0)
)
})
return super(EntriesView, self).get_context_data(**kwargs)
|
Returns view context dictionary.
:rtype: dict.
|
entailment
|
def get_password(self, service, username):
"""
Read the password from the file.
"""
assoc = self._generate_assoc(service, username)
service = escape_for_ini(service)
username = escape_for_ini(username)
# load the passwords from the file
config = configparser.RawConfigParser()
if os.path.exists(self.file_path):
config.read(self.file_path)
# fetch the password
try:
password_base64 = config.get(service, username).encode()
# decode with base64
password_encrypted = decodebytes(password_base64)
# decrypt the password with associated data
try:
password = self.decrypt(password_encrypted, assoc).decode(
'utf-8')
except ValueError:
# decrypt the password without associated data
password = self.decrypt(password_encrypted).decode('utf-8')
except (configparser.NoOptionError, configparser.NoSectionError):
password = None
return password
|
Read the password from the file.
|
entailment
|
def set_password(self, service, username, password):
"""Write the password in the file.
"""
if not username:
# https://github.com/jaraco/keyrings.alt/issues/21
raise ValueError("Username cannot be blank.")
if not isinstance(password, string_types):
raise TypeError("Password should be a unicode string, not bytes.")
assoc = self._generate_assoc(service, username)
# encrypt the password
password_encrypted = self.encrypt(password.encode('utf-8'), assoc)
# encode with base64 and add line break to untangle config file
password_base64 = '\n' + encodebytes(password_encrypted).decode()
self._write_config_value(service, username, password_base64)
|
Write the password in the file.
|
entailment
|
def _ensure_file_path(self):
"""
Ensure the storage path exists.
If it doesn't, create it with "go-rwx" permissions.
"""
storage_root = os.path.dirname(self.file_path)
needs_storage_root = storage_root and not os.path.isdir(storage_root)
if needs_storage_root: # pragma: no cover
os.makedirs(storage_root)
if not os.path.isfile(self.file_path):
# create the file without group/world permissions
with open(self.file_path, 'w'):
pass
user_read_write = 0o600
os.chmod(self.file_path, user_read_write)
|
Ensure the storage path exists.
If it doesn't, create it with "go-rwx" permissions.
|
entailment
|
def delete_password(self, service, username):
"""Delete the password for the username of the service.
"""
service = escape_for_ini(service)
username = escape_for_ini(username)
config = configparser.RawConfigParser()
if os.path.exists(self.file_path):
config.read(self.file_path)
try:
if not config.remove_option(service, username):
raise PasswordDeleteError("Password not found")
except configparser.NoSectionError:
raise PasswordDeleteError("Password not found")
# update the file
with open(self.file_path, 'w') as config_file:
config.write(config_file)
|
Delete the password for the username of the service.
|
entailment
|
def applicable_models(self):
"""
Returns a list of model classes that subclass Page
and include a "tags" field.
:rtype: list.
"""
Page = apps.get_model('wagtailcore', 'Page')
applicable = []
for model in apps.get_models():
meta = getattr(model, '_meta')
fields = meta.get_all_field_names()
if issubclass(model, Page) and 'tags' in fields:
applicable.append(model)
return applicable
|
Returns a list of model classes that subclass Page
and include a "tags" field.
:rtype: list.
|
entailment
|
def add_relationship_panels(self):
"""
Add edit handler that includes "related" panels to applicable
model classes that don't explicitly define their own edit handler.
"""
from wagtailplus.utils.edit_handlers import add_panel_to_edit_handler
from wagtailplus.wagtailrelations.edit_handlers import RelatedPanel
for model in self.applicable_models:
add_panel_to_edit_handler(model, RelatedPanel, _(u'Related'))
|
Add edit handler that includes "related" panels to applicable
model classes that don't explicitly define their own edit handler.
|
entailment
|
def add_relationship_methods(self):
"""
Adds relationship methods to applicable model classes.
"""
Entry = apps.get_model('wagtailrelations', 'Entry')
@cached_property
def related(instance):
return instance.get_related()
@cached_property
def related_live(instance):
return instance.get_related_live()
@cached_property
def related_with_scores(instance):
return instance.get_related_with_scores()
def get_related(instance):
entry = Entry.objects.get_for_model(instance)[0]
return entry.get_related()
def get_related_live(instance):
entry = Entry.objects.get_for_model(instance)[0]
return entry.get_related_live()
def get_related_with_scores(instance):
try:
entry = Entry.objects.get_for_model(instance)[0]
return entry.get_related_with_scores()
except IntegrityError:
return []
for model in self.applicable_models:
model.add_to_class(
'get_related',
get_related
)
model.add_to_class(
'get_related_live',
get_related_live
)
model.add_to_class(
'get_related_with_scores',
get_related_with_scores
)
model.add_to_class(
'related',
related
)
model.add_to_class(
'related_live',
related_live
)
model.add_to_class(
'related_with_scores',
related_with_scores
)
|
Adds relationship methods to applicable model classes.
|
entailment
|
def ready(self):
"""
Finalizes application configuration.
"""
import wagtailplus.wagtailrelations.signals.handlers
self.add_relationship_panels()
self.add_relationship_methods()
super(WagtailRelationsAppConfig, self).ready()
|
Finalizes application configuration.
|
entailment
|
def applicable_models(self):
"""
Returns a list of model classes that subclass Page.
:rtype: list.
"""
Page = apps.get_model('wagtailcore', 'Page')
applicable = []
for model in apps.get_models():
if issubclass(model, Page):
applicable.append(model)
return applicable
|
Returns a list of model classes that subclass Page.
:rtype: list.
|
entailment
|
def add_rollback_panels(self):
"""
Adds rollback panel to applicable model class's edit handlers.
"""
from wagtailplus.utils.edit_handlers import add_panel_to_edit_handler
from wagtailplus.wagtailrollbacks.edit_handlers import HistoryPanel
for model in self.applicable_models:
add_panel_to_edit_handler(model, HistoryPanel, _(u'History'))
|
Adds rollback panel to applicable model class's edit handlers.
|
entailment
|
def add_rollback_methods():
"""
Adds rollback methods to applicable model classes.
"""
# Modified Page.save_revision method.
def page_rollback(instance, revision_id, user=None, submitted_for_moderation=False, approved_go_live_at=None, changed=True):
old_revision = instance.revisions.get(pk=revision_id)
new_revision = instance.revisions.create(
content_json = old_revision.content_json,
user = user,
submitted_for_moderation = submitted_for_moderation,
approved_go_live_at = approved_go_live_at
)
update_fields = []
instance.latest_revision_created_at = new_revision.created_at
update_fields.append('latest_revision_created_at')
if changed:
instance.has_unpublished_changes = True
update_fields.append('has_unpublished_changes')
if update_fields:
instance.save(update_fields=update_fields)
logger.info(
"Page edited: \"%s\" id=%d revision_id=%d",
instance.title,
instance.id,
new_revision.id
)
if submitted_for_moderation:
logger.info(
"Page submitted for moderation: \"%s\" id=%d revision_id=%d",
instance.title,
instance.id,
new_revision.id
)
return new_revision
Page = apps.get_model('wagtailcore', 'Page')
Page.add_to_class('rollback', page_rollback)
|
Adds rollback methods to applicable model classes.
|
entailment
|
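Once ready() has attached page_rollback, any Page instance exposes it as page.rollback(revision_id). A hedged usage sketch: the view name and request object are hypothetical, and the import path matches the Wagtail 1.x layout used elsewhere in this code.

# Hypothetical usage of the dynamically added rollback method:
# restoring a page to an earlier revision's content creates a *new*
# revision rather than rewriting history.
def restore_revision(request, page_id, revision_id):
    from wagtail.wagtailcore.models import Page
    page = Page.objects.get(pk=page_id)
    new_revision = page.rollback(revision_id, user=request.user)
    return new_revision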
def ready(self):
"""
Finalizes application configuration.
"""
self.add_rollback_panels()
self.add_rollback_methods()
super(WagtailRollbacksAppConfig, self).ready()
|
Finalizes application configuration.
|
entailment
|
def get_related(page):
"""
Returns list of related Entry instances for specified page.
:param page: the page instance.
:rtype: list.
"""
related = []
entry = Entry.get_for_model(page)
if entry:
related = entry.related
return related
|
Returns list of related Entry instances for specified page.
:param page: the page instance.
:rtype: list.
|
entailment
|
def get_related_entry_admin_url(entry):
"""
Returns admin URL for specified entry instance.
:param entry: the entry instance.
:rtype: str.
"""
namespaces = {
Document: 'wagtaildocs:edit',
Link: 'wagtaillinks:edit',
Page: 'wagtailadmin_pages:edit',
}
for cls, url in namespaces.items():
if issubclass(entry.content_type.model_class(), cls):
return urlresolvers.reverse(url, args=(entry.object_id,))
return ''
|
Returns admin URL for specified entry instance.
:param entry: the entry instance.
:rtype: str.
|
entailment
|
def get_related_with_scores(page):
"""
Returns list of related tuples (Entry instance, score) for
specified page.
:param page: the page instance.
:rtype: list.
"""
related = []
entry = Entry.get_for_model(page)
if entry:
related = entry.related_with_scores
return related
|
Returns list of related tuples (Entry instance, score) for
specified page.
:param page: the page instance.
:rtype: list.
|
entailment
|
def get_password(self, service, username):
"""Get password of the username for the service
"""
init_part = self._keyring.get_password(service, username)
if init_part:
parts = [init_part]
i = 1
while True:
next_part = self._keyring.get_password(
service,
'%s{{part_%d}}' % (username, i))
if next_part:
parts.append(next_part)
i += 1
else:
break
return ''.join(parts)
return None
|
Get password of the username for the service
|
entailment
|
def set_password(self, service, username, password):
"""Set password for the username of the service
"""
segments = range(0, len(password), self._max_password_size)
password_parts = [
password[i:i + self._max_password_size] for i in segments]
for i, password_part in enumerate(password_parts):
curr_username = username
if i > 0:
curr_username += '{{part_%d}}' % i
self._keyring.set_password(service, curr_username, password_part)
|
Set password for the username of the service
|
entailment
|
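Together, the two methods above split secrets that exceed a backend size limit across several entries keyed by a '{{part_N}}' suffix and reassemble them on read. A self-contained sketch of the same scheme with a plain dict standing in for the backend keyring; MAX_SIZE is an assumed limit.

MAX_SIZE = 8  # assumed per-entry size limit
store = {}    # stands in for the backend keyring

def set_password(service, username, password):
    parts = [password[i:i + MAX_SIZE]
             for i in range(0, len(password), MAX_SIZE)]
    for i, part in enumerate(parts):
        key = username if i == 0 else '%s{{part_%d}}' % (username, i)
        store[(service, key)] = part

def get_password(service, username):
    first = store.get((service, username))
    if first is None:
        return None
    parts, i = [first], 1
    while True:
        nxt = store.get((service, '%s{{part_%d}}' % (username, i)))
        if nxt is None:
            return ''.join(parts)
        parts.append(nxt)
        i += 1

set_password('svc', 'alice', 'a-password-longer-than-eight-bytes')
assert get_password('svc', 'alice') == 'a-password-longer-than-eight-bytes'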
def expand_db_attributes(attrs, for_editor):
"""
Given a dictionary of attributes, find the corresponding link instance and
return its HTML representation.
:param attrs: dictionary of link attributes.
:param for_editor: whether or not HTML is for editor.
:rtype: str.
"""
try:
editor_attrs = ''
link = Link.objects.get(id=attrs['id'])
if for_editor:
editor_attrs = 'data-linktype="link" data-id="{0}" '.format(
link.id
)
return '<a {0}href="{1}" title="{2}">'.format(
editor_attrs,
escape(link.get_absolute_url()),
link.title
)
except Link.DoesNotExist:
return '<a>'
|
Given a dictionary of attributes, find the corresponding link instance and
return its HTML representation.
:param attrs: dictionary of link attributes.
:param for_editor: whether or not HTML is for editor.
:rtype: str.
|
entailment
|
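Note that expand_db_attributes emits only the opening <a> tag; the rich-text rewriter is expected to supply the closing tag. Illustrative output shapes (values hypothetical):

# for_editor=True:
#   <a data-linktype="link" data-id="7" href="http://example.com/" title="Example">
# for_editor=False:
#   <a href="http://example.com/" title="Example">
# unknown id (Link.DoesNotExist):
#   <a>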
def crypter(self):
"""The actual keyczar crypter"""
if not hasattr(self, '_crypter'):
# initialise the Keyczar keysets
if not self.keyset_location:
raise ValueError('No encrypted keyset location!')
reader = keyczar.readers.CreateReader(self.keyset_location)
if self.encrypting_keyset_location:
encrypting_keyczar = keyczar.Crypter.Read(
self.encrypting_keyset_location)
reader = keyczar.readers.EncryptedReader(reader,
encrypting_keyczar)
self._crypter = keyczar.Crypter(reader)
return self._crypter
|
The actual keyczar crypter
|
entailment
|
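The hasattr guard above is a common lazy-initialization idiom: build the expensive object on first access, then reuse the cached attribute. A minimal generic sketch (names hypothetical):

class LazyResource:
    @property
    def connection(self):
        # Built once, on first access; later reads hit the cache.
        if not hasattr(self, '_connection'):
            self._connection = self._create_connection()
        return self._connection

    def _create_connection(self):
        return object()  # placeholder for the expensive setup work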
def index_objects(mapping_type, ids, chunk_size=100, es=None, index=None):
"""Index documents of a specified mapping type.
This allows for asynchronous indexing.
If a mapping_type extends Indexable, you can add a ``post_save``
hook for the model that it's based on like this::
@receiver(dbsignals.post_save, sender=MyModel)
def update_in_index(sender, instance, **kw):
from elasticutils.contrib.django import tasks
tasks.index_objects.delay(MyMappingType, [instance.id])
:arg mapping_type: the mapping type for these ids
:arg ids: the list of ids of things to index
:arg chunk_size: the size of the chunk for bulk indexing
.. Note::
The default chunk_size is 100. The number of documents you
can bulk index at once depends on the size of the
documents.
:arg es: The `Elasticsearch` to use. If you don't specify an
`Elasticsearch`, it'll use `mapping_type.get_es()`.
:arg index: The name of the index to use. If you don't specify one
it'll use `mapping_type.get_index()`.
"""
if settings.ES_DISABLED:
return
log.debug('Indexing objects {0}-{1}. [{2}]'.format(
ids[0], ids[-1], len(ids)))
# Get the model this mapping type is based on.
model = mapping_type.get_model()
# Retrieve all the objects that we're going to index and do it in
# bulk.
for id_list in chunked(ids, chunk_size):
documents = []
for obj in model.objects.filter(id__in=id_list):
try:
documents.append(mapping_type.extract_document(obj.id, obj))
except Exception as exc:
log.exception('Unable to extract document {0}: {1}'.format(
obj, repr(exc)))
if documents:
mapping_type.bulk_index(documents, id_field='id', es=es, index=index)
|
Index documents of a specified mapping type.
This allows for asynchronous indexing.
If a mapping_type extends Indexable, you can add a ``post_save``
hook for the model that it's based on like this::
@receiver(dbsignals.post_save, sender=MyModel)
def update_in_index(sender, instance, **kw):
from elasticutils.contrib.django import tasks
tasks.index_objects.delay(MyMappingType, [instance.id])
:arg mapping_type: the mapping type for these ids
:arg ids: the list of ids of things to index
:arg chunk_size: the size of the chunk for bulk indexing
.. Note::
The default chunk_size is 100. The number of documents you
can bulk index at once depends on the size of the
documents.
:arg es: The `Elasticsearch` to use. If you don't specify an
`Elasticsearch`, it'll use `mapping_type.get_es()`.
:arg index: The name of the index to use. If you don't specify one
it'll use `mapping_type.get_index()`.
|
entailment
|
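The loop above depends on a chunked() helper from elasticutils; a minimal compatible sketch, under the assumption that it yields successive slices of at most chunk_size items:

def chunked(iterable, chunk_size):
    # Yield successive slices of at most chunk_size items.
    items = list(iterable)
    for start in range(0, len(items), chunk_size):
        yield items[start:start + chunk_size]

assert list(chunked([1, 2, 3, 4, 5], 2)) == [[1, 2], [3, 4], [5]]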
def unindex_objects(mapping_type, ids, es=None, index=None):
"""Remove documents of a specified mapping_type from the index.
This allows for asynchronous deleting.
If a mapping_type extends Indexable, you can add a ``pre_delete``
hook for the model that it's based on like this::
@receiver(dbsignals.pre_delete, sender=MyModel)
def remove_from_index(sender, instance, **kw):
from elasticutils.contrib.django import tasks
tasks.unindex_objects.delay(MyMappingType, [instance.id])
:arg mapping_type: the mapping type for these ids
:arg ids: the list of ids of things to remove
:arg es: The `Elasticsearch` to use. If you don't specify an
`Elasticsearch`, it'll use `mapping_type.get_es()`.
:arg index: The name of the index to use. If you don't specify one
it'll use `mapping_type.get_index()`.
"""
if settings.ES_DISABLED:
return
for id_ in ids:
mapping_type.unindex(id_, es=es, index=index)
|
Remove documents of a specified mapping_type from the index.
This allows for asynchronous deleting.
If a mapping_type extends Indexable, you can add a ``pre_delete``
hook for the model that it's based on like this::
@receiver(dbsignals.pre_delete, sender=MyModel)
def remove_from_index(sender, instance, **kw):
from elasticutils.contrib.django import tasks
tasks.unindex_objects.delay(MyMappingType, [instance.id])
:arg mapping_type: the mapping type for these ids
:arg ids: the list of ids of things to remove
:arg es: The `Elasticsearch` to use. If you don't specify an
`Elasticsearch`, it'll use `mapping_type.get_es()`.
:arg index: The name of the index to use. If you don't specify one
it'll use `mapping_type.get_index()`.
|
entailment
|
def get_json(self, link):
"""
Returns specified link instance as JSON.
:param link: the link instance.
:rtype: JSON.
"""
return json.dumps({
'id': link.id,
'title': link.title,
'url': link.get_absolute_url(),
'edit_link': reverse(
'{0}:edit'.format(self.url_namespace),
kwargs = {'pk': link.pk}
),
})
|
Returns specified link instance as JSON.
:param link: the link instance.
:rtype: JSON.
|
entailment
|
def _create_cipher(self, password, salt, IV):
"""
Create the cipher object to encrypt or decrypt a payload.
"""
from Crypto.Protocol.KDF import PBKDF2
from Crypto.Cipher import AES
pw = PBKDF2(password, salt, dkLen=self.block_size)
return AES.new(pw[:self.block_size], AES.MODE_CFB, IV)
|
Create the cipher object to encrypt or decrypt a payload.
|
entailment
|
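A self-contained round trip of the same construction (PBKDF2 key derivation feeding AES in CFB mode) using PyCryptodome's module layout; block_size = 16 and the random salt/IV handling are assumptions, since the surrounding class is not shown.

import os
from Crypto.Protocol.KDF import PBKDF2
from Crypto.Cipher import AES

block_size = 16  # assumed; matches the AES block size
salt, iv = os.urandom(block_size), os.urandom(block_size)
pw = PBKDF2('correct horse', salt, dkLen=block_size)

# CFB ciphers are stateful, so encryption and decryption each get a
# fresh cipher object built from the same key and IV.
ciphertext = AES.new(pw[:block_size], AES.MODE_CFB, iv).encrypt(b'secret payload')
plaintext = AES.new(pw[:block_size], AES.MODE_CFB, iv).decrypt(ciphertext)
assert plaintext == b'secret payload'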
def _init_file(self):
"""
Initialize a new password file and set the reference password.
"""
self.keyring_key = self._get_new_password()
# set a reference password, used to check that the password provided
# matches for subsequent checks.
self.set_password('keyring-setting',
'password reference',
'password reference value')
self._write_config_value('keyring-setting',
'scheme',
self.scheme)
self._write_config_value('keyring-setting',
'version',
self.version)
|
Initialize a new password file and set the reference password.
|
entailment
|
def _check_file(self):
"""
Check if the file exists and has the expected password reference.
"""
if not os.path.exists(self.file_path):
return False
self._migrate()
config = configparser.RawConfigParser()
config.read(self.file_path)
try:
config.get(
escape_for_ini('keyring-setting'),
escape_for_ini('password reference'),
)
except (configparser.NoSectionError, configparser.NoOptionError):
return False
try:
self._check_scheme(config)
except AttributeError:
# accept a missing scheme
return True
return self._check_version(config)
|
Check if the file exists and has the expected password reference.
|
entailment
|
def _check_version(self, config):
"""
Check for a valid version. An existing scheme implies an existing
version as well. Returns True if the version is valid, False otherwise.
"""
try:
self.file_version = config.get(
escape_for_ini('keyring-setting'),
escape_for_ini('version'),
)
except (configparser.NoSectionError, configparser.NoOptionError):
return False
return True
|
Check for a valid version. An existing scheme implies an existing
version as well. Returns True if the version is valid, False otherwise.
|
entailment
|
def _unlock(self):
"""
Unlock this keyring by getting the password for the keyring from the
user.
"""
self.keyring_key = getpass.getpass(
'Please enter password for encrypted keyring: ')
try:
ref_pw = self.get_password('keyring-setting', 'password reference')
assert ref_pw == 'password reference value'
except AssertionError:
self._lock()
raise ValueError("Incorrect Password")
|
Unlock this keyring by getting the password for the keyring from the
user.
|
entailment
|
def _escape_char(c):
"Single char escape. Return the char, escaped if not already legal"
if isinstance(c, int):
c = _unichr(c)
return c if c in LEGAL_CHARS else ESCAPE_FMT % ord(c)
|
Single char escape. Return the char, escaped if not already legal
|
entailment
|
def unescape(value):
"""
Inverse of escape.
"""
pattern = ESCAPE_FMT.replace('%02X', '(?P<code>[0-9A-Fa-f]{2})')
# the pattern must be bytes to operate on bytes
pattern_bytes = pattern.encode('ascii')
re_esc = re.compile(pattern_bytes)
return re_esc.sub(_unescape_code, value.encode('ascii')).decode('utf-8')
|
Inverse of escape.
|
entailment
|
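_escape_char and unescape rely on module constants (LEGAL_CHARS, ESCAPE_FMT, _unescape_code) that are not shown. A simplified, self-contained round trip assuming LEGAL_CHARS covers alphanumerics and ESCAPE_FMT is '_%02X'; unlike the original, this sketch escapes codepoints directly rather than UTF-8 bytes, so it only handles characters below U+0100.

import re
import string

LEGAL_CHARS = set(string.ascii_letters + string.digits)  # assumed
ESCAPE_FMT = '_%02X'                                     # assumed

def escape(value):
    return ''.join(
        c if c in LEGAL_CHARS else ESCAPE_FMT % ord(c)
        for c in value
    )

def unescape(value):
    pattern = ESCAPE_FMT.replace('%02X', '([0-9A-Fa-f]{2})')
    return re.sub(pattern, lambda m: chr(int(m.group(1), 16)), value)

assert escape('user@example.com') == 'user_40example_2Ecom'
assert unescape(escape('user@example.com')) == 'user@example.com'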
def _find_passwords(self, service, username, deleting=False):
"""Get password of the username for the service
"""
passwords = []
service = self._safe_string(service)
username = self._safe_string(username)
for attrs_tuple in (('username', 'service'), ('user', 'domain')):
attrs = GnomeKeyring.Attribute.list_new()
GnomeKeyring.Attribute.list_append_string(
attrs, attrs_tuple[0], username)
GnomeKeyring.Attribute.list_append_string(
attrs, attrs_tuple[1], service)
result, items = GnomeKeyring.find_items_sync(
GnomeKeyring.ItemType.NETWORK_PASSWORD, attrs)
if result == GnomeKeyring.Result.OK:
passwords += items
elif deleting:
if result == GnomeKeyring.Result.CANCELLED:
raise PasswordDeleteError("Cancelled by user")
elif result != GnomeKeyring.Result.NO_MATCH:
raise PasswordDeleteError(result.value_name)
return passwords
|
Get password of the username for the service
|
entailment
|
def get_password(self, service, username):
"""Get password of the username for the service
"""
items = self._find_passwords(service, username)
if not items:
return None
secret = items[0].secret
return (
secret
if isinstance(secret, six.text_type) else
secret.decode('utf-8')
)
|
Get password of the username for the service
|
entailment
|
def set_password(self, service, username, password):
"""Set password for the username of the service
"""
service = self._safe_string(service)
username = self._safe_string(username)
password = self._safe_string(password)
attrs = GnomeKeyring.Attribute.list_new()
GnomeKeyring.Attribute.list_append_string(attrs, 'username', username)
GnomeKeyring.Attribute.list_append_string(attrs, 'service', service)
GnomeKeyring.Attribute.list_append_string(
attrs, 'application', 'python-keyring')
result = GnomeKeyring.item_create_sync(
self.keyring_name, GnomeKeyring.ItemType.NETWORK_PASSWORD,
"Password for '%s' on '%s'" % (username, service),
attrs, password, True)[0]
if result == GnomeKeyring.Result.CANCELLED:
# The user pressed "Cancel" when prompted to unlock their keyring.
raise PasswordSetError("Cancelled by user")
elif result != GnomeKeyring.Result.OK:
raise PasswordSetError(result.value_name)
|
Set password for the username of the service
|
entailment
|
def delete_password(self, service, username):
"""Delete the password for the username of the service.
"""
items = self._find_passwords(service, username, deleting=True)
if not items:
raise PasswordDeleteError("Password not found")
for current in items:
result = GnomeKeyring.item_delete_sync(current.keyring,
current.item_id)
if result == GnomeKeyring.Result.CANCELLED:
raise PasswordDeleteError("Cancelled by user")
elif result != GnomeKeyring.Result.OK:
raise PasswordDeleteError(result.value_name)
|
Delete the password for the username of the service.
|
entailment
|
def _safe_string(self, source, encoding='utf-8'):
"""Convert unicode to string as gnomekeyring barfs on unicode"""
if not isinstance(source, str):
return source.encode(encoding)
return str(source)
|
Convert unicode to string as gnomekeyring barfs on unicode
|
entailment
|
def get_context_data(self, **kwargs):
"""
Returns context dictionary for view.
:rtype: dict.
"""
kwargs.update({
'view': self,
'email_form': EmailLinkForm(),
'external_form': ExternalLinkForm(),
'type_email': Link.LINK_TYPE_EMAIL,
'type_external': Link.LINK_TYPE_EXTERNAL,
})
# If a form has been submitted, update context with
# the submitted form value.
if 'form' in kwargs:
submitted_form = kwargs.pop('form')
if isinstance(submitted_form, EmailLinkForm):
kwargs.update({'email_form': submitted_form})
elif isinstance(submitted_form, ExternalLinkForm):
kwargs.update({'external_form': submitted_form})
return kwargs
|
Returns context dictionary for view.
:rtype: dict.
|
entailment
|
def post(self, request, *args, **kwargs):
"""
Returns POST response.
:param request: the request instance.
:rtype: django.http.HttpResponse.
"""
form = None
link_type = int(request.POST.get('link_type', 0))
if link_type == Link.LINK_TYPE_EMAIL:
form = EmailLinkForm(**self.get_form_kwargs())
elif link_type == Link.LINK_TYPE_EXTERNAL:
form = ExternalLinkForm(**self.get_form_kwargs())
if form:
if form.is_valid():
return self.form_valid(form)
else:
return self.form_invalid(form)
else:
raise Http404()
|
Returns POST response.
:param request: the request instance.
:rtype: django.http.HttpResponse.
|
entailment
|
def get_form_class(self):
"""
Returns form class to use in the view.
:rtype: django.forms.ModelForm.
"""
if self.object.link_type == Link.LINK_TYPE_EMAIL:
return EmailLinkForm
elif self.object.link_type == Link.LINK_TYPE_EXTERNAL:
return ExternalLinkForm
return None
|
Returns form class to use in the view.
:rtype: django.forms.ModelForm.
|
entailment
|
def get_password(self, service, username):
"""Get password of the username for the service
"""
try:
# fetch the password
key = self._key_for_service(service)
hkey = winreg.OpenKey(winreg.HKEY_CURRENT_USER, key)
password_saved = winreg.QueryValueEx(hkey, username)[0]
password_base64 = password_saved.encode('ascii')
# decode with base64
password_encrypted = base64.decodebytes(password_base64)
# decrypt the password
password = _win_crypto.decrypt(password_encrypted).decode('utf-8')
except EnvironmentError:
password = None
return password
|
Get password of the username for the service
|
entailment
|
def set_password(self, service, username, password):
"""Write the password to the registry
"""
# encrypt the password
password_encrypted = _win_crypto.encrypt(password.encode('utf-8'))
# encode with base64
password_base64 = base64.encodebytes(password_encrypted)
# encode again to unicode
password_saved = password_base64.decode('ascii')
# store the password
key_name = self._key_for_service(service)
hkey = winreg.CreateKey(winreg.HKEY_CURRENT_USER, key_name)
winreg.SetValueEx(hkey, username, 0, winreg.REG_SZ, password_saved)
|
Write the password to the registry
|
entailment
|
def delete_password(self, service, username):
"""Delete the password for the username of the service.
"""
try:
key_name = self._key_for_service(service)
hkey = winreg.OpenKey(
winreg.HKEY_CURRENT_USER, key_name, 0,
winreg.KEY_ALL_ACCESS)
winreg.DeleteValue(hkey, username)
winreg.CloseKey(hkey)
except WindowsError:
e = sys.exc_info()[1]
raise PasswordDeleteError(e)
self._delete_key_if_empty(service)
|
Delete the password for the username of the service.
|
entailment
|
def encrypt(self, password):
"""Encrypt the password.
"""
if not password or not self._crypter:
return password or b''
return self._crypter.encrypt(password)
|
Encrypt the password.
|
entailment
|
def decrypt(self, password_encrypted):
"""Decrypt the password.
"""
if not password_encrypted or not self._crypter:
return password_encrypted or b''
return self._crypter.decrypt(password_encrypted)
|
Decrypt the password.
|
entailment
|
def _open(self, mode='r'):
"""Open the password file in the specified mode
"""
open_file = None
writeable = 'w' in mode or 'a' in mode or '+' in mode
try:
# NOTE: currently the MemOpener does not split off any filename
# which causes errors on close()
# so we add a dummy name and open it separately
if (self.filename.startswith('mem://')
or self.filename.startswith('ram://')):
open_file = fs.opener.fsopendir(self.filename).open('kr.cfg',
mode)
else:
if not hasattr(self, '_pyfs'):
# reuse the pyfilesystem and path
self._pyfs, self._path = fs.opener.opener.parse(
self.filename, writeable=writeable)
# cache if permitted
if self._cache_timeout is not None:
self._pyfs = fs.remote.CacheFS(
self._pyfs, cache_timeout=self._cache_timeout)
open_file = self._pyfs.open(self._path, mode)
except fs.errors.ResourceNotFoundError:
if self._can_create:
segments = fs.opener.opener.split_segments(self.filename)
if segments:
# this seems broken, but pyfilesystem uses it, so we must
fs_name, credentials, url1, url2, path = segments.groups()
assert fs_name, 'Should be a remote filesystem'
host = ''
# allow for domain:port
if ':' in url2:
split_url2 = url2.split('/', 1)
if len(split_url2) > 1:
url2 = split_url2[1]
else:
url2 = ''
host = split_url2[0]
pyfs = fs.opener.opener.opendir(
'%s://%s' % (fs_name, host))
# cache if permitted
if self._cache_timeout is not None:
pyfs = fs.remote.CacheFS(
pyfs, cache_timeout=self._cache_timeout)
# NOTE: fs.path.split does not function in the same
# way as os.path.split... at least under Windows
url2_path, url2_filename = os.path.split(url2)
if url2_path and not pyfs.exists(url2_path):
pyfs.makedir(url2_path, recursive=True)
else:
# assume local filesystem
full_url = fs.opener._expand_syspath(self.filename)
# NOTE: fs.path.split does not function in the same
# way as os.path.split... at least under Windows
url2_path, url2 = os.path.split(full_url)
pyfs = fs.osfs.OSFS(url2_path)
try:
# reuse the pyfilesystem and path
self._pyfs = pyfs
self._path = url2
return pyfs.open(url2, mode)
except fs.errors.ResourceNotFoundError:
if writeable:
raise
else:
pass
# NOTE: ignore read errors as the underlying caller can fail safely
if writeable:
raise
else:
pass
return open_file
|
Open the password file in the specified mode
|
entailment
|
def config(self):
"""load the passwords from the config file
"""
if not hasattr(self, '_config'):
raw_config = configparser.RawConfigParser()
f = self._open()
if f:
raw_config.read_file(f)
f.close()
self._config = raw_config
return self._config
|
load the passwords from the config file
|
entailment
|
def get_password(self, service, username):
"""Read the password from the file.
"""
service = escape_for_ini(service)
username = escape_for_ini(username)
# fetch the password
try:
password_base64 = self.config.get(service, username).encode()
# decode with base64
password_encrypted = base64.decodebytes(password_base64)
# decrypt the password
password = self.decrypt(password_encrypted).decode('utf-8')
except (configparser.NoOptionError, configparser.NoSectionError):
password = None
return password
|
Read the password from the file.
|
entailment
|
def set_password(self, service, username, password):
"""Write the password in the file.
"""
service = escape_for_ini(service)
username = escape_for_ini(username)
# encrypt the password
password = password or ''
password_encrypted = self.encrypt(password.encode('utf-8'))
# encode with base64
password_base64 = base64.encodebytes(password_encrypted).decode()
# write the modification
if not self.config.has_section(service):
self.config.add_section(service)
self.config.set(service, username, password_base64)
config_file = UnicodeWriterAdapter(self._open('w'))
self.config.write(config_file)
config_file.close()
|
Write the password in the file.
|
entailment
|
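Together, config/get_password/set_password store each secret as base64(encrypt(password)) under one INI section per service. A sketch of that storage shape with the encryption layer omitted; everything stays in memory, so no filename is needed.

import base64
import configparser

config = configparser.RawConfigParser()
config.add_section('svc')
config.set('svc', 'alice', base64.encodebytes(b's3cret').decode())

stored = config.get('svc', 'alice').encode()
assert base64.decodebytes(stored) == b's3cret'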
def get_queryset(self):
"""
Returns queryset limited to categories with live Entry instances.
:rtype: django.db.models.query.QuerySet.
"""
queryset = super(LiveEntryCategoryManager, self).get_queryset()
return queryset.filter(tag__in=[
entry_tag.tag
for entry_tag
in EntryTag.objects.filter(entry__live=True)
])
|
Returns queryset limited to categories with live Entry instances.
:rtype: django.db.models.query.QuerySet.
|
entailment
|
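The comprehension above loads every live EntryTag into Python just to collect its tags. An equivalent, hedged sketch that keeps the filtering in the database via values_list, assuming EntryTag's foreign key to the tag model is named 'tag':

def get_queryset(self):
    queryset = super(LiveEntryCategoryManager, self).get_queryset()
    live_tag_ids = EntryTag.objects.filter(
        entry__live=True
    ).values_list('tag_id', flat=True)
    return queryset.filter(tag__in=live_tag_ids)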
def get_for_model(self, model):
"""
Returns tuple (Entry instance, created) for specified
model instance.
:rtype: wagtailplus.wagtailrelations.models.Entry.
"""
return self.get_or_create(
content_type = ContentType.objects.get_for_model(model),
object_id = model.pk
)
|
Returns tuple (Entry instance, created) for specified
model instance.
:rtype: wagtailplus.wagtailrelations.models.Entry.
|
entailment
|
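Because get_for_model wraps get_or_create, callers receive an (entry, created) tuple, which is why earlier snippets index the result with [0]. A brief hypothetical usage (some_page is assumed to be a saved model instance):

entry, created = Entry.objects.get_for_model(some_page)
if created:
    print('first time this object entered the relations graph')
print(entry.get_related())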