| sentence1 | sentence2 | label |
|---|---|---|
def run(self):
"""
Copy libraries from the bin directory and place them as appropriate
"""
self.announce("Moving library files", level=3)
# We have already built the libraries in the previous build_ext step
self.skip_build = True
bin_dir = self.distribution.bin_dir
libs = [os.path.join(bin_dir, _lib) for _lib in
os.listdir(bin_dir) if
os.path.isfile(os.path.join(bin_dir, _lib)) and
os.path.splitext(_lib)[1] in [".dll", ".so"]
and not (_lib.startswith("python") or _lib.startswith("bpy"))]
for lib in libs:
shutil.move(lib, os.path.join(self.build_dir,
os.path.basename(lib)))
# Mark the libs for installation, adding them to
# distribution.data_files seems to ensure that setuptools' record
# writer appends them to installed-files.txt in the package's egg-info
#
# Also tried adding the libraries to the distribution.libraries list,
# but that never seemed to add them to the installed-files.txt in the
# egg-info, and the online recommendation seems to be adding libraries
# into eager_resources in the call to setup(), which I think puts them
# in data_files anyways.
#
# What is the best way?
self.distribution.data_files = [os.path.join(self.install_dir,
os.path.basename(lib))
for lib in libs]
# Must be forced to run after adding the libs to data_files
self.distribution.run_command("install_data")
super().run()
|
Copy libraries from the bin directory and place them as appropriate
|
entailment
|
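To answer the question left in the comment above: the conventional setuptools shape for data_files is a list of (target_directory, [source_files]) tuples rather than a flat list of paths. A minimal sketch, assuming the same install_dir, build_dir and libs names as the snippet above:

# Hedged sketch: setuptools expects (target_dir, [source_files]) tuples.
# self.install_dir, self.build_dir and libs are assumed from the command
# class above; this is an illustration, not the project's actual fix.
self.distribution.data_files = [
    (self.install_dir,
     [os.path.join(self.build_dir, os.path.basename(lib)) for lib in libs])
]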
def run(self):
"""
Copy the required directory to the build directory and super().run()
"""
self.announce("Moving scripts files", level=3)
self.skip_build = True
bin_dir = self.distribution.bin_dir
scripts_dirs = [os.path.join(bin_dir, _dir) for _dir in
os.listdir(bin_dir) if
os.path.isdir(os.path.join(bin_dir, _dir))]
for scripts_dir in scripts_dirs:
dst_dir = os.path.join(self.build_dir,
os.path.basename(scripts_dir))
# Mostly in case of weird things happening during build
if os.path.exists(dst_dir):
if os.path.isdir(dst_dir):
shutil.rmtree(dst_dir)
elif os.path.isfile(dst_dir):
os.remove(dst_dir)
shutil.move(scripts_dir,
os.path.join(self.build_dir,
os.path.basename(scripts_dir)))
# Mark the scripts for installation, adding them to
# distribution.scripts seems to ensure that the setuptools' record
# writer appends them to installed-files.txt in the package's egg-info
self.distribution.scripts = scripts_dirs
super().run()
|
Copy the required directory to the build directory and super().run()
|
entailment
|
def run(self):
"""
Perform build_cmake before doing the 'normal' stuff
"""
for extension in self.extensions:
if extension.name == "bpy":
self.build_cmake(extension)
super().run()
|
Perform build_cmake before doing the 'normal' stuff
|
entailment
|
def build_cmake(self, extension: Extension):
"""
The steps required to build the extension
"""
# We import the setup_requires modules here because if we import them
# at the top this script will always fail as they won't be present
from git import Repo as GitRepo
from svn.remote import RemoteClient as SvnRepo
self.announce("Preparing the build environment", level=3)
blender_dir = os.path.join(BLENDERPY_DIR, "blender")
build_dir = pathlib.Path(self.build_temp)
extension_path = pathlib.Path(self.get_ext_fullpath(extension.name))
os.makedirs(blender_dir, exist_ok=True)
os.makedirs(str(build_dir), exist_ok=True)
os.makedirs(str(extension_path.parent.absolute()), exist_ok=True)
# Now that the necessary directories are created, ensure that OS
# specific steps are performed; a good example is checking on linux
# that the required build libraries are in place.
os_build_args = []
# Have to find the correct release tag to checkout here, as potentially
# master may not be the correct one for this Python version. We use svn
# to find whether or not master, or a specific tag supports the
# current python version
if sys.platform == "win32": # Windows only steps
import winreg
vs_versions = []
for version in [12, 14, 15]:
try:
winreg.OpenKey(winreg.HKEY_CLASSES_ROOT,
f"VisualStudio.DTE.{version}.0")
except OSError:  # registry key absent for this VS version
pass
else:
vs_versions.append(version)
if not vs_versions:
raise Exception("Windows users must have Visual Studio 2013 "
"or later installed")
if max(vs_versions) == 15:
os_build_args += ["-G", f"Visual Studio 15 2017"
f"{' Win64' if BITS == 64 else ''}"]
elif max(vs_versions) == 14:
os_build_args += ["-G", f"Visual Studio 14 2015"
f"{' Win64' if BITS == 64 else ''}"]
elif max(vs_versions) == 12:
os_build_args += ["-G", f"Visual Studio 12 2013"
f"{' Win64' if BITS == 64 else ''}"]
# TODO: Clean up here
# NB svn_lib_options is first assigned here; appending with "+=" to an
# undefined name would raise a NameError.
svn_lib_options = [f"win{64 if BITS == 64 else 'dows'}_vc{version}"
for version in vs_versions]
blender_svn_repo = SvnRepo(svn_url)
os.makedirs(svn_dir, exist_ok=True)
self.announce(f"Checking out svn libs from {svn_url}", level=3)
try:
blender_svn_repo.checkout(svn_dir)
except Exception as e:
self.warn("Windows users must have the svn executable "
"available from the command line")
self.warn("Please install Tortoise SVN with \"command line "
"client tools\" as described here")
self.warn("https://stackoverflow.com/questions/1625406/using-"
"tortoisesvn-via-the-command-line")
raise e
elif sys.platform == "linux": # Linux only steps
# TODO: Test linux environment, issue #1
pass
elif sys.platform == "darwin": # MacOS only steps
# TODO: Test MacOS environment, issue #2
pass
# Perform relatively common build steps
# TODO: if blender desired version, then see if we can install that
# Otherwise fail, if no desired version, find the latest version that
# supports our python and install that
git_repo = GitRepo(GIT_BASE_URL)
svn_repo = SvnRepo(SVN_BASE_URL)
if BLENDER_DESIRED_VERSION:
match = BLENDER_VERSION_REGEX.match(BLENDER_DESIRED_VERSION)
if match:
# We have a blender version that conforms to the naming scheme
# now to see if it actually exists in git and svn
if match.group(0) in git_repo.tags:
# The version was tagged in the git repository
# now, format the version to match the svn versioning
# scheme...
svn_version_tag = (f"blender-{match.group(1)}"
f"{match.group(2) if not match.group(2).startswith('-rc') else ''}"
f"-release")
svn_tag_repo = SvnRepo(os.path.join(SVN_BASE_URL, SVN_TAGS))
if svn_version_tag in svn_tag_repo.list():
# The version was released in svn and we found it
# Now, is it compatible with our OS and python version?
pass  # TODO: compatibility check not yet implemented
else:
raise Exception(f"{BLENDER_DESIRED_VERSION} was found "
f"in the git repository but not the "
f"svn repository.")
else:
raise Exception(f"The provided version "
f"{BLENDER_DESIRED_VERSION} does not "
f"exist; please check "
f"https://git.blender.org/gitweb/"
f"gitweb.cgi/blender.git/tags for a list "
f"of valid Blender releases")
else:
# The blender version did not conform to the naming scheme
# fail and notify the user how to list the version
raise Exception(f"The provided version "
f"{BLENDER_DESIRED_VERSION} did not match "
f"Blender's naming scheme. Please list your "
f"desired version as 'v' followed by a digit, "
f"followed by a period, followed by two "
f"digits and either 'a', 'b', 'c' or '-rc' "
f"(versions using '-rc' can optionally add "
f"a number which specifies which release "
f"candidate they want to install) such that "
f"the version looks like the following: "
f"v2.74-rc2")
else:
if sys.version_info >= (3, 6):
# we can get from svn and git master branch
pass  # TODO: not yet implemented
else:
# we must find a compatible version
pass  # TODO: not yet implemented
self.announce(f"Cloning Blender source from {BLENDER_GIT_REPO_URL}",
level=3)
try:
blender_git_repo = GitRepo(blender_dir)
except Exception:  # not yet cloned, or not a valid git repository
GitRepo.clone_from(BLENDER_GIT_REPO_URL, blender_dir)
blender_git_repo = GitRepo(blender_dir)
finally:
blender_git_repo.heads.master.checkout()
blender_git_repo.remotes.origin.pull()
self.announce(f"Updating Blender git submodules", level=3)
blender_git_repo.git.submodule('update', '--init', '--recursive')
for submodule in blender_git_repo.submodules:
submodule_repo = submodule.module()
submodule_repo.heads.master.checkout()
submodule_repo.remotes.origin.pull()
self.announce("Configuring cmake project", level=3)
self.spawn(['cmake', '-H'+blender_dir, '-B'+self.build_temp,
'-DWITH_PLAYER=OFF', '-DWITH_PYTHON_INSTALL=OFF',
'-DWITH_PYTHON_MODULE=ON',
f"-DPYTHON_VERSION="
f"{sys.version_info[0]}.{sys.version_info[1]}"]
+ os_build_args)
self.announce("Building binaries", level=3)
self.spawn(["cmake", "--build", self.build_temp, "--target", "INSTALL",
"--config", "Release"])
# Build finished, now copy the files into the copy directory
# The copy directory is the parent directory of the extension (.pyd)
self.announce("Moving Blender python module", level=3)
bin_dir = os.path.join(str(build_dir), 'bin', 'Release')
self.distribution.bin_dir = bin_dir
bpy_path = [os.path.join(bin_dir, _bpy) for _bpy in
os.listdir(bin_dir) if
os.path.isfile(os.path.join(bin_dir, _bpy)) and
os.path.splitext(_bpy)[0].startswith('bpy') and
os.path.splitext(_bpy)[1] in [".pyd", ".so"]][0]
shutil.move(str(bpy_path), str(extension_path))
|
The steps required to build the extension
|
entailment
|
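For reference, the version naming scheme spelled out in the error message above ('v' + digit + '.' + two digits + 'a'/'b'/'c' or '-rc' with an optional number) could be captured by a pattern like the following. The real BLENDER_VERSION_REGEX is defined outside this snippet, so treat this as an illustrative guess:

import re

# Hypothetical reconstruction of BLENDER_VERSION_REGEX from the error text.
BLENDER_VERSION_REGEX = re.compile(r"^v(\d\.\d{2})([abc]|-rc\d*)?$")

assert BLENDER_VERSION_REGEX.match("v2.74-rc2")
assert BLENDER_VERSION_REGEX.match("v2.79b")
assert not BLENDER_VERSION_REGEX.match("2.79")  # missing the leading 'v'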
def get(self, addresses):
"""
:type addresses: list[str]
:param addresses: (list[str]) List of addresses to retrieve their reverse dns
Retrieve the current configured ReverseDns entries
:return: (list) List containing the current ReverseDns Addresses
"""
request = self._call(GetReverseDns.GetReverseDns, IPs=addresses)
response = request.commit()
return response['Value']
|
Retrieve the current configured ReverseDns entries.
:type addresses: list[str]
:param addresses: (list[str]) List of addresses to retrieve their reverse dns
:return: (list) List containing the current ReverseDns Addresses
|
entailment
|
def set(self, address, host_name):
"""
Assign one or more PTR record to a single IP Address
:type address: str
:type host_name: list[str]
:param address: (str) The IP address to configure
:param host_name: (list[str]) The list of strings representing PTR records
:return: (bool) True in case of success, False in case of failure
"""
request = self._call(SetEnqueueSetReverseDns.SetEnqueueSetReverseDns, IP=address, Hosts=host_name)
response = request.commit()
return response['Success']
|
Assign one or more PTR record to a single IP Address
:type address: str
:type host_name: list[str]
:param address: (str) The IP address to configure
:param host_name: (list[str]) The list of strings representing PTR records
:return: (bool) True in case of success, False in case of failure
|
entailment
|
def reset(self, addresses):
"""
Remove all PTR records from the given address
:type addresses: List[str]
:param addresses: (List[str]) The IP Address to reset
:return: (bool) True in case of success, False in case of failure
"""
request = self._call(SetEnqueueResetReverseDns.SetEnqueueResetReverseDns, IPs=addresses)
response = request.commit()
return response['Success']
|
Remove all PTR records from the given address
:type addresses: List[str]
:param addresses: (List[str]) The IP Address to reset
:return: (bool) True in case of success, False in case of failure
|
entailment
|
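Taken together, get, set and reset form a small PTR management API. A hypothetical usage sketch, assuming client is an instance of the class defining the three methods above (the IP address and hostname are placeholders):

# Hypothetical usage; 203.0.113.10 and mail.example.com are placeholders.
current = client.get(["203.0.113.10"])                # current PTR entries
if client.set("203.0.113.10", ["mail.example.com"]):  # assign PTR records
    client.reset(["203.0.113.10"])                    # remove them again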
def create(self, healthCheckNotification, instance, ipAddressResourceId, name, notificationContacts, rules,
loadBalancerClassOfServiceID=1, *args, **kwargs):
"""
:type healthCheckNotification: bool
:type instance: list[Instance]
:type ipAddressResourceId: list[int]
:type loadBalancerClassOfServiceID: int
:type name: str
:type notificationContacts: NotificationContacts or list[NotificationContact]
:type rules: Rules
:param healthCheckNotification: Enable or disable notifications
:param instance: List of balanced IP Addresses (VM or server)
:param ipAddressResourceId: ID of the IP Address resource of the Load Balancer
:param loadBalancerClassOfServiceID: default 1
:param name: Name of the Load Balancer
:param notificationContacts: Nullable if healthCheckNotification is False
:param rules: List of NewLoadBalancerRule object containing the list of rules to be configured with the service
"""
response = self._call(method=SetEnqueueLoadBalancerCreation,
healthCheckNotification=healthCheckNotification,
instance=instance,
ipAddressResourceId=ipAddressResourceId,
name=name,
notificationContacts=notificationContacts,
rules=rules,
loadBalancerClassOfServiceID=loadBalancerClassOfServiceID,
*args, **kwargs)
|
:type healthCheckNotification: bool
:type instance: list[Instance]
:type ipAddressResourceId: list[int]
:type loadBalancerClassOfServiceID: int
:type name: str
:type notificationContacts: NotificationContacts or list[NotificationContact]
:type rules: Rules
:param healthCheckNotification: Enable or disable notifications
:param instance: List of balanced IP Addresses (VM or server)
:param ipAddressResourceId: ID of the IP Address resource of the Load Balancer
:param loadBalancerClassOfServiceID: default 1
:param name: Name of the Load Balancer
:param notificationContacts: Nullable if healthCheckNotification is False
:param rules: List of NewLoadBalancerRule object containing the list of rules to be configured with the service
|
entailment
|
def get_notifications(self, startDate, endDate, loadBalancerID, loadBalancerRuleID):
"""
Get the load balancer notifications for a specific rule within a specified time window
:type startDate: datetime
:type endDate: datetime
:type loadBalancerID: int
:type loadBalancerRuleID: int
:param startDate: From Date
:param endDate: To Date
:param loadBalancerID: ID of the Load Balancer
:param loadBalancerRuleID: ID of the Load Balancer Rule
"""
return self._call(GetLoadBalancerNotifications, startDate=startDate, endDate=endDate,
loadBalancerID=loadBalancerID, loadBalancerRuleID=loadBalancerRuleID)
|
Get the load balancer notifications for a specific rule within a specified time window
:type startDate: datetime
:type endDate: datetime
:type loadBalancerID: int
:type loadBalancerRuleID: int
:param startDate: From Date
:param endDate: To Date
:param loadBalancerID: ID of the Load Balancer
:param loadBalancerRuleID: ID of the Load Balancer Rule
|
entailment
|
def get_oauth_authcfg(authcfg_id=AUTHCFG_ID):
"""Check if the given authcfg_id (or the default) exists, and if it's valid
OAuth2, return the configuration or None"""
# Handle empty strings
if not authcfg_id:
authcfg_id = AUTHCFG_ID
configs = auth_manager().availableAuthMethodConfigs()
if authcfg_id in configs \
and configs[authcfg_id].isValid() \
and configs[authcfg_id].method() == 'OAuth2':
return configs[authcfg_id]
return None
|
Check if the given authcfg_id (or the default) exists, and if it's valid
OAuth2, return the configuration or None
|
entailment
|
def setup_oauth(username, password, basemaps_token_uri, authcfg_id=AUTHCFG_ID, authcfg_name=AUTHCFG_NAME):
"""Setup oauth configuration to access the BCS API,
return authcfg_id on success, None on failure
"""
cfgjson = {
"accessMethod" : 0,
"apiKey" : "",
"clientId" : "",
"clientSecret" : "",
"configType" : 1,
"grantFlow" : 2,
"password" : password,
"persistToken" : False,
"redirectPort" : '7070',
"redirectUrl" : "",
"refreshTokenUrl" : "",
"requestTimeout" : '30',
"requestUrl" : "",
"scope" : "",
"state" : "",
"tokenUrl" : basemaps_token_uri,
"username" : username,
"version" : 1
}
if authcfg_id not in auth_manager().availableAuthMethodConfigs():
authConfig = QgsAuthMethodConfig('OAuth2')
authConfig.setId(authcfg_id)
authConfig.setName(authcfg_name)
authConfig.setConfig('oauth2config', json.dumps(cfgjson))
if auth_manager().storeAuthenticationConfig(authConfig):
return authcfg_id
else:
authConfig = QgsAuthMethodConfig()
auth_manager().loadAuthenticationConfig(authcfg_id, authConfig, True)
authConfig.setName(authcfg_name)
authConfig.setConfig('oauth2config', json.dumps(cfgjson))
if auth_manager().updateAuthenticationConfig(authConfig):
return authcfg_id
return None
|
Setup oauth configuration to access the BCS API,
return authcfg_id on success, None on failure
|
entailment
|
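A hedged usage sketch tying the two OAuth helpers together; the credentials and token URI are placeholders:

# Hypothetical usage of setup_oauth/get_oauth_authcfg (placeholder values).
authcfg_id = setup_oauth("alice", "s3cret", "https://example.com/oauth/token")
if authcfg_id is not None:
    config = get_oauth_authcfg(authcfg_id)  # QgsAuthMethodConfig or None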
def execute_search(
search,
search_terms="",
user=None,
reference="",
save=True,
query_type=SearchQuery.QUERY_TYPE_SEARCH,
):
"""
Create a new SearchQuery instance and execute a search against ES.
Args:
search: elasticsearch.search.Search object, that internally contains
the connection and query; this is the query that is executed. All
we are doing is logging the input and parsing the output.
search_terms: raw end user search terms input - what they typed into the search
box.
user: Django User object, the person making the query - used for logging
purposes. Can be null.
reference: string, can be anything you like, used for identification,
grouping purposes.
save: bool, if True then save the new object immediately, can be
overridden to False to prevent logging absolutely everything.
Defaults to True
query_type: string, used to determine whether to run a search query or
a count query (returns hit count, but no results).
"""
start = time.time()
if query_type == SearchQuery.QUERY_TYPE_SEARCH:
response = search.execute()
hits = [h.meta.to_dict() for h in response.hits]
total_hits = response.hits.total
elif query_type == SearchQuery.QUERY_TYPE_COUNT:
response = total_hits = search.count()
hits = []
else:
raise ValueError(f"Invalid SearchQuery.query_type value: '{query_type}'")
duration = time.time() - start
search_query = SearchQuery(
user=user,
search_terms=search_terms,
index=", ".join(search._index or ["_all"])[:100], # field length restriction
query=search.to_dict(),
query_type=query_type,
hits=hits,
total_hits=total_hits,
reference=reference or "",
executed_at=tz_now(),
duration=duration,
)
search_query.response = response
return search_query.save() if save else search_query
|
Create a new SearchQuery instance and execute a search against ES.
Args:
search: elasticsearch.search.Search object, that internally contains
the connection and query; this is the query that is executed. All
we are doing is logging the input and parsing the output.
search_terms: raw end user search terms input - what they typed into the search
box.
user: Django User object, the person making the query - used for logging
purposes. Can be null.
reference: string, can be anything you like, used for identification,
grouping purposes.
save: bool, if True then save the new object immediately, can be
overridden to False to prevent logging absolutely everything.
Defaults to True
query_type: string, used to determine whether to run a search query or
a count query (returns hit count, but no results).
|
entailment
|
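A minimal usage sketch for execute_search, assuming a configured elasticsearch-dsl connection; the index and field names are hypothetical:

from elasticsearch_dsl import Search

# Hypothetical index/field; any configured Search object will do.
search = Search(index="blog").query("match", title="django")
sq = execute_search(search, search_terms="django", reference="blog-page")
print(sq.total_hits, sq.duration)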
def in_search_queryset(self, instance_id, index="_all"):
"""
Return True if an object is part of the search index queryset.
Sometimes it's useful to know if an object _should_ be indexed. If
an object is saved, how do you know if you should push that change
to the search index? The simplest (albeit not most efficient) way
is to check if it appears in the underlying search queryset.
NB this method doesn't evaluate the entire dataset, it chains an
additional queryset filter expression on the end. That's why it's
important that the `get_search_queryset` method returns a queryset.
Args:
instance_id: the id of model object that we are looking for.
Kwargs:
index: string, the name of the index in which to check.
Defaults to '_all'.
"""
return self.get_search_queryset(index=index).filter(pk=instance_id).exists()
|
Return True if an object is part of the search index queryset.
Sometimes it's useful to know if an object _should_ be indexed. If
an object is saved, how do you know if you should push that change
to the search index? The simplest (albeit not most efficient) way
is to check if it appears in the underlying search queryset.
NB this method doesn't evaluate the entire dataset, it chains an
additional queryset filter expression on the end. That's why it's
important that the `get_search_queryset` method returns a queryset.
Args:
instance_id: the id of model object that we are looking for.
Kwargs:
index: string, the name of the index in which to check.
Defaults to '_all'.
|
entailment
|
def from_search_query(self, search_query):
"""
Return queryset of objects from SearchQuery.results, **in order**.
EXPERIMENTAL: this will only work with results from a single index,
with a single doc_type - as we are returning a single QuerySet.
This method takes the hits JSON and converts that into a queryset
of all the relevant objects. The key part of this is the ordering -
the order in which search results are returned is based on relevance,
something that only ES can calculate, and that cannot be replicated
in the database.
It does this by adding custom SQL which annotates each record with
the score from the search 'hit'. This is brittle, caveat emptor.
The RawSQL clause is in the form:
SELECT CASE {{model}}.id WHEN {{id}} THEN {{score}} END
The "WHEN x THEN y" is repeated for every hit. The resulting SQL, in
full is like this:
SELECT "freelancer_freelancerprofile"."id",
(SELECT CASE freelancer_freelancerprofile.id
WHEN 25 THEN 1.0
WHEN 26 THEN 1.0
[...]
ELSE 0
END) AS "search_score"
FROM "freelancer_freelancerprofile"
WHERE "freelancer_freelancerprofile"."id" IN (25, 26, [...])
ORDER BY "search_score" DESC
It should be very fast, as there is no table lookup, but there is an
assumption at the heart of this, which is that the search query doesn't
contain the entire database - i.e. that it has been paged. (ES itself
caps the results at 10,000.)
"""
hits = search_query.hits
score_sql = self._raw_sql([(h["id"], h["score"] or 0) for h in hits])
rank_sql = self._raw_sql([(hits[i]["id"], i) for i in range(len(hits))])
return (
self.get_queryset()
.filter(pk__in=[h["id"] for h in hits])
# add the query relevance score
.annotate(search_score=RawSQL(score_sql, ()))
# add the ordering number (0-based)
.annotate(search_rank=RawSQL(rank_sql, ()))
.order_by("search_rank")
)
|
Return queryset of objects from SearchQuery.results, **in order**.
EXPERIMENTAL: this will only work with results from a single index,
with a single doc_type - as we are returning a single QuerySet.
This method takes the hits JSON and converts that into a queryset
of all the relevant objects. The key part of this is the ordering -
the order in which search results are returned is based on relevance,
something that only ES can calculate, and that cannot be replicated
in the database.
It does this by adding custom SQL which annotates each record with
the score from the search 'hit'. This is brittle, caveat emptor.
The RawSQL clause is in the form:
SELECT CASE {{model}}.id WHEN {{id}} THEN {{score}} END
The "WHEN x THEN y" is repeated for every hit. The resulting SQL, in
full is like this:
SELECT "freelancer_freelancerprofile"."id",
(SELECT CASE freelancer_freelancerprofile.id
WHEN 25 THEN 1.0
WHEN 26 THEN 1.0
[...]
ELSE 0
END) AS "search_score"
FROM "freelancer_freelancerprofile"
WHERE "freelancer_freelancerprofile"."id" IN (25, 26, [...])
ORDER BY "search_score" DESC
It should be very fast, as there is no table lookup, but there is an
assumption at the heart of this, which is that the search query doesn't
contain the entire database - i.e. that it has been paged. (ES itself
caps the results at 10,000.)
|
entailment
|
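Following on from execute_search, a sketch of how the ordered queryset might be consumed; FreelancerProfile is the hypothetical model borrowed from the SQL example in the docstring:

# sq is a SearchQuery as returned by execute_search (see above).
profiles = FreelancerProfile.objects.from_search_query(sq)
for p in profiles:  # iterates in ES relevance order
    print(p.pk, p.search_score, p.search_rank)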
def _raw_sql(self, values):
"""Prepare SQL statement consisting of a sequence of WHEN .. THEN statements."""
if isinstance(self.model._meta.pk, CharField):
when_clauses = " ".join(
[self._when("'{}'".format(x), y) for (x, y) in values]
)
else:
when_clauses = " ".join([self._when(x, y) for (x, y) in values])
table_name = self.model._meta.db_table
primary_key = self.model._meta.pk.column
return 'SELECT CASE {}."{}" {} ELSE 0 END'.format(
table_name, primary_key, when_clauses
)
|
Prepare SQL statement consisting of a sequence of WHEN .. THEN statements.
|
entailment
|
def search_document_cache_key(self):
"""Key used for storing search docs in local cache."""
return "elasticsearch_django:{}.{}.{}".format(
self._meta.app_label, self._meta.model_name, self.pk
)
|
Key used for storing search docs in local cache.
|
entailment
|
def _is_field_serializable(self, field_name):
"""Return True if the field can be serialized into a JSON doc."""
return (
self._meta.get_field(field_name).get_internal_type()
in self.SIMPLE_UPDATE_FIELD_TYPES
)
|
Return True if the field can be serialized into a JSON doc.
|
entailment
|
def clean_update_fields(self, index, update_fields):
"""
Clean the list of update_fields based on the index being updated.
If any field in the update_fields list is not in the set of properties
defined by the index mapping for this model, then we ignore it. If
a field _is_ in the mapping, but the underlying model field is a
related object, and thereby not directly serializable, then this
method will raise a ValueError.
"""
search_fields = get_model_index_properties(self, index)
clean_fields = [f for f in update_fields if f in search_fields]
ignore = [f for f in update_fields if f not in search_fields]
if ignore:
logger.debug("Ignoring fields from partial update: %s", ignore)
for f in clean_fields:
if not self._is_field_serializable(f):
raise ValueError(
"'%s' cannot be automatically serialized into a search "
"document property. Please override "
"as_search_document_update." % f
)
return clean_fields
|
Clean the list of update_fields based on the index being updated.
If any field in the update_fields list is not in the set of properties
defined by the index mapping for this model, then we ignore it. If
a field _is_ in the mapping, but the underlying model field is a
related object, and thereby not directly serializable, then this
method will raise a ValueError.
|
entailment
|
def as_search_document_update(self, *, index, update_fields):
"""
Return a partial update document based on which fields have been updated.
If an object is saved with the `update_fields` argument passed
through, then it is assumed that this is a 'partial update'. In
this scenario we need a {property: value} dictionary containing
just the fields we want to update.
This method handles two possible update strategies - 'full' or 'partial'.
The default 'full' strategy simply returns the value of `as_search_document`
- thereby replacing the entire document each time. The 'partial' strategy is
more intelligent - it will determine whether the fields passed are in the
search document mapping, and return a partial update document that contains
only those that are. In addition, if any field that _is_ included cannot
be automatically serialized (e.g. a RelatedField object), then this method
will raise a ValueError. In this scenario, you should override this method
in your subclass.
>>> def as_search_document_update(self, index, update_fields):
...     if 'user' in update_fields:
...         update_fields.remove('user')
...         doc = super().as_search_document_update(index, update_fields)
...         doc['user'] = self.user.get_full_name()
...         return doc
...     return super().as_search_document_update(index, update_fields)
You may also wish to subclass this method to perform field-specific logic
- in this example if only the timestamp is being saved, then ignore the
update if the timestamp is later than a certain time.
>>> def as_search_document_update(self, index, update_fields):
...     if update_fields == ['timestamp']:
...         if self.timestamp > today():
...             return {}
...     return super().as_search_document_update(index, update_fields)
"""
if UPDATE_STRATEGY == UPDATE_STRATEGY_FULL:
return self.as_search_document(index=index)
if UPDATE_STRATEGY == UPDATE_STRATEGY_PARTIAL:
# in partial mode we update the intersection of update_fields and
# properties found in the mapping file.
return {
k: getattr(self, k)
for k in self.clean_update_fields(
index=index, update_fields=update_fields
)
}
|
Return a partial update document based on which fields have been updated.
If an object is saved with the `update_fields` argument passed
through, then it is assumed that this is a 'partial update'. In
this scenario we need a {property: value} dictionary containing
just the fields we want to update.
This method handles two possible update strategies - 'full' or 'partial'.
The default 'full' strategy simply returns the value of `as_search_document`
- thereby replacing the entire document each time. The 'partial' strategy is
more intelligent - it will determine whether the fields passed are in the
search document mapping, and return a partial update document that contains
only those that are. In addition, if any field that _is_ included cannot
be automatically serialized (e.g. a RelatedField object), then this method
will raise a ValueError. In this scenario, you should override this method
in your subclass.
>>> def as_search_document_update(self, index, update_fields):
...     if 'user' in update_fields:
...         update_fields.remove('user')
...         doc = super().as_search_document_update(index, update_fields)
...         doc['user'] = self.user.get_full_name()
...         return doc
...     return super().as_search_document_update(index, update_fields)
You may also wish to subclass this method to perform field-specific logic
- in this example if only the timestamp is being saved, then ignore the
update if the timestamp is later than a certain time.
>>> def as_search_document_update(self, index, update_fields):
...     if update_fields == ['timestamp']:
...         if self.timestamp > today():
...             return {}
...     return super().as_search_document_update(index, update_fields)
|
entailment
|
def as_search_action(self, *, index, action):
"""
Return an object as represented in a bulk api operation.
Bulk API operations have a very specific format. This function will
call the standard `as_search_document` method on the object and then
wrap that up in the correct format for the action specified.
https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html
Args:
index: string, the name of the index in which the action is to
be taken. Bulk operations are only ever carried out on a single
index at a time.
action: string ['index' | 'update' | 'delete'] - this decides
how the final document is formatted.
Returns a dictionary.
"""
if action not in ("index", "update", "delete"):
raise ValueError("Action must be 'index', 'update' or 'delete'.")
document = {
"_index": index,
"_type": self.search_doc_type,
"_op_type": action,
"_id": self.pk,
}
if action == "index":
document["_source"] = self.as_search_document(index=index)
elif action == "update":
document["doc"] = self.as_search_document(index=index)
return document
|
Return an object as represented in a bulk api operation.
Bulk API operations have a very specific format. This function will
call the standard `as_search_document` method on the object and then
wrap that up in the correct format for the action specified.
https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html
Args:
index: string, the name of the index in which the action is to
be taken. Bulk operations are only ever carried out on a single
index at a time.
action: string ['index' | 'update' | 'delete'] - this decides
how the final document is formatted.
Returns a dictionary.
|
entailment
|
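For illustration, an 'index' action produced by the method above has roughly this shape (the index name, doc type and id are example values):

# Example action shape only; values depend on the instance and index.
action = {
    "_index": "blog",
    "_type": "post",                # self.search_doc_type
    "_op_type": "index",
    "_id": 42,
    "_source": {"title": "Hello"},  # as_search_document(index="blog") output
}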
def fetch_search_document(self, *, index):
"""Fetch the object's document from a search index by id."""
assert self.pk, "Object must have a primary key before being indexed."
client = get_client()
return client.get(index=index, doc_type=self.search_doc_type, id=self.pk)
|
Fetch the object's document from a search index by id.
|
entailment
|
def index_search_document(self, *, index):
"""
Create or replace search document in named index.
Checks the local cache to see if the document has changed,
and if not aborts the update, else pushes to ES, and then
resets the local cache. Cache timeout is set as "cache_expiry"
in the settings, and defaults to 60s.
"""
cache_key = self.search_document_cache_key
new_doc = self.as_search_document(index=index)
cached_doc = cache.get(cache_key)
if new_doc == cached_doc:
logger.debug("Search document for %r is unchanged, ignoring update.", self)
return []
cache.set(cache_key, new_doc, timeout=get_setting("cache_expiry", 60))
get_client().index(
index=index, doc_type=self.search_doc_type, body=new_doc, id=self.pk
)
|
Create or replace search document in named index.
Checks the local cache to see if the document has changed,
and if not aborts the update, else pushes to ES, and then
resets the local cache. Cache timeout is set as "cache_expiry"
in the settings, and defaults to 60s.
|
entailment
|
def update_search_document(self, *, index, update_fields):
"""
Partial update of a document in named index.
Partial updates are invoked via a call to save the document
with 'update_fields'. These fields are passed to the
as_search_document_update method so that it can build a partial
document. NB we don't just call as_search_document and then
strip the fields _not_ in update_fields as we are trying
to avoid possibly expensive operations in building the
source document. The canonical example for this method
is updating a single timestamp on a model - we don't want
to have to walk the model relations and build a document
in this case - we just want to push the timestamp.
When POSTing a partial update the `as_search_document` doc
must be passed to the `client.update` wrapped in a "doc" node,
see: https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-update.html
"""
doc = self.as_search_document_update(index=index, update_fields=update_fields)
if not doc:
logger.debug("Ignoring object update as document is empty.")
return
get_client().update(
index=index, doc_type=self.search_doc_type, body={"doc": doc}, id=self.pk
)
|
Partial update of a document in named index.
Partial updates are invoked via a call to save the document
with 'update_fields'. These fields are passed to the
as_search_document_update method so that it can build a partial
document. NB we don't just call as_search_document and then
strip the fields _not_ in update_fields as we are trying
to avoid possibly expensive operations in building the
source document. The canonical example for this method
is updating a single timestamp on a model - we don't want
to have to walk the model relations and build a document
in this case - we just want to push the timestamp.
When POSTing a partial update the `as_search_document` doc
must be passed to the `client.update` wrapped in a "doc" node,
see: https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-update.html
|
entailment
|
def delete_search_document(self, *, index):
"""Delete document from named index."""
cache.delete(self.search_document_cache_key)
get_client().delete(index=index, doc_type=self.search_doc_type, id=self.pk)
|
Delete document from named index.
|
entailment
|
def execute(cls, search, search_terms="", user=None, reference=None, save=True):
"""Create a new SearchQuery instance and execute a search against ES."""
warnings.warn(
"Pending deprecation - please use `execute_search` function instead.",
PendingDeprecationWarning,
)
return execute_search(
search, search_terms=search_terms, user=user, reference=reference, save=save
)
|
Create a new SearchQuery instance and execute a search against ES.
|
entailment
|
def save(self, **kwargs):
"""Save and return the object (for chaining)."""
if self.search_terms is None:
self.search_terms = ""
super().save(**kwargs)
return self
|
Save and return the object (for chaining).
|
entailment
|
def page_slice(self):
"""Return the query from:size tuple (0-based)."""
return (
None
if self.query is None
else (self.query.get("from", 0), self.query.get("size", 10))
)
|
Return the query from:size tuple (0-based).
|
entailment
|
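For example, given a stored query body with paging information, page_slice reports the window as a (from, size) tuple; this sketch assumes page_slice is exposed as a property and that sq is a SearchQuery instance:

# Hypothetical SearchQuery instance with a paged query body.
sq.query = {"from": 20, "size": 10, "query": {"match_all": {}}}
assert sq.page_slice == (20, 10)
sq.query = {"query": {"match_all": {}}}
assert sq.page_slice == (0, 10)  # defaults applied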
def setPluginSetting(name, value, namespace = None):
'''
Sets the value of a plugin setting.
:param name: the name of the setting. It is not the full path, but just the last name of it
:param value: the value to set for the plugin setting
:param namespace: The namespace. If not passed or None, the namespace will be inferred from
the caller method. Normally, this should not be passed, since it suffices to let this function
find out the plugin from where it is being called, and it will automatically use the
corresponding plugin namespace
'''
namespace = namespace or _callerName().split(".")[0]
settings.setValue(namespace + "/" + name, value)
|
Sets the value of a plugin setting.
:param name: the name of the setting. It is not the full path, but just the last name of it
:param value: the value to set for the plugin setting
:param namespace: The namespace. If not passed or None, the namespace will be inferred from
the caller method. Normally, this should not be passed, since it suffices to let this function
find out the plugin from where it is being called, and it will automatically use the
corresponding plugin namespace
|
entailment
|
def pluginSetting(name, namespace=None, typ=None):
'''
Returns the value of a plugin setting.
:param name: the name of the setting. It is not the full path, but just the last name of it
:param namespace: The namespace. If not passed or None, the namespace will be inferred from
the caller method. Normally, this should not be passed, since it suffices to let this function
find out the plugin from where it is being called, and it will automatically use the
corresponding plugin namespace
'''
def _find_in_cache(name, key):
for setting in _settings[namespace]:
if setting["name"] == name:
return setting[key]
return None
def _type_map(t):
"""Return setting python type"""
if t == BOOL:
return bool
elif t == NUMBER:
return float
else:
return unicode
namespace = namespace or _callerName().split(".")[0]
full_name = namespace + "/" + name
if settings.contains(full_name):
if typ is None:
typ = _type_map(_find_in_cache(name, 'type'))
v = settings.value(full_name, None, type=typ)
try:
if isinstance(v, QPyNullVariant):
v = None
except NameError:
# QPyNullVariant is not defined in newer PyQt versions
pass
return v
else:
return _find_in_cache(name, 'default')
|
Returns the value of a plugin setting.
:param name: the name of the setting. It is not the full path, but just the last name of it
:param namespace: The namespace. If not passed or None, the namespace will be inferred from
the caller method. Normally, this should not be passed, since it suffices to let this function
find out the plugin from where it is being called, and it will automatically use the
corresponding plugin namespace
|
entailment
|
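A usage sketch for the two settings helpers above, called from inside a plugin package; the plugin namespace "myplugin" and setting name are placeholders:

# Hypothetical calls; normally the namespace is inferred from the caller.
setPluginSetting("mysetting", "hello", namespace="myplugin")
value = pluginSetting("mysetting", namespace="myplugin")  # -> "hello"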
def readSettings(settings_path=None):
'''
Reads the settings corresponding to the plugin from where the method is called.
This function has to be called in the __init__ method of the plugin class.
Settings are stored in a settings.json file in the plugin folder.
Here is an example of such a file:
[
{"name":"mysetting",
"label": "My setting",
"description": "A setting to customize my plugin",
"type": "string",
"default": "dummy string",
"group": "Group 1"
"onEdit": "def f():\\n\\tprint "Value edited in settings dialog"
"onChange": "def f():\\n\\tprint "New settings value has been saved"
},
{"name":"anothersetting",
"label": "Another setting",
"description": "Another setting to customize my plugin",
"type": "number",
"default": 0,
"group": "Group 2"
},
{"name":"achoicesetting",
"label": "A choice setting",
"description": "A setting to select from a set of possible options",
"type": "choice",
"default": "option 1",
"options":["option 1", "option 2", "option 3"],
"group": "Group 2"
}
]
Available types for settings are: string, bool, number, choice, crs and text (a multiline string)
The onEdit property contains a function that will be executed when the user edits the value
in the settings dialog. It should return False if, after it has been executed, the setting
should not be modified and should recover its original value.
The onChange property contains a function that will be executed when the setting is changed after
closing the settings dialog, or programmatically by calling the setPluginSetting method.
Both onEdit and onChange are optional properties
'''
global _settings
namespace = _callerName().split(".")[0]
settings_path = settings_path or os.path.join(os.path.dirname(_callerPath()), "settings.json")
with open(settings_path) as f:
_settings[namespace] = json.load(f)
|
Reads the settings corresponding to the plugin from where the method is called.
This function has to be called in the __init__ method of the plugin class.
Settings are stored in a settings.json file in the plugin folder.
Here is an example of such a file:
[
{"name":"mysetting",
"label": "My setting",
"description": "A setting to customize my plugin",
"type": "string",
"default": "dummy string",
"group": "Group 1"
"onEdit": "def f():\\n\\tprint "Value edited in settings dialog"
"onChange": "def f():\\n\\tprint "New settings value has been saved"
},
{"name":"anothersetting",
"label": "Another setting",
"description": "Another setting to customize my plugin",
"type": "number",
"default": 0,
"group": "Group 2"
},
{"name":"achoicesetting",
"label": "A choice setting",
"description": "A setting to select from a set of possible options",
"type": "choice",
"default": "option 1",
"options":["option 1", "option 2", "option 3"],
"group": "Group 2"
}
]
Available types for settings are: string, bool, number, choice, crs and text (a multiline string)
The onEdit property contains a function that will be executed when the user edits the value
in the settings dialog. It should return False if, after it has been executed, the setting
should not be modified and should recover its original value.
The onChange property contains a function that will be executed when the setting is changed after
closing the settings dialog, or programmatically by calling the setPluginSetting method.
Both onEdit and onChange are optional properties
|
entailment
|
def do_index_command(self, index, **options):
"""Delete search index."""
if options["interactive"]:
logger.warning("This will permanently delete the index '%s'.", index)
if not self._confirm_action():
logger.warning(
"Aborting deletion of index '%s' at user's request.", index
)
return
return delete_index(index)
|
Delete search index.
|
entailment
|
def create_index(index):
"""Create an index and apply mapping if appropriate."""
logger.info("Creating search index: '%s'", index)
client = get_client()
return client.indices.create(index=index, body=get_index_mapping(index))
|
Create an index and apply mapping if appropriate.
|
entailment
|
def update_index(index):
"""Re-index every document in a named index."""
logger.info("Updating search index: '%s'", index)
client = get_client()
responses = []
for model in get_index_models(index):
logger.info("Updating search index model: '%s'", model.search_doc_type)
objects = model.objects.get_search_queryset(index).iterator()
actions = bulk_actions(objects, index=index, action="index")
response = helpers.bulk(client, actions, chunk_size=get_setting("chunk_size"))
responses.append(response)
return responses
|
Re-index every document in a named index.
|
entailment
|
def delete_index(index):
"""Delete index entirely (removes all documents and mapping)."""
logger.info("Deleting search index: '%s'", index)
client = get_client()
return client.indices.delete(index=index)
|
Delete index entirely (removes all documents and mapping).
|
entailment
|
def prune_index(index):
"""Remove all orphaned documents from an index.
This function works by scanning the remote index, and in each returned
batch of documents looking up whether they appear in the default index
queryset. If they don't (they've been deleted, or no longer fit the qs
filters) then they are deleted from the index. The deletion is done in
one hit after the entire remote index has been scanned.
The elasticsearch.helpers.scan function returns each document one at a
time, so this function can swamp the database with SELECT requests.
Please use sparingly.
Returns a list of ids of all the objects deleted.
"""
logger.info("Pruning missing objects from index '%s'", index)
prunes = []
responses = []
client = get_client()
for model in get_index_models(index):
for hit in scan_index(index, model):
obj = _prune_hit(hit, model)
if obj:
prunes.append(obj)
logger.info(
"Found %s objects of type '%s' for deletion from '%s'.",
len(prunes),
model,
index,
)
if len(prunes) > 0:
actions = bulk_actions(prunes, index, "delete")
response = helpers.bulk(
client, actions, chunk_size=get_setting("chunk_size")
)
responses.append(response)
return responses
|
Remove all orphaned documents from an index.
This function works by scanning the remote index, and in each returned
batch of documents looking up whether they appear in the default index
queryset. If they don't (they've been deleted, or no longer fit the qs
filters) then they are deleted from the index. The deletion is done in
one hit after the entire remote index has been scanned.
The elasticsearch.helpers.scan function returns each document one at a
time, so this function can swamp the database with SELECT requests.
Please use sparingly.
Returns a list of ids of all the objects deleted.
|
entailment
|
def _prune_hit(hit, model):
"""
Check whether a document should be pruned.
This method uses the SearchDocumentManagerMixin.in_search_queryset method
to determine whether a 'hit' (search document) should be pruned from an index,
and if so it returns the hit as a Django object(id=hit_id).
Args:
hit: dict object that represents a document as returned from the scan_index
function. (Contains object id and index.)
model: the Django model (not object) from which the document was derived.
Used to get the correct model manager and bulk action.
Returns:
an object of type model, with id=hit_id. NB this is not the object
itself, which by definition may not exist in the underlying database,
but a temporary object with the document id - which is enough to create
a 'delete' action.
"""
hit_id = hit["_id"]
hit_index = hit["_index"]
if model.objects.in_search_queryset(hit_id, index=hit_index):
logger.debug(
"%s with id=%s exists in the '%s' index queryset.", model, hit_id, hit_index
)
return None
else:
logger.debug(
"%s with id=%s does not exist in the '%s' index queryset and will be pruned.",
model,
hit_id,
hit_index,
)
# we don't need the full obj for a delete action, just the id.
# (the object itself may not even exist.)
return model(pk=hit_id)
|
Check whether a document should be pruned.
This method uses the SearchDocumentManagerMixin.in_search_queryset method
to determine whether a 'hit' (search document) should be pruned from an index,
and if so it returns the hit as a Django object(id=hit_id).
Args:
hit: dict object that represents a document as returned from the scan_index
function. (Contains object id and index.)
model: the Django model (not object) from which the document was derived.
Used to get the correct model manager and bulk action.
Returns:
an object of type model, with id=hit_id. NB this is not the object
itself, which by definition may not exist in the underlying database,
but a temporary object with the document id - which is enough to create
a 'delete' action.
|
entailment
|
def scan_index(index, model):
"""
Yield all documents of model type in an index.
This function calls the elasticsearch.helpers.scan function,
and yields all the documents in the index that match the doc_type
produced by a specific Django model.
Args:
index: string, the name of the index to scan, must be a configured
index as returned from settings.get_index_names.
model: a Django model type, used to filter the documents that
are scanned.
Yields each document of type model in index, one at a time.
"""
# see https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-type-query.html
query = {"query": {"type": {"value": model._meta.model_name}}}
client = get_client()
for hit in helpers.scan(client, index=index, query=query):
yield hit
|
Yield all documents of model type in an index.
This function calls the elasticsearch.helpers.scan function,
and yields all the documents in the index that match the doc_type
produced by a specific Django model.
Args:
index: string, the name of the index to scan, must be a configured
index as returned from settings.get_index_names.
model: a Django model type, used to filter the documents that
are scanned.
Yields each document of type model in index, one at a time.
|
entailment
|
def bulk_actions(objects, index, action):
"""
Yield bulk api 'actions' from a collection of objects.
The output from this method can be fed into the bulk
api helpers - each document returned by get_documents
is decorated with the appropriate bulk api op_type.
Args:
objects: iterable (queryset, list, ...) of SearchDocumentMixin
objects. If the objects passed in is a generator, then this
function will yield the results rather than returning them.
index: string, the name of the index to target - the index name
is embedded into the return value and is used by the bulk api.
action: string ['index' | 'update' | 'delete'] - this decides
how the final document is formatted.
"""
assert (
index != "_all"
), "index arg must be a valid index name. '_all' is a reserved term."
logger.info("Creating bulk '%s' actions for '%s'", action, index)
for obj in objects:
try:
logger.debug("Appending '%s' action for '%r'", action, obj)
yield obj.as_search_action(index=index, action=action)
except Exception:
logger.exception("Unable to create search action for %s", obj)
|
Yield bulk api 'actions' from a collection of objects.
The output from this method can be fed into the bulk
api helpers - each document returned by get_documents
is decorated with the appropriate bulk api op_type.
Args:
objects: iterable (queryset, list, ...) of SearchDocumentMixin
objects. If the objects passed in is a generator, then this
function will yield the results rather than returning them.
index: string, the name of the index to target - the index name
is embedded into the return value and is used by the bulk api.
action: string ['index' | 'update' | 'delete'] - this decides
how the final document is formatted.
|
entailment
|
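As update_index above shows, the generator is typically fed straight into the Elasticsearch bulk helper; a sketch with assumed names, where objects is any iterable of SearchDocumentMixin instances and "blog" is a placeholder index:

from elasticsearch import helpers

actions = bulk_actions(objects, index="blog", action="index")
helpers.bulk(get_client(), actions, chunk_size=get_setting("chunk_size"))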
def _validate_config(strict=False):
"""Validate settings.SEARCH_SETTINGS."""
for index in settings.get_index_names():
_validate_mapping(index, strict=strict)
for model in settings.get_index_models(index):
_validate_model(model)
if settings.get_setting("update_strategy", "full") not in ["full", "partial"]:
raise ImproperlyConfigured(
"Invalid SEARCH_SETTINGS: 'update_strategy' value must be 'full' or 'partial'."
)
|
Validate settings.SEARCH_SETTINGS.
|
entailment
|
def _validate_mapping(index, strict=False):
"""Check that an index mapping JSON file exists."""
try:
settings.get_index_mapping(index)
except IOError:
if strict:
raise ImproperlyConfigured("Index '%s' has no mapping file." % index)
else:
logger.warning("Index '%s' has no mapping, relying on ES instead.", index)
|
Check that an index mapping JSON file exists.
|
entailment
|
def _validate_model(model):
"""Check that a model configured for an index subclasses the required classes."""
if not hasattr(model, "as_search_document"):
raise ImproperlyConfigured("'%s' must implement `as_search_document`." % model)
if not hasattr(model.objects, "get_search_queryset"):
raise ImproperlyConfigured(
"'%s.objects must implement `get_search_queryset`." % model
)
|
Check that a model configured for an index subclasses the required classes.
|
entailment
|
def _connect_signals():
"""Connect up post_save, post_delete signals for models."""
for index in settings.get_index_names():
for model in settings.get_index_models(index):
_connect_model_signals(model)
|
Connect up post_save, post_delete signals for models.
|
entailment
|
def _connect_model_signals(model):
"""Connect signals for a single model."""
dispatch_uid = "%s.post_save" % model._meta.model_name
logger.debug("Connecting search index model post_save signal: %s", dispatch_uid)
signals.post_save.connect(_on_model_save, sender=model, dispatch_uid=dispatch_uid)
dispatch_uid = "%s.post_delete" % model._meta.model_name
logger.debug("Connecting search index model post_delete signal: %s", dispatch_uid)
signals.post_delete.connect(
_on_model_delete, sender=model, dispatch_uid=dispatch_uid
)
|
Connect signals for a single model.
|
entailment
|
def _on_model_save(sender, **kwargs):
"""Update document in search index post_save."""
instance = kwargs.pop("instance")
update_fields = kwargs.pop("update_fields")
for index in instance.search_indexes:
try:
_update_search_index(
instance=instance, index=index, update_fields=update_fields
)
except Exception:
logger.exception("Error handling 'on_save' signal for %s", instance)
|
Update document in search index post_save.
|
entailment
|
def _on_model_delete(sender, **kwargs):
"""Remove documents from search indexes post_delete."""
instance = kwargs.pop("instance")
for index in instance.search_indexes:
try:
_delete_from_search_index(instance=instance, index=index)
except Exception:
logger.exception("Error handling 'on_delete' signal for %s", instance)
|
Remove documents from search indexes post_delete.
|
entailment
|
def _in_search_queryset(*, instance, index) -> bool:
"""Wrapper around the instance manager method."""
try:
return instance.__class__.objects.in_search_queryset(instance.id, index=index)
except Exception:
logger.exception("Error checking object in_search_queryset.")
return False
|
Wrapper around the instance manager method.
|
entailment
|
def _update_search_index(*, instance, index, update_fields):
"""Process index / update search index update actions."""
if not _in_search_queryset(instance=instance, index=index):
logger.debug(
"Object (%r) is not in search queryset, ignoring update.", instance
)
return
try:
if update_fields:
pre_update.send(
sender=instance.__class__,
instance=instance,
index=index,
update_fields=update_fields,
)
if settings.auto_sync(instance):
instance.update_search_document(
index=index, update_fields=update_fields
)
else:
pre_index.send(sender=instance.__class__, instance=instance, index=index)
if settings.auto_sync(instance):
instance.index_search_document(index=index)
except Exception:
logger.exception("Error handling 'post_save' signal for %s", instance)
|
Process index / update search index update actions.
|
entailment
|
def _delete_from_search_index(*, instance, index):
"""Remove a document from a search index."""
pre_delete.send(sender=instance.__class__, instance=instance, index=index)
if settings.auto_sync(instance):
instance.delete_search_document(index=index)
|
Remove a document from a search index.
|
entailment
|
def ready(self):
"""Validate config and connect signals."""
super(ElasticAppConfig, self).ready()
_validate_config(settings.get_setting("strict_validation"))
_connect_signals()
|
Validate config and connect signals.
|
entailment
|
def get_setting(key, *default):
"""Return specific search setting from Django conf."""
if default:
return get_settings().get(key, default[0])
else:
return get_settings()[key]
|
Return specific search setting from Django conf.
|
entailment
|
def get_index_mapping(index):
"""Return the JSON mapping file for an index.
Mappings are stored as JSON files in the mappings subdirectory of this
app. They must be saved as {{index}}.json.
Args:
index: string, the name of the index to look for.
"""
# app_path = apps.get_app_config('elasticsearch_django').path
mappings_dir = get_setting("mappings_dir")
filename = "%s.json" % index
path = os.path.join(mappings_dir, filename)
with open(path, "r") as f:
return json.load(f)
|
Return the JSON mapping file for an index.
Mappings are stored as JSON files in the mappings subdirectory of this
app. They must be saved as {{index}}.json.
Args:
index: string, the name of the index to look for.
|
entailment
|
def get_model_index_properties(instance, index):
"""Return the list of properties specified for a model in an index."""
mapping = get_index_mapping(index)
doc_type = instance._meta.model_name.lower()
return list(mapping["mappings"][doc_type]["properties"].keys())
|
Return the list of properties specified for a model in an index.
|
entailment
|
def get_index_models(index):
"""Return list of models configured for a named index.
Args:
index: string, the name of the index to look up.
"""
models = []
for app_model in get_index_config(index).get("models"):
app, model = app_model.split(".")
models.append(apps.get_model(app, model))
return models
|
Return list of models configured for a named index.
Args:
index: string, the name of the index to look up.
|
entailment
|
def get_model_indexes(model):
"""Return list of all indexes in which a model is configured.
A model may be configured to appear in multiple indexes. This function
will return the names of the indexes as a list of strings. This is
useful if you want to know which indexes need updating when a model
is saved.
Args:
model: a Django model class.
"""
indexes = []
for index in get_index_names():
for app_model in get_index_models(index):
if app_model == model:
indexes.append(index)
return indexes
|
Return list of all indexes in which a model is configured.
A model may be configured to appear in multiple indexes. This function
will return the names of the indexes as a list of strings. This is
useful if you want to know which indexes need updating when a model
is saved.
Args:
model: a Django model class.
|
entailment
|
def get_document_models():
"""Return dict of index.doc_type: model."""
mappings = {}
for i in get_index_names():
for m in get_index_models(i):
key = "%s.%s" % (i, m._meta.model_name)
mappings[key] = m
return mappings
|
Return dict of index.doc_type: model.
|
entailment
|
def auto_sync(instance):
"""Returns bool if auto_sync is on for the model (instance)"""
# this allows us to turn off sync temporarily - e.g. when doing bulk updates
if not get_setting("auto_sync"):
return False
model_name = "{}.{}".format(instance._meta.app_label, instance._meta.model_name)
if model_name in get_setting("never_auto_sync", []):
return False
return True
|
Return True if auto_sync is enabled for the given model instance.
|
entailment
|
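A sketch of the Django settings this check reads; the top-level setting name and nesting are assumptions inferred from the get_setting calls above:

# settings.py (illustrative)
SEARCH_SETTINGS = {
    "settings": {
        "auto_sync": True,
        # Models listed here are skipped even when auto_sync is on
        "never_auto_sync": ["auth.user"],
    },
}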
def pprint(data):
"""
Returns an indented HTML pretty-print version of JSON.
Take the event_payload JSON, indent it, order the keys and then
present it as a <code> block. That's about as good as we can get
until someone builds a custom syntax function.
"""
pretty = json.dumps(data, sort_keys=True, indent=4, separators=(",", ": "))
html = pretty.replace(" ", "&nbsp;").replace("\n", "<br>")
return mark_safe("<code>%s</code>" % html)
|
Returns an indented HTML pretty-print version of JSON.
Take the event_payload JSON, indent it, order the keys and then
present it as a <code> block. That's about as good as we can get
until someone builds a custom syntax function.
|
entailment
|
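An illustrative call with an arbitrary payload:

html = pprint({"b": 2, "a": 1})
# Every space becomes &nbsp; and every newline becomes <br>, e.g. (abridged):
# '<code>{<br>&nbsp;&nbsp;&nbsp;&nbsp;"a": 1, ...}</code>'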
def addHelpMenu(menuName, parentMenuFunction=None):
'''
Adds a help menu to the plugin menu.
This method should be called from the initGui() method of the plugin
:param menuName: The name of the plugin menu in which the help menu is to be added.
'''
parentMenuFunction = parentMenuFunction or iface.addPluginToMenu
namespace = _callerName().split(".")[0]
path = "file://{}".format(os.path.join(os.path.dirname(_callerPath()), "docs", "html", "index.html"))
helpAction = QtWidgets.QAction(QgsApplication.getThemeIcon('/mActionHelpContents.svg'),
"Plugin help...", iface.mainWindow())
helpAction.setObjectName(namespace + "help")
helpAction.triggered.connect(lambda: openHelp(path))
parentMenuFunction(menuName, helpAction)
global _helpActions
_helpActions[menuName] = helpAction
|
Adds a help menu to the plugin menu.
This method should be called from the initGui() method of the plugin
:param menuName: The name of the plugin menu in which the help menu is to be added.
|
entailment
|
def addAboutMenu(menuName, parentMenuFunction=None):
'''
Adds an 'about...' menu to the plugin menu.
This method should be called from the initGui() method of the plugin
:param menuName: The name of the plugin menu in which the about menu is to be added
'''
parentMenuFunction = parentMenuFunction or iface.addPluginToMenu
namespace = _callerName().split(".")[0]
icon = QtGui.QIcon(os.path.join(os.path.dirname(os.path.dirname(__file__)), "icons", "help.png"))
aboutAction = QtWidgets.QAction(icon, "About...", iface.mainWindow())
aboutAction.setObjectName(namespace + "about")
aboutAction.triggered.connect(lambda: openAboutDialog(namespace))
parentMenuFunction(menuName, aboutAction)
global _aboutActions
_aboutActions[menuName] = aboutAction
|
Adds an 'about...' menu to the plugin menu.
This method should be called from the initGui() method of the plugin
:param menuName: The name of the plugin menu in which the about menu is to be added
|
entailment
|
def showMessageDialog(title, text):
'''
Show a dialog containing a given text, with a given title.
The text accepts HTML syntax
'''
dlg = QgsMessageOutput.createMessageOutput()
dlg.setTitle(title)
dlg.setMessage(text, QgsMessageOutput.MessageHtml)
dlg.showMessage()
|
Show a dialog containing a given text, with a given title.
The text accepts HTML syntax
|
entailment
|
def askForFiles(parent, msg = None, isSave = False, allowMultiple = False, exts = "*"):
'''
Asks for a file or files, opening the corresponding dialog with the last path that was selected
when this same function was invoked from the calling method.
:param parent: The parent window
:param msg: The message to use for the dialog title
:param isSave: True if we are asking for a file to save
:param allowMultiple: True if multiple files may be selected. Ignored if isSave == True
:param exts: Extensions to allow in the file dialog. Can be a single string or a list of them.
Use "*" to add an option that allows all files to be selected
:returns: A string with the selected filepath, or a list of them if allowMultiple is True
'''
msg = msg or 'Select file'
caller = _callerName().split(".")
name = "/".join([LAST_PATH, caller[-1]])
namespace = caller[0]
path = pluginSetting(name, namespace)
f = None
if not isinstance(exts, list):
exts = [exts]
extString = ";;".join(["%s files (*.%s)" % (e.upper(), e) if e != "*" else "All files (*.*)" for e in exts])
# PyQt5 file dialogs return a (selection, selectedFilter) tuple, and
# extString is already a complete Qt filter string
if allowMultiple:
ret, _ = QtWidgets.QFileDialog.getOpenFileNames(parent, msg, path, extString)
ret = ret or None
f = ret[0] if ret else None
else:
if isSave:
ret, _ = QtWidgets.QFileDialog.getSaveFileName(parent, msg, path, extString)
ret = ret or None
if ret is not None and not ret.endswith(exts[0]):
ret += "." + exts[0]
else:
ret, _ = QtWidgets.QFileDialog.getOpenFileName(parent, msg, path, extString)
ret = ret or None
f = ret
if f is not None:
setPluginSetting(name, os.path.dirname(f), namespace)
return ret
|
Asks for a file or files, opening the corresponding dialog with the last path that was selected
when this same function was invoked from the calling method.
:param parent: The parent window
:param msg: The message to use for the dialog title
:param isSave: True if we are asking for a file to save
:param allowMultiple: True if multiple files may be selected. Ignored if isSave == True
:param exts: Extensions to allow in the file dialog. Can be a single string or a list of them.
Use "*" to add an option that allows all files to be selected
:returns: A string with the selected filepath, or a list of them if allowMultiple is True
|
entailment
|
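A sketch of a typical call from a QGIS plugin; a running QGIS session, the iface object and the processFile callback are assumed:

# Ask for a single CSV file; the last-used directory is remembered per caller.
filename = askForFiles(iface.mainWindow(), "Select input file", exts=["csv", "*"])
if filename:
    processFile(filename)  # hypothetical callback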
def askForFolder(parent, msg = None):
'''
Asks for a folder, opening the corresponding dialog with the last path that was selected
when this same function was invoked from the calling method
:param parent: The parent window
:param msg: The message to use for the dialog title
'''
msg = msg or 'Select folder'
caller = _callerName().split(".")
name = "/".join([LAST_PATH, caller[-1]])
namespace = caller[0]
path = pluginSetting(name, namespace)
folder = QtWidgets.QFileDialog.getExistingDirectory(parent, msg, path)
if folder:
setPluginSetting(name, folder, namespace)
return folder
|
Asks for a folder, opening the corresponding dialog with the last path that was selected
when this same function was invoked from the calling method
:param parent: The parent window
:param msg: The message to use for the dialog title
|
entailment
|
def execute(func, message = None):
'''
Executes a lengthy task in a separate thread and displays a waiting dialog if needed.
Sets the cursor to wait cursor while the task is running.
This function does not provide any support for progress indication
:param func: The function to execute.
:param message: The message to display in the wait dialog. If not passed, the dialog won't be shown
'''
global _dialog
cursor = QtWidgets.QApplication.overrideCursor()
waitCursor = (cursor is not None and cursor.shape() == QtCore.Qt.WaitCursor)
dialogCreated = False
try:
QtCore.QCoreApplication.processEvents()
if not waitCursor:
QtWidgets.QApplication.setOverrideCursor(QtGui.QCursor(QtCore.Qt.WaitCursor))
if message is not None:
t = ExecutorThread(func)
loop = QtCore.QEventLoop()
t.finished.connect(loop.exit, QtCore.Qt.QueuedConnection)
if _dialog is None:
dialogCreated = True
# QProgressDialog lives in QtWidgets under Qt5
_dialog = QtWidgets.QProgressDialog(message, "Running", 0, 0, iface.mainWindow())
_dialog.setWindowTitle("Running")
_dialog.setWindowModality(QtCore.Qt.WindowModal)
_dialog.setMinimumDuration(1000)
_dialog.setMaximum(100)
_dialog.setValue(0)
_dialog.setMaximum(0)
_dialog.setCancelButton(None)
else:
oldText = _dialog.labelText()
_dialog.setLabelText(message)
QtWidgets.QApplication.processEvents()
t.start()
loop.exec_(QtCore.QEventLoop.ExcludeUserInputEvents)
if t.exception is not None:
raise t.exception
return t.returnValue
else:
return func()
finally:
if message is not None:
if dialogCreated:
_dialog.reset()
_dialog = None
else:
_dialog.setLabelText(oldText)
if not waitCursor:
QtWidgets.QApplication.restoreOverrideCursor()
QtCore.QCoreApplication.processEvents()
|
Executes a lengthy task in a separate thread and displays a waiting dialog if needed.
Sets the cursor to wait cursor while the task is running.
This function does not provide any support for progress indication
:param func: The function to execute.
:param message: The message to display in the wait dialog. If not passed, the dialog won't be shown
|
entailment
|
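A usage sketch, assuming a QGIS session; the task body is a placeholder:

import time

def slowTask():
    # Placeholder for a long-running computation
    time.sleep(5)
    return 42

# Runs slowTask in a worker thread and shows a modal "Running" dialog
# after one second; returns the task's return value.
result = execute(slowTask, message="Crunching numbers...")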
def disable_search_updates():
"""
Context manager used to temporarily disable auto_sync.
This is useful when performing bulk updates on objects - when
you may not want to flood the indexing process.
>>> with disable_search_updates():
... for obj in model.objects.all():
... obj.save()
The function works by temporarily removing the apps._on_model_save
signal handler from the model.post_save signal receivers, and then
restoring them after.
"""
# Presumably decorated with contextlib.contextmanager in the source
# (required for a bare generator to work as a context manager).
_receivers = signals.post_save.receivers.copy()
signals.post_save.receivers = _strip_on_model_save()
try:
yield
finally:
# Restore the original receivers even if the wrapped block raises
signals.post_save.receivers = _receivers
|
Context manager used to temporarily disable auto_sync.
This is useful when performing bulk updates on objects - when
you may not want to flood the indexing process.
>>> with disable_search_updates():
... for obj in model.objects.all():
... obj.save()
The function works by temporarily removing the apps._on_model_save
signal handler from the model.post_save signal receivers, and then
restoring them after.
|
entailment
|
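The _strip_on_model_save helper is referenced but not shown; a plausible sketch, assuming Django stores post_save receivers as (lookup_key, ref) pairs where ref may be a weak reference to the handler:

import weakref

from django.db.models import signals

def _strip_on_model_save():
    # Keep every receiver whose underlying callable is not the app's
    # _on_model_save handler (matching by name is an assumption here).
    kept = []
    for lookup_key, ref in signals.post_save.receivers:
        func = ref() if isinstance(ref, weakref.ReferenceType) else ref
        if getattr(func, "__name__", None) != "_on_model_save":
            kept.append((lookup_key, ref))
    return kept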
def request(self, url, method="GET", body=None, headers=None, redirections=DEFAULT_MAX_REDIRECTS, connection_type=None, blocking=True):
"""
Make a network request by calling QgsNetworkAccessManager.
redirections argument is ignored and is here only for httplib2 compatibility.
"""
self.msg_log(u'http_call request: {0}'.format(url))
self.blocking_mode = blocking
req = QNetworkRequest()
# Avoid double quoting from QUrl
url = urllib.parse.unquote(url)
req.setUrl(QUrl(url))
if headers is not None:
# This fixes a weird error with compressed content not being correctly
# inflated.
# If you set the header on the QNetworkRequest you are basically telling
# QNetworkAccessManager "I know what I'm doing, please don't do any content
# encoding processing".
# See: https://bugs.webkit.org/show_bug.cgi?id=63696#c1
try:
del headers['Accept-Encoding']
except KeyError:
pass
for k, v in list(headers.items()):
self.msg_log("Setting header %s to %s" % (k, v))
req.setRawHeader(k.encode(), v.encode())
if self.authid:
self.msg_log("Update request w/ authid: {0}".format(self.authid))
self.auth_manager().updateNetworkRequest(req, self.authid)
if self.reply is not None and self.reply.isRunning():
self.reply.close()
if method.lower() == 'delete':
func = getattr(QgsNetworkAccessManager.instance(), 'deleteResource')
else:
func = getattr(QgsNetworkAccessManager.instance(), method.lower())
# Calling the server ...
# Let's log the whole call for debugging purposes:
self.msg_log("Sending %s request to %s" % (method.upper(), req.url().toString()))
self.on_abort = False
headers = {str(h): str(req.rawHeader(h)) for h in req.rawHeaderList()}
for k, v in list(headers.items()):
self.msg_log("%s: %s" % (k, v))
if method.lower() in ['post', 'put']:
if isinstance(body, io.IOBase):
body = body.read()
if isinstance(body, str):
body = body.encode()
self.reply = func(req, body)
else:
self.reply = func(req)
if self.authid:
self.msg_log("Update reply w/ authid: {0}".format(self.authid))
self.auth_manager().updateNetworkReply(self.reply, self.authid)
# necessary to trap the local timeout managed by QgsNetworkAccessManager,
# which calls QgsNetworkAccessManager::abortRequest
QgsNetworkAccessManager.instance().requestTimedOut.connect(self.requestTimedOut)
self.reply.sslErrors.connect(self.sslErrors)
self.reply.finished.connect(self.replyFinished)
self.reply.downloadProgress.connect(self.downloadProgress)
# block if in blocking mode, otherwise return immediately;
# it's up to the caller to manage listeners in non-blocking mode
if not self.blocking_mode:
return (None, None)
# Call and block
self.el = QEventLoop()
self.reply.finished.connect(self.el.quit)
# Block until the reply is finished, excluding user input events
self.el.exec_(QEventLoop.ExcludeUserInputEvents)
if self.reply:
self.reply.finished.disconnect(self.el.quit)
# emit exception in case of error
if not self.http_call_result.ok:
if self.http_call_result.exception and not self.exception_class:
raise self.http_call_result.exception
else:
raise self.exception_class(self.http_call_result.reason)
return (self.http_call_result, self.http_call_result.content)
|
Make a network request by calling QgsNetworkAccessManager.
redirections argument is ignored and is here only for httplib2 compatibility.
|
entailment
|
def requestTimedOut(self, reply):
"""Trap the timeout. In Async mode requestTimedOut is called after replyFinished"""
# adapt http_call_result based on receiving the QGIS timer timeout signal
self.exception_class = RequestsExceptionTimeout
self.http_call_result.exception = RequestsExceptionTimeout("Timeout error")
|
Trap the timeout. In Async mode requestTimedOut is called after replyFinished
|
entailment
|
def sslErrors(self, ssl_errors):
"""
Handle SSL errors, logging them if debug is on and ignoring them
if disable_ssl_certificate_validation is set.
"""
if ssl_errors:
for v in ssl_errors:
self.msg_log("SSL Error: %s" % v.errorString())
if self.disable_ssl_certificate_validation:
self.reply.ignoreSslErrors()
|
Handle SSL errors, logging them if debug is on and ignoring them
if disable_ssl_certificate_validation is set.
|
entailment
|
def abort(self):
"""
Handle request to cancel HTTP call
"""
if (self.reply and self.reply.isRunning()):
self.on_abort = True
self.reply.abort()
|
Handle request to cancel HTTP call
|
entailment
|
def mapLayers(name=None, types=None):
"""
Return all the loaded layers, optionally filtered by name and/or type.
:param name: (optional) regex pattern matched against layer names.
:param types: (optional) The QgsMapLayer type of layer to return. Accepts a single value or a list of them.
:return: List of loaded layers. If a name is given, returns all layers with a matching name.
"""
if types is not None and not isinstance(types, list):
types = [types]
layers = _layerreg.mapLayers().values()
_layers = []
if name or types:
if name:
_layers = [layer for layer in layers if re.match(name, layer.name())]
if types:
_layers += [layer for layer in layers if layer.type() in types]
return _layers
else:
return layers
|
Return all the loaded layers, optionally filtered by name and/or type.
:param name: (optional) regex pattern matched against layer names.
:param types: (optional) The QgsMapLayer type of layer to return. Accepts a single value or a list of them.
:return: List of loaded layers. If a name is given, returns all layers with a matching name.
|
entailment
|
def addLayer(layer, loadInLegend=True):
"""
Add one or several layers to the QGIS session and layer registry.
:param layer: The layer object or list with layers to add the QGIS layer registry and session.
:param loadInLegend: True if this layer should be added to the legend.
:return: The added layer
"""
if not hasattr(layer, "__iter__"):
layer = [layer]
_layerreg.addMapLayers(layer, loadInLegend)
return layer
|
Add one or several layers to the QGIS session and layer registry.
:param layer: The layer object or list with layers to add the QGIS layer registry and session.
:param loadInLegend: True if this layer should be added to the legend.
:return: The added layer
|
entailment
|
def addLayerNoCrsDialog(layer, loadInLegend=True):
'''
Tries to add a layer from a layer object.
Same as the addLayer method, but it does not ask for CRS, regardless of current
configuration in QGIS settings
'''
settings = QSettings()
prjSetting = settings.value('/Projections/defaultBehaviour')
settings.setValue('/Projections/defaultBehaviour', '')
# QGIS3
prjSetting3 = settings.value('/Projections/defaultBehavior')
settings.setValue('/Projections/defaultBehavior', '')
layer = addLayer(layer, loadInLegend)
settings.setValue('/Projections/defaultBehaviour', prjSetting)
settings.setValue('/Projections/defaultBehavior', prjSetting3)
return layer
|
Tries to add a layer from a layer object.
Same as the addLayer method, but it does not ask for CRS, regardless of current
configuration in QGIS settings
|
entailment
|
def newVectorLayer(filename, fields, geometryType, crs, encoding="utf-8"):
'''
Creates a new vector layer
:param filename: The filename to store the file. The extension determines the type of file.
If extension is not among the supported ones, a shapefile will be created and the file will
get an added '.shp' to its path.
If the filename is None, a memory layer will be created
:param fields: the fields to add to the layer. Accepts a QgsFields object or a list of tuples (field_name, field_type)
Accepted field types are basic Python types str, float, int and bool
:param geometryType: The type of geometry of the layer to create.
:param crs: The crs of the layer to create. Accepts a QgsCoordinateReferenceSystem object or a string with the CRS authId.
:param encoding: The layer encoding
'''
if isinstance(crs, str):
crs = QgsCoordinateReferenceSystem(crs)
if filename is None:
uri = GEOM_TYPE_MAP[geometryType]
if crs.isValid():
uri += '?crs=' + crs.authid() + '&'
# NOTE: the memory-provider path expects field definitions as 'name:type'
# strings; tuple fields are only converted in the file-based path below
fieldsdesc = ['field=' + f for f in fields]
fieldsstring = '&'.join(fieldsdesc)
uri += fieldsstring
layer = QgsVectorLayer(uri, "mem_layer", 'memory')
else:
formats = QgsVectorFileWriter.supportedFiltersAndFormats()
OGRCodes = {}
for (key, value) in formats.items():
extension = str(key)
extension = extension[extension.find('*.') + 2:]
extension = extension[:extension.find(' ')]
OGRCodes[extension] = value
extension = os.path.splitext(filename)[1][1:]
if extension not in OGRCodes:
extension = 'shp'
filename = filename + '.shp'
if isinstance(fields, QgsFields):
qgsfields = fields
else:
qgsfields = QgsFields()
for field in fields:
qgsfields.append(_toQgsField(field))
# Constructing the writer writes the file to disk; the instance can be discarded
QgsVectorFileWriter(filename, encoding, qgsfields,
geometryType, crs, OGRCodes[extension])
layer = QgsVectorLayer(filename, os.path.basename(filename), 'ogr')
return layer
|
Creates a new vector layer
:param filename: The filename to store the file. The extension determines the type of file.
If extension is not among the supported ones, a shapefile will be created and the file will
get an added '.shp' to its path.
If the filename is None, a memory layer will be created
:param fields: the fields to add to the layer. Accepts a QgsFields object or a list of tuples (field_name, field_type)
Accepted field types are basic Python types str, float, int and bool
:param geometryType: The type of geometry of the layer to create.
:param crs: The crs of the layer to create. Accepts a QgsCoordinateReferenceSystem object or a string with the CRS authId.
:param encoding: The layer encoding
|
entailment
|
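A usage sketch; passing QgsWkbTypes.Point assumes GEOM_TYPE_MAP is keyed by QGIS wkb geometry types, which is an assumption about this module's internals:

from qgis.core import QgsWkbTypes

# filename=None selects the memory provider; field definitions are given
# as 'name:type' strings, matching the memory-provider URI syntax.
layer = newVectorLayer(
    None,
    ["name:string", "value:double"],
    QgsWkbTypes.Point,
    "EPSG:4326",
)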
def layerFromName(name):
'''
Returns the layer from the current project with the passed name
Raises WrongLayerNameException if no layer with that name is found
If several layers with that name exist, only the first one is returned
'''
layers = _layerreg.mapLayers().values()
for layer in layers:
if layer.name() == name:
return layer
raise WrongLayerNameException()
|
Returns the layer from the current project with the passed name
Raises WrongLayerNameException if no layer with that name is found
If several layers with that name exist, only the first one is returned
|
entailment
|
def layerFromSource(source):
'''
Returns the layer from the current project with the passed source
Raises WrongLayerSourceException if no layer with that source is found
'''
layers = _layerreg.mapLayers().values()
for layer in layers:
if layer.source() == source:
return layer
raise WrongLayerSourceException()
|
Returns the layer from the current project with the passed source
Raises WrongLayerSourceException if no layer with that source is found
|
entailment
|
def loadLayer(filename, name = None, provider=None):
'''
Tries to load a layer from the given file
:param filename: the path to the file to load.
:param name: the name to use for adding the layer to the current project.
If not passed or None, it will use the filename basename
'''
name = name or os.path.splitext(os.path.basename(filename))[0]
if provider != 'gdal': # QGIS3 crashes if opening a raster as vector ... this needs further investigation
qgslayer = QgsVectorLayer(filename, name, provider or "ogr")
if provider == 'gdal' or not qgslayer.isValid():
qgslayer = QgsRasterLayer(filename, name, provider or "gdal")
if not qgslayer.isValid():
raise RuntimeError('Could not load layer: ' + str(filename))
return qgslayer
|
Tries to load a layer from the given file
:param filename: the path to the file to load.
:param name: the name to use for adding the layer to the current project.
If not passed or None, it will use the filename basename
|
entailment
|
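A short sketch chaining the helpers above; a QGIS session is assumed and the path is a placeholder:

# Load a shapefile via the OGR provider and add it to the project legend.
layer = loadLayer("/data/rivers.shp")
addLayer(layer)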
def loadLayerNoCrsDialog(filename, name=None, provider=None):
'''
Tries to load a layer from the given file
Same as the loadLayer method, but it does not ask for CRS, regardless of current
configuration in QGIS settings
'''
settings = QSettings()
prjSetting = settings.value('/Projections/defaultBehaviour')
settings.setValue('/Projections/defaultBehaviour', '')
# QGIS3:
prjSetting3 = settings.value('/Projections/defaultBehavior')
settings.setValue('/Projections/defaultBehavior', '')
layer = loadLayer(filename, name, provider)
settings.setValue('/Projections/defaultBehaviour', prjSetting)
settings.setValue('/Projections/defaultBehavior', prjSetting3)
return layer
|
Tries to load a layer from the given file
Same as the loadLayer method, but it does not ask for CRS, regardless of current
configuration in QGIS settings
|
entailment
|
def addSettingsMenu(menuName, parentMenuFunction=None):
'''
Adds a 'open settings...' menu to the plugin menu.
This method should be called from the initGui() method of the plugin
:param menuName: The name of the plugin menu in which the settings menu is to be added
:param parentMenuFunction: a function from QgisInterface to indicate where to put the container plugin menu.
If not passed, it uses addPluginToMenu
'''
parentMenuFunction = parentMenuFunction or iface.addPluginToMenu
namespace = _callerName().split(".")[0]
settingsAction = QAction(
QgsApplication.getThemeIcon('/mActionOptions.svg'),
"Plugin Settings...",
iface.mainWindow())
settingsAction.setObjectName(namespace + "settings")
settingsAction.triggered.connect(lambda: openSettingsDialog(namespace))
parentMenuFunction(menuName, settingsAction)
global _settingActions
_settingActions[menuName] = settingsAction
|
Adds a 'open settings...' menu to the plugin menu.
This method should be called from the initGui() method of the plugin
:param menuName: The name of the plugin menu in which the settings menu is to be added
:param parentMenuFunction: a function from QgisInterface to indicate where to put the container plugin menu.
If not passed, it uses addPluginToMenu
|
entailment
|
def openParametersDialog(params, title=None):
'''
Opens a dialog to enter parameters.
Parameters are passed as a list of Parameter objects
Returns a dict with param names as keys and param values as values
Returns None if the dialog was cancelled
'''
QApplication.setOverrideCursor(QCursor(Qt.ArrowCursor))
dlg = ParametersDialog(params, title)
dlg.exec_()
QApplication.restoreOverrideCursor()
return dlg.values
|
Opens a dialog to enter parameters.
Parameters are passed as a list of Parameter objects
Returns a dict with param names as keys and param values as values
Returns None if the dialog was cancelled
|
entailment
|
def do_index_command(self, index, **options):
"""Rebuild search index."""
if options["interactive"]:
logger.warning("This will permanently delete the index '%s'.", index)
if not self._confirm_action():
logger.warning(
"Aborting rebuild of index '%s' at user's request.", index
)
return
try:
delete = delete_index(index)
except TransportError:
delete = {}
logger.info("Index %s does not exist, cannot be deleted.", index)
create = create_index(index)
update = update_index(index)
return {"delete": delete, "create": create, "update": update}
|
Rebuild search index.
|
entailment
|
def handle(self, *args, **options):
"""Run do_index_command on each specified index and log the output."""
for index in options.pop("indexes"):
data = {}
try:
data = self.do_index_command(index, **options)
except TransportError as ex:
logger.warning("ElasticSearch threw an error: %s", ex)
data = {"index": index, "status": ex.status_code, "reason": ex.error}
finally:
logger.info(data)
|
Run do_index_command on each specified index and log the output.
|
entailment
|
def create(self, Name, Subject, HtmlBody=None, TextBody=None, Alias=None):
"""
Creates a template.
:param Name: Name of template
:param Subject: The content to use for the Subject when this template is used to send email.
:param HtmlBody: The content to use for the HtmlBody when this template is used to send email.
:param TextBody: The content to use for the TextBody when this template is used to send email.
:return:
"""
assert TextBody or HtmlBody, "Provide either email TextBody or HtmlBody or both"
data = {"Name": Name, "Subject": Subject, "HtmlBody": HtmlBody, "TextBody": TextBody, "Alias": Alias}
return self._init_instance(self.call("POST", "/templates", data=data))
|
Creates a template.
:param Name: Name of template
:param Subject: The content to use for the Subject when this template is used to send email.
:param HtmlBody: The content to use for the HtmlBody when this template is used to send email.
:param TextBody: The content to use for the TextBody when this template is used to send email.
:return:
|
entailment
|
def get_logger(name, verbosity, stream):
"""
Returns simple console logger.
"""
logger = logging.getLogger(name)
logger.setLevel(
{0: DEFAULT_LOGGING_LEVEL, 1: logging.INFO, 2: logging.DEBUG}.get(min(2, verbosity), DEFAULT_LOGGING_LEVEL)
)
logger.handlers = []
handler = logging.StreamHandler(stream)
handler.setLevel(logging.DEBUG)
handler.setFormatter(logging.Formatter(LOG_FORMAT))
logger.addHandler(handler)
return logger
|
Returns simple console logger.
|
entailment
|
def from_config(cls, config, prefix="postmark_", is_uppercase=False):
"""
Helper method for instantiating PostmarkClient from dict-like objects.
"""
kwargs = {}
for arg in get_args(cls):
key = prefix + arg
if is_uppercase:
key = key.upper()
else:
key = key.lower()
if key in config:
kwargs[arg] = config[key]
return cls(**kwargs)
|
Helper method for instantiating PostmarkClient from dict-like objects.
|
entailment
|
def chunks(container, n):
"""
Split a container into n-sized chunks.
"""
for i in range(0, len(container), n):
yield container[i : i + n]
|
Split a container into n-sized chunks.
|
entailment
|
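A quick worked example:

# Split seven items into groups of three; the tail chunk may be shorter.
list(chunks([1, 2, 3, 4, 5, 6, 7], 3))
# -> [[1, 2, 3], [4, 5, 6], [7]]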
def sizes(count, offset=0, max_chunk=500):
"""
Helper to iterate over remote data via count & offset pagination.
"""
if count is None:
chunk = max_chunk
while True:
yield chunk, offset
offset += chunk
else:
while count:
chunk = min(count, max_chunk)
count = max(0, count - max_chunk)
yield chunk, offset
offset += chunk
|
Helper to iterate over remote data via count & offset pagination.
|
entailment
|
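A worked example of both modes:

# Bounded: fetch 1200 items, at most 500 per request.
list(sizes(1200, max_chunk=500))
# -> [(500, 0), (500, 500), (200, 1000)]

# Unbounded (count=None) yields (500, 0), (500, 500), (500, 1000), ...
# forever, so the consumer decides when to stop.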
def raise_for_response(self, responses):
"""
Constructs appropriate exception from list of responses and raises it.
"""
exception_messages = [self.client.format_exception_message(response) for response in responses]
if len(exception_messages) == 1:
message = exception_messages[0]
else:
message = "[%s]" % ", ".join(exception_messages)
raise PostmarkerException(message)
|
Constructs appropriate exception from list of responses and raises it.
|
entailment
|
def list_to_csv(value):
"""
Converts a list to a string with comma-separated values. For strings it is a no-op.
"""
if isinstance(value, (list, tuple, set)):
value = ",".join(value)
return value
|
Converts a list to a string with comma-separated values. For strings it is a no-op.
|
entailment
|
def prepare_attachments(attachment):
"""
Converts incoming attachment into dictionary.
"""
if isinstance(attachment, tuple):
result = {"Name": attachment[0], "Content": attachment[1], "ContentType": attachment[2]}
if len(attachment) == 4:
result["ContentID"] = attachment[3]
elif isinstance(attachment, MIMEBase):
payload = attachment.get_payload()
content_type = attachment.get_content_type()
# Special case for message/rfc822
# Even if RFC implies such attachments being not base64-encoded,
# Postmark requires all attachments to be encoded in this way
if content_type == "message/rfc822" and not isinstance(payload, str):
payload = b64encode(payload[0].get_payload(decode=True)).decode()
result = {
"Name": attachment.get_filename() or "attachment.txt",
"Content": payload,
"ContentType": content_type,
}
content_id = attachment.get("Content-ID")
if content_id:
if content_id.startswith("<") and content_id.endswith(">"):
content_id = content_id[1:-1]
if (attachment.get("Content-Disposition") or "").startswith("inline"):
content_id = "cid:%s" % content_id
result["ContentID"] = content_id
elif isinstance(attachment, str):
content_type = guess_content_type(attachment)
filename = os.path.basename(attachment)
with open(attachment, "rb") as fd:
data = fd.read()
result = {"Name": filename, "Content": b64encode(data).decode("utf-8"), "ContentType": content_type}
else:
result = attachment
return result
|
Converts incoming attachment into dictionary.
|
entailment
|
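A sketch of the tuple form, the simplest input accepted above:

from base64 import b64encode

attachment = ("report.csv", b64encode(b"a,b\n1,2").decode(), "text/csv")
prepare_attachments(attachment)
# -> {'Name': 'report.csv', 'Content': 'YSxiCjEsMg==', 'ContentType': 'text/csv'}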
def as_dict(self):
"""
Serializes the email to a dictionary, additionally encoding headers,
recipient lists and attachments.
:return: `dict`
"""
data = super(BaseEmail, self).as_dict()
data["Headers"] = [{"Name": name, "Value": value} for name, value in data["Headers"].items()]
for field in ("To", "Cc", "Bcc"):
if field in data:
data[field] = list_to_csv(data[field])
data["Attachments"] = [prepare_attachments(attachment) for attachment in data["Attachments"]]
return data
|
Serializes the email to a dictionary, additionally encoding headers,
recipient lists and attachments.
:return: `dict`
|
entailment
|
def attach_binary(self, content, filename):
"""
Attaches given binary data.
:param bytes content: Binary data to be attached.
:param str filename:
:return: None.
"""
content_type = guess_content_type(filename)
payload = {"Name": filename, "Content": b64encode(content).decode("utf-8"), "ContentType": content_type}
self.attach(payload)
|
Attaches given binary data.
:param bytes content: Binary data to be attached.
:param str filename:
:return: None.
|
entailment
|
def from_mime(cls, message, manager):
"""
Instantiates ``Email`` instance from ``MIMEText`` instance.
:param message: ``email.mime.text.MIMEText`` instance.
:param manager: :py:class:`EmailManager` instance.
:return: :py:class:`Email`
"""
text, html, attachments = deconstruct_multipart(message)
subject = prepare_header(message["Subject"])
sender = prepare_header(message["From"])
to = prepare_header(message["To"])
cc = prepare_header(message["Cc"])
bcc = prepare_header(message["Bcc"])
reply_to = prepare_header(message["Reply-To"])
tag = getattr(message, "tag", None)
return cls(
manager=manager,
From=sender,
To=to,
TextBody=text,
HtmlBody=html,
Subject=subject,
Cc=cc,
Bcc=bcc,
ReplyTo=reply_to,
Attachments=attachments,
Tag=tag,
)
|
Instantiates ``Email`` instance from ``MIMEText`` instance.
:param message: ``email.mime.text.MIMEText`` instance.
:param manager: :py:class:`EmailManager` instance.
:return: :py:class:`Email`
|
entailment
|
def as_dict(self, **extra):
"""
Converts all available emails to dictionaries.
:return: List of dictionaries.
"""
return [self._construct_email(email, **extra) for email in self.emails]
|
Converts all available emails to dictionaries.
:return: List of dictionaries.
|
entailment
|
def _construct_email(self, email, **extra):
"""
Converts incoming data to properly structured dictionary.
"""
if isinstance(email, dict):
email = Email(manager=self._manager, **email)
elif isinstance(email, (MIMEText, MIMEMultipart)):
email = Email.from_mime(email, self._manager)
elif not isinstance(email, Email):
raise ValueError
email._update(extra)
return email.as_dict()
|
Converts incoming data to properly structured dictionary.
|
entailment
|
def send(self, **extra):
"""
Sends email batch.
:return: Information about sent emails.
:rtype: `list`
"""
emails = self.as_dict(**extra)
responses = [self._manager._send_batch(*batch) for batch in chunks(emails, self.MAX_SIZE)]
return sum(responses, [])
|
Sends email batch.
:return: Information about sent emails.
:rtype: `list`
|
entailment
|
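A batch sketch; the client object, addresses and the EmailBatch accessor are illustrative assumptions:

# Dict emails are converted via _construct_email; the batch is split into
# chunks of MAX_SIZE per API call, as shown above.
batch = postmark.emails.EmailBatch(
    {"From": "sender@example.com", "To": "a@example.com",
     "Subject": "Hi", "TextBody": "Hello"},
    {"From": "sender@example.com", "To": "b@example.com",
     "Subject": "Hi", "TextBody": "Hello"},
)
responses = batch.send(Tag="bulk")  # extra kwargs are applied to every email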
def send(
self,
message=None,
From=None,
To=None,
Cc=None,
Bcc=None,
Subject=None,
Tag=None,
HtmlBody=None,
TextBody=None,
Metadata=None,
ReplyTo=None,
Headers=None,
TrackOpens=None,
TrackLinks="None",
Attachments=None,
):
"""
Sends a single email.
:param message: :py:class:`Email` or ``email.mime.text.MIMEText`` instance.
:param str From: The sender email address.
:param To: Recipient's email address.
Multiple recipients could be specified as a list or string with comma separated values.
:type To: str or list
:param Cc: Cc recipient's email address.
Multiple Cc recipients could be specified as a list or string with comma separated values.
:type Cc: str or list
:param Bcc: Bcc recipient's email address.
Multiple Bcc recipients could be specified as a list or string with comma separated values.
:type Bcc: str or list
:param str Subject: Email subject.
:param str Tag: Email tag.
:param str HtmlBody: HTML email message.
:param str TextBody: Plain text email message.
:param dict Metadata: Custom metadata key/value pairs attached to the message.
:param str ReplyTo: Reply To override email address.
:param dict Headers: Dictionary of custom headers to include.
:param bool TrackOpens: Activate open tracking for this email.
:param str TrackLinks: Activate link tracking for links in the HTML or Text bodies of this email.
:param list Attachments: List of attachments.
:return: Information about sent email.
:rtype: `dict`
"""
assert not (message and (From or To)), "You should specify either message or From and To parameters"
assert TrackLinks in ("None", "HtmlAndText", "HtmlOnly", "TextOnly")
if message is None:
message = self.Email(
From=From,
To=To,
Cc=Cc,
Bcc=Bcc,
Subject=Subject,
Tag=Tag,
HtmlBody=HtmlBody,
TextBody=TextBody,
Metadata=Metadata,
ReplyTo=ReplyTo,
Headers=Headers,
TrackOpens=TrackOpens,
TrackLinks=TrackLinks,
Attachments=Attachments,
)
elif isinstance(message, (MIMEText, MIMEMultipart)):
message = Email.from_mime(message, self)
elif not isinstance(message, Email):
raise TypeError("message should be either Email or MIMEText or MIMEMultipart instance")
return message.send()
|
Sends a single email.
:param message: :py:class:`Email` or ``email.mime.text.MIMEText`` instance.
:param str From: The sender email address.
:param To: Recipient's email address.
Multiple recipients could be specified as a list or string with comma separated values.
:type To: str or list
:param Cc: Cc recipient's email address.
Multiple Cc recipients could be specified as a list or string with comma separated values.
:type Cc: str or list
:param Bcc: Bcc recipient's email address.
Multiple Bcc recipients could be specified as a list or string with comma separated values.
:type Bcc: str or list
:param str Subject: Email subject.
:param str Tag: Email tag.
:param str HtmlBody: HTML email message.
:param str TextBody: Plain text email message.
:param dict Metadata: Custom metadata key/value pairs attached to the message.
:param str ReplyTo: Reply To override email address.
:param dict Headers: Dictionary of custom headers to include.
:param bool TrackOpens: Activate open tracking for this email.
:param str TrackLinks: Activate link tracking for links in the HTML or Text bodies of this email.
:param list Attachments: List of attachments.
:return: Information about sent email.
:rtype: `dict`
|
entailment
|
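A minimal end-to-end sketch based on the client's documented usage; the token and addresses are placeholders:

from postmarker.core import PostmarkClient

postmark = PostmarkClient(server_token="<your-server-token>")
response = postmark.emails.send(
    From="sender@example.com",
    To="receiver@example.com",
    Subject="Postmark test",
    TextBody="Hello from Postmarker!",
)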
def Email(
self,
From,
To,
Cc=None,
Bcc=None,
Subject=None,
Tag=None,
HtmlBody=None,
TextBody=None,
Metadata=None,
ReplyTo=None,
Headers=None,
TrackOpens=None,
TrackLinks="None",
Attachments=None,
):
"""
Constructs :py:class:`Email` instance.
:return: :py:class:`Email`
"""
return Email(
manager=self,
From=From,
To=To,
Cc=Cc,
Bcc=Bcc,
Subject=Subject,
Tag=Tag,
HtmlBody=HtmlBody,
TextBody=TextBody,
Metadata=Metadata,
ReplyTo=ReplyTo,
Headers=Headers,
TrackOpens=TrackOpens,
TrackLinks=TrackLinks,
Attachments=Attachments,
)
|
Constructs :py:class:`Email` instance.
:return: :py:class:`Email`
|
entailment
|
def EmailTemplate(
self,
TemplateId,
TemplateModel,
From,
To,
TemplateAlias=None,
Cc=None,
Bcc=None,
Subject=None,
Tag=None,
ReplyTo=None,
Headers=None,
TrackOpens=None,
TrackLinks="None",
Attachments=None,
InlineCss=True,
):
"""
Constructs :py:class:`EmailTemplate` instance.
:return: :py:class:`EmailTemplate`
"""
return EmailTemplate(
manager=self,
TemplateId=TemplateId,
TemplateAlias=TemplateAlias,
TemplateModel=TemplateModel,
From=From,
To=To,
Cc=Cc,
Bcc=Bcc,
Subject=Subject,
Tag=Tag,
ReplyTo=ReplyTo,
Headers=Headers,
TrackOpens=TrackOpens,
TrackLinks=TrackLinks,
Attachments=Attachments,
InlineCss=InlineCss,
)
|
Constructs :py:class:`EmailTemplate` instance.
:return: :py:class:`EmailTemplate`
|
entailment
|
def activate(self):
"""
Activates the bounce instance and updates it with the latest data.
:return: Activation status.
:rtype: `str`
"""
response = self._manager.activate(self.ID)
self._update(response["Bounce"])
return response["Message"]
|
Activates the bounce instance and updates it with the latest data.
:return: Activation status.
:rtype: `str`
|
entailment
|
def all(
self,
count=500,
offset=0,
type=None,
inactive=None,
emailFilter=None,
tag=None,
messageID=None,
fromdate=None,
todate=None,
):
"""
Returns many bounces.
:param int count: Number of bounces to return per request.
:param int offset: Number of bounces to skip.
:param str type: Filter by type of bounce.
:param bool inactive: Filter by emails that were deactivated by Postmark due to the bounce.
:param str emailFilter: Filter by email address.
:param str tag: Filter by tag.
:param str messageID: Filter by messageID.
:param date fromdate: Filter messages starting from the date specified (inclusive).
:param date todate: Filter messages up to the date specified (inclusive).
:return: A list of :py:class:`Bounce` instances.
:rtype: `list`
"""
responses = self.call_many(
"GET",
"/bounces/",
count=count,
offset=offset,
type=type,
inactive=inactive,
emailFilter=emailFilter,
tag=tag,
messageID=messageID,
fromdate=fromdate,
todate=todate,
)
return self.expand_responses(responses, "Bounces")
|
Returns many bounces.
:param int count: Number of bounces to return per request.
:param int offset: Number of bounces to skip.
:param str type: Filter by type of bounce.
:param bool inactive: Filter by emails that were deactivated by Postmark due to the bounce.
:param str emailFilter: Filter by email address.
:param str tag: Filter by tag.
:param str messageID: Filter by messageID.
:param date fromdate: Filter messages starting from the date specified (inclusive).
:param date todate: Filter messages up to the date specified (inclusive).
:return: A list of :py:class:`Bounce` instances.
:rtype: `list`
|
entailment
|
def update_kwargs(self, kwargs, count, offset):
"""
Helper to merge count/offset pagination values into a kwargs dict on all Python versions.
"""
kwargs.update({self.count_key: count, self.offset_key: offset})
return kwargs
|
Helper to merge count/offset pagination values into a kwargs dict on all Python versions.
|
entailment
|