Dataset schema (one row per source file):

| Column | Type | Range / distinct values |
|---|---|---|
| id | int64 | 0 to 458k |
| file_name | string | lengths 4 to 119 |
| file_path | string | lengths 14 to 227 |
| content | string | lengths 24 to 9.96M |
| size | int64 | 24 to 9.96M |
| language | string | 1 distinct value |
| extension | string | 14 distinct values |
| total_lines | int64 | 1 to 219k |
| avg_line_length | float64 | 2.52 to 4.63M |
| max_line_length | int64 | 5 to 9.91M |
| alphanum_fraction | float64 | 0 to 1 |
| repo_name | string | lengths 7 to 101 |
| repo_stars | int64 | 100 to 139k |
| repo_forks | int64 | 0 to 26.4k |
| repo_open_issues | int64 | 0 to 2.27k |
| repo_license | string | 12 distinct values |
| repo_extraction_date | string | 433 distinct values |
id: 6,200 | file_name: __main__.py | file_path: getpelican_pelican/pelican/__main__.py

"""
python -m pelican module entry point to run via python -m
"""
from . import main
if __name__ == "__main__":
main()

size: 125 | language: Python | extension: .py | total_lines: 6 | avg_line_length: 18.833333 | max_line_length: 57 | alphanum_fraction: 0.623932 | repo_name: getpelican/pelican | repo_stars: 12,478 | repo_forks: 1,806 | repo_open_issues: 72 | repo_license: AGPL-3.0 | repo_extraction_date: 9/5/2024, 5:09:45 PM (Europe/Amsterdam)

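As a quick sanity check of the entry point above (a sketch, assuming the pelican package is installed in the active interpreter's environment):

```python
# Minimal sketch: run the package the same way "python -m pelican" does.
# Assumes pelican is installed in the current environment.
import subprocess
import sys

# Equivalent to running "pelican --help" from the shell.
subprocess.run([sys.executable, "-m", "pelican", "--help"], check=True)
```
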
id: 6,201 | file_name: paginator.py | file_path: getpelican_pelican/pelican/paginator.py

import functools
import logging
import os
from collections import namedtuple
from math import ceil
logger = logging.getLogger(__name__)
PaginationRule = namedtuple( # noqa: PYI024
"PaginationRule",
"min_page URL SAVE_AS",
)
class Paginator:
def __init__(self, name, url, object_list, settings, per_page=None):
self.name = name
self.url = url
self.object_list = object_list
self.settings = settings
if per_page:
self.per_page = per_page
self.orphans = settings["DEFAULT_ORPHANS"]
else:
self.per_page = len(object_list)
self.orphans = 0
self._num_pages = self._count = None
def page(self, number):
"Returns a Page object for the given 1-based page number."
bottom = (number - 1) * self.per_page
top = bottom + self.per_page
if top + self.orphans >= self.count:
top = self.count
return Page(
self.name,
self.url,
self.object_list[bottom:top],
number,
self,
self.settings,
)
def _get_count(self):
"Returns the total number of objects, across all pages."
if self._count is None:
self._count = len(self.object_list)
return self._count
count = property(_get_count)
def _get_num_pages(self):
"Returns the total number of pages."
if self._num_pages is None:
hits = max(1, self.count - self.orphans)
self._num_pages = int(ceil(hits / (float(self.per_page) or 1)))
return self._num_pages
num_pages = property(_get_num_pages)
def _get_page_range(self):
"""
Returns a 1-based range of pages for iterating through within
a template for loop.
"""
return list(range(1, self.num_pages + 1))
page_range = property(_get_page_range)
class Page:
def __init__(self, name, url, object_list, number, paginator, settings):
self.full_name = name
self.name, self.extension = os.path.splitext(name)
dn, fn = os.path.split(name)
self.base_name = dn if fn in ("index.htm", "index.html") else self.name
self.base_url = url
self.object_list = object_list
self.number = number
self.paginator = paginator
self.settings = settings
def __repr__(self):
return f"<Page {self.number} of {self.paginator.num_pages}>"
def has_next(self):
return self.number < self.paginator.num_pages
def has_previous(self):
return self.number > 1
def has_other_pages(self):
return self.has_previous() or self.has_next()
def next_page_number(self):
return self.number + 1
def previous_page_number(self):
return self.number - 1
def start_index(self):
"""
Returns the 1-based index of the first object on this page,
relative to total objects in the paginator.
"""
# Special case, return zero if no items.
if self.paginator.count == 0:
return 0
return (self.paginator.per_page * (self.number - 1)) + 1
def end_index(self):
"""
Returns the 1-based index of the last object on this page,
relative to total objects found (hits).
"""
# Special case for the last page because there can be orphans.
if self.number == self.paginator.num_pages:
return self.paginator.count
return self.number * self.paginator.per_page
def _from_settings(self, key):
"""Returns URL information as defined in settings. Similar to
URLWrapper._from_settings, but specialized to deal with pagination
logic."""
rule = None
# find the last matching pagination rule
for p in self.settings["PAGINATION_PATTERNS"]:
if p.min_page == -1:
if not self.has_next():
rule = p
break
elif p.min_page <= self.number:
rule = p
if not rule:
return ""
prop_value = getattr(rule, key)
if not isinstance(prop_value, str):
logger.warning("%s is set to %s", key, prop_value)
return prop_value
# URL or SAVE_AS is a string, format it with a controlled context
context = {
"save_as": self.full_name,
"url": self.base_url,
"name": self.name,
"base_name": self.base_name,
"extension": self.extension,
"number": self.number,
}
ret = prop_value.format(**context)
# Remove a single leading slash, if any. This is done for backwards
# compatibility reasons. If a leading slash is needed (for URLs
# relative to server root or absolute URLs without the scheme such as
# //blog.my.site/), it can be worked around by prefixing the pagination
# pattern by an additional slash (which then gets removed, preserving
# the other slashes). This also means the following code *can't* be
# changed to lstrip() because that would remove all leading slashes and
# thus make the workaround impossible. See
# test_custom_pagination_pattern() for a verification of this.
if ret.startswith("/"):
ret = ret[1:]
return ret
url = property(functools.partial(_from_settings, key="URL"))
save_as = property(functools.partial(_from_settings, key="SAVE_AS"))

size: 5,556 | language: Python | extension: .py | total_lines: 141 | avg_line_length: 30.304965 | max_line_length: 79 | alphanum_fraction: 0.602043 | repo_name: getpelican/pelican | repo_stars: 12,478 | repo_forks: 1,806 | repo_open_issues: 72 | repo_license: AGPL-3.0 | repo_extraction_date: 9/5/2024, 5:09:45 PM (Europe/Amsterdam)

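A minimal usage sketch of the paginator above (not from the source): the settings dict carries only the two keys this code path actually reads, DEFAULT_ORPHANS and PAGINATION_PATTERNS.

```python
# Sketch of Paginator/Page usage with the default pagination patterns.
from pelican.paginator import PaginationRule, Paginator

settings = {
    "DEFAULT_ORPHANS": 0,
    "PAGINATION_PATTERNS": [
        PaginationRule(1, "{name}{extension}", "{name}{extension}"),
        PaginationRule(2, "{name}{number}{extension}", "{name}{number}{extension}"),
    ],
}

articles = [f"article-{i}" for i in range(10)]
paginator = Paginator("index.html", "", articles, settings, per_page=4)

print(paginator.num_pages)  # 3 (4 + 4 + 2 objects)
for number in paginator.page_range:
    page = paginator.page(number)
    # Page 1 saves as index.html; later pages as index2.html, index3.html
    print(page.number, page.save_as, page.object_list)
```
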
id: 6,202 | file_name: settings.py | file_path: getpelican_pelican/pelican/settings.py

import copy
import importlib.util
import inspect
import locale
import logging
import os
import re
import sys
from os.path import isabs
from pathlib import Path
from types import ModuleType
from typing import Any, Dict, Optional
from pelican.log import LimitFilter
def load_source(name: str, path: str) -> ModuleType:
spec = importlib.util.spec_from_file_location(name, path)
mod = importlib.util.module_from_spec(spec)
sys.modules[name] = mod
spec.loader.exec_module(mod)
return mod
logger = logging.getLogger(__name__)
Settings = Dict[str, Any]
DEFAULT_THEME = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "themes", "notmyidea"
)
DEFAULT_CONFIG = {
"PATH": os.curdir,
"ARTICLE_PATHS": [""],
"ARTICLE_EXCLUDES": [],
"PAGE_PATHS": ["pages"],
"PAGE_EXCLUDES": [],
"THEME": DEFAULT_THEME,
"OUTPUT_PATH": "output",
"READERS": {},
"STATIC_PATHS": ["images"],
"STATIC_EXCLUDES": [],
"STATIC_EXCLUDE_SOURCES": True,
"THEME_STATIC_DIR": "theme",
"THEME_STATIC_PATHS": [
"static",
],
"FEED_ALL_ATOM": "feeds/all.atom.xml",
"CATEGORY_FEED_ATOM": "feeds/{slug}.atom.xml",
"AUTHOR_FEED_ATOM": "feeds/{slug}.atom.xml",
"AUTHOR_FEED_RSS": "feeds/{slug}.rss.xml",
"TRANSLATION_FEED_ATOM": "feeds/all-{lang}.atom.xml",
"FEED_MAX_ITEMS": 100,
"RSS_FEED_SUMMARY_ONLY": True,
"FEED_APPEND_REF": False,
"SITEURL": "",
"SITENAME": "A Pelican Blog",
"DISPLAY_PAGES_ON_MENU": True,
"DISPLAY_CATEGORIES_ON_MENU": True,
"DOCUTILS_SETTINGS": {},
"OUTPUT_SOURCES": False,
"OUTPUT_SOURCES_EXTENSION": ".text",
"USE_FOLDER_AS_CATEGORY": True,
"DEFAULT_CATEGORY": "misc",
"WITH_FUTURE_DATES": True,
"CSS_FILE": "main.css",
"NEWEST_FIRST_ARCHIVES": True,
"REVERSE_CATEGORY_ORDER": False,
"DELETE_OUTPUT_DIRECTORY": False,
"OUTPUT_RETENTION": [],
"INDEX_SAVE_AS": "index.html",
"ARTICLE_URL": "{slug}.html",
"ARTICLE_SAVE_AS": "{slug}.html",
"ARTICLE_ORDER_BY": "reversed-date",
"ARTICLE_LANG_URL": "{slug}-{lang}.html",
"ARTICLE_LANG_SAVE_AS": "{slug}-{lang}.html",
"DRAFT_URL": "drafts/{slug}.html",
"DRAFT_SAVE_AS": "drafts/{slug}.html",
"DRAFT_LANG_URL": "drafts/{slug}-{lang}.html",
"DRAFT_LANG_SAVE_AS": "drafts/{slug}-{lang}.html",
"PAGE_URL": "pages/{slug}.html",
"PAGE_SAVE_AS": "pages/{slug}.html",
"PAGE_ORDER_BY": "basename",
"PAGE_LANG_URL": "pages/{slug}-{lang}.html",
"PAGE_LANG_SAVE_AS": "pages/{slug}-{lang}.html",
"DRAFT_PAGE_URL": "drafts/pages/{slug}.html",
"DRAFT_PAGE_SAVE_AS": "drafts/pages/{slug}.html",
"DRAFT_PAGE_LANG_URL": "drafts/pages/{slug}-{lang}.html",
"DRAFT_PAGE_LANG_SAVE_AS": "drafts/pages/{slug}-{lang}.html",
"STATIC_URL": "{path}",
"STATIC_SAVE_AS": "{path}",
"STATIC_CREATE_LINKS": False,
"STATIC_CHECK_IF_MODIFIED": False,
"CATEGORY_URL": "category/{slug}.html",
"CATEGORY_SAVE_AS": "category/{slug}.html",
"TAG_URL": "tag/{slug}.html",
"TAG_SAVE_AS": "tag/{slug}.html",
"AUTHOR_URL": "author/{slug}.html",
"AUTHOR_SAVE_AS": "author/{slug}.html",
"PAGINATION_PATTERNS": [
(1, "{name}{extension}", "{name}{extension}"),
(2, "{name}{number}{extension}", "{name}{number}{extension}"),
],
"YEAR_ARCHIVE_URL": "",
"YEAR_ARCHIVE_SAVE_AS": "",
"MONTH_ARCHIVE_URL": "",
"MONTH_ARCHIVE_SAVE_AS": "",
"DAY_ARCHIVE_URL": "",
"DAY_ARCHIVE_SAVE_AS": "",
"RELATIVE_URLS": False,
"DEFAULT_LANG": "en",
"ARTICLE_TRANSLATION_ID": "slug",
"PAGE_TRANSLATION_ID": "slug",
"DIRECT_TEMPLATES": ["index", "tags", "categories", "authors", "archives"],
"THEME_TEMPLATES_OVERRIDES": [],
"PAGINATED_TEMPLATES": {
"index": None,
"tag": None,
"category": None,
"author": None,
},
"PELICAN_CLASS": "pelican.Pelican",
"DEFAULT_DATE_FORMAT": "%a %d %B %Y",
"DATE_FORMATS": {},
"MARKDOWN": {
"extension_configs": {
"markdown.extensions.codehilite": {"css_class": "highlight"},
"markdown.extensions.extra": {},
"markdown.extensions.meta": {},
},
"output_format": "html5",
},
"JINJA_FILTERS": {},
"JINJA_GLOBALS": {},
"JINJA_TESTS": {},
"JINJA_ENVIRONMENT": {
"trim_blocks": True,
"lstrip_blocks": True,
"extensions": [],
},
"LOG_FILTER": [],
"LOCALE": [""], # defaults to user locale
"DEFAULT_PAGINATION": False,
"DEFAULT_ORPHANS": 0,
"DEFAULT_METADATA": {},
"FILENAME_METADATA": r"(?P<date>\d{4}-\d{2}-\d{2}).*",
"PATH_METADATA": "",
"EXTRA_PATH_METADATA": {},
"ARTICLE_PERMALINK_STRUCTURE": "",
"TYPOGRIFY": False,
"TYPOGRIFY_IGNORE_TAGS": [],
"TYPOGRIFY_DASHES": "default",
"SUMMARY_END_SUFFIX": "…",
"SUMMARY_MAX_LENGTH": 50,
"PLUGIN_PATHS": [],
"PLUGINS": None,
"PYGMENTS_RST_OPTIONS": {},
"TEMPLATE_PAGES": {},
"TEMPLATE_EXTENSIONS": [".html"],
"IGNORE_FILES": [".#*"],
"SLUG_REGEX_SUBSTITUTIONS": [
(r"[^\w\s-]", ""), # remove non-alphabetical/whitespace/'-' chars
(r"(?u)\A\s*", ""), # strip leading whitespace
(r"(?u)\s*\Z", ""), # strip trailing whitespace
(r"[-\s]+", "-"), # reduce multiple whitespace or '-' to single '-'
],
"INTRASITE_LINK_REGEX": "[{|](?P<what>.*?)[|}]",
"SLUGIFY_SOURCE": "title",
"SLUGIFY_USE_UNICODE": False,
"SLUGIFY_PRESERVE_CASE": False,
"CACHE_CONTENT": False,
"CONTENT_CACHING_LAYER": "reader",
"CACHE_PATH": "cache",
"GZIP_CACHE": True,
"CHECK_MODIFIED_METHOD": "mtime",
"LOAD_CONTENT_CACHE": False,
"FORMATTED_FIELDS": ["summary"],
"PORT": 8000,
"BIND": "127.0.0.1",
}
PYGMENTS_RST_OPTIONS = None
def read_settings(
path: Optional[str] = None, override: Optional[Settings] = None
) -> Settings:
settings = override or {}
if path:
settings = dict(get_settings_from_file(path), **settings)
if settings:
settings = handle_deprecated_settings(settings)
if path:
# Make relative paths absolute
def getabs(maybe_relative, base_path=path):
if isabs(maybe_relative):
return maybe_relative
return os.path.abspath(
os.path.normpath(
os.path.join(os.path.dirname(base_path), maybe_relative)
)
)
for p in ["PATH", "OUTPUT_PATH", "THEME", "CACHE_PATH"]:
if settings.get(p) is not None:
absp = getabs(settings[p])
# THEME may be a name rather than a path
if p != "THEME" or os.path.exists(absp):
settings[p] = absp
if settings.get("PLUGIN_PATHS") is not None:
settings["PLUGIN_PATHS"] = [
getabs(pluginpath) for pluginpath in settings["PLUGIN_PATHS"]
]
settings = dict(copy.deepcopy(DEFAULT_CONFIG), **settings)
settings = configure_settings(settings)
# This is because there doesn't seem to be a way to pass extra
# parameters to docutils directive handlers, so we have to have a
# variable here that we'll import from within Pygments.run (see
# rstdirectives.py) to see what the user defaults were.
global PYGMENTS_RST_OPTIONS # noqa: PLW0603
PYGMENTS_RST_OPTIONS = settings.get("PYGMENTS_RST_OPTIONS", None)
return settings
def get_settings_from_module(module: Optional[ModuleType] = None) -> Settings:
"""Loads settings from a module, returns a dictionary."""
context = {}
if module is not None:
context.update((k, v) for k, v in inspect.getmembers(module) if k.isupper())
return context
def get_settings_from_file(path: str) -> Settings:
"""Loads settings from a file path, returning a dict."""
name, ext = os.path.splitext(os.path.basename(path))
module = load_source(name, path)
return get_settings_from_module(module)
def get_jinja_environment(settings: Settings) -> Settings:
"""Sets the environment for Jinja"""
jinja_env = settings.setdefault(
"JINJA_ENVIRONMENT", DEFAULT_CONFIG["JINJA_ENVIRONMENT"]
)
# Make sure we include the defaults if the user has set env variables
for key, value in DEFAULT_CONFIG["JINJA_ENVIRONMENT"].items():
if key not in jinja_env:
jinja_env[key] = value
return settings
def _printf_s_to_format_field(printf_string: str, format_field: str) -> str:
"""Tries to replace %s with {format_field} in the provided printf_string.
Raises ValueError in case of failure.
"""
TEST_STRING = "PELICAN_PRINTF_S_DEPRECATION"
expected = printf_string % TEST_STRING
result = printf_string.replace("{", "{{").replace("}", "}}") % f"{{{format_field}}}"
if result.format(**{format_field: TEST_STRING}) != expected:
raise ValueError(f"Failed to safely replace %s with {{{format_field}}}")
return result
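# Illustrative example (not part of the original source): converting a
# deprecated printf-style feed setting into a format field:
#   _printf_s_to_format_field("feeds/%s.atom.xml", "slug")
#   -> "feeds/{slug}.atom.xml"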
def handle_deprecated_settings(settings: Settings) -> Settings:
"""Converts deprecated settings and issues warnings. Issues an exception
if both old and new setting is specified.
"""
# PLUGIN_PATH -> PLUGIN_PATHS
if "PLUGIN_PATH" in settings:
logger.warning(
"PLUGIN_PATH setting has been replaced by "
"PLUGIN_PATHS, moving it to the new setting name."
)
settings["PLUGIN_PATHS"] = settings["PLUGIN_PATH"]
del settings["PLUGIN_PATH"]
# PLUGIN_PATHS: str -> [str]
if isinstance(settings.get("PLUGIN_PATHS"), str):
logger.warning(
"Defining PLUGIN_PATHS setting as string "
"has been deprecated (should be a list)"
)
settings["PLUGIN_PATHS"] = [settings["PLUGIN_PATHS"]]
# JINJA_EXTENSIONS -> JINJA_ENVIRONMENT > extensions
if "JINJA_EXTENSIONS" in settings:
logger.warning(
"JINJA_EXTENSIONS setting has been deprecated, "
"moving it to JINJA_ENVIRONMENT setting."
)
settings["JINJA_ENVIRONMENT"]["extensions"] = settings["JINJA_EXTENSIONS"]
del settings["JINJA_EXTENSIONS"]
# {ARTICLE,PAGE}_DIR -> {ARTICLE,PAGE}_PATHS
for key in ["ARTICLE", "PAGE"]:
old_key = key + "_DIR"
new_key = key + "_PATHS"
if old_key in settings:
logger.warning(
"Deprecated setting %s, moving it to %s list", old_key, new_key
)
settings[new_key] = [settings[old_key]] # also make a list
del settings[old_key]
# EXTRA_TEMPLATES_PATHS -> THEME_TEMPLATES_OVERRIDES
if "EXTRA_TEMPLATES_PATHS" in settings:
logger.warning(
"EXTRA_TEMPLATES_PATHS is deprecated use "
"THEME_TEMPLATES_OVERRIDES instead."
)
if settings.get("THEME_TEMPLATES_OVERRIDES"):
raise Exception(
"Setting both EXTRA_TEMPLATES_PATHS and "
"THEME_TEMPLATES_OVERRIDES is not permitted. Please move to "
"only setting THEME_TEMPLATES_OVERRIDES."
)
settings["THEME_TEMPLATES_OVERRIDES"] = settings["EXTRA_TEMPLATES_PATHS"]
del settings["EXTRA_TEMPLATES_PATHS"]
# MD_EXTENSIONS -> MARKDOWN
if "MD_EXTENSIONS" in settings:
logger.warning(
"MD_EXTENSIONS is deprecated use MARKDOWN "
"instead. Falling back to the default."
)
settings["MARKDOWN"] = DEFAULT_CONFIG["MARKDOWN"]
# LESS_GENERATOR -> Webassets plugin
# FILES_TO_COPY -> STATIC_PATHS, EXTRA_PATH_METADATA
for old, new, doc in [
("LESS_GENERATOR", "the Webassets plugin", None),
(
"FILES_TO_COPY",
"STATIC_PATHS and EXTRA_PATH_METADATA",
"https://github.com/getpelican/pelican/"
"blob/main/docs/settings.rst#path-metadata",
),
]:
if old in settings:
message = f"The {old} setting has been removed in favor of {new}"
if doc:
message += f", see {doc} for details"
logger.warning(message)
# PAGINATED_DIRECT_TEMPLATES -> PAGINATED_TEMPLATES
if "PAGINATED_DIRECT_TEMPLATES" in settings:
message = "The {} setting has been removed in favor of {}".format(
"PAGINATED_DIRECT_TEMPLATES", "PAGINATED_TEMPLATES"
)
logger.warning(message)
# set PAGINATED_TEMPLATES
if "PAGINATED_TEMPLATES" not in settings:
settings["PAGINATED_TEMPLATES"] = {
"tag": None,
"category": None,
"author": None,
}
for t in settings["PAGINATED_DIRECT_TEMPLATES"]:
if t not in settings["PAGINATED_TEMPLATES"]:
settings["PAGINATED_TEMPLATES"][t] = None
del settings["PAGINATED_DIRECT_TEMPLATES"]
# {SLUG,CATEGORY,TAG,AUTHOR}_SUBSTITUTIONS ->
# {SLUG,CATEGORY,TAG,AUTHOR}_REGEX_SUBSTITUTIONS
url_settings_url = "http://docs.getpelican.com/en/latest/settings.html#url-settings"
flavours = {"SLUG", "CATEGORY", "TAG", "AUTHOR"}
old_values = {
f: settings[f + "_SUBSTITUTIONS"]
for f in flavours
if f + "_SUBSTITUTIONS" in settings
}
new_values = {
f: settings[f + "_REGEX_SUBSTITUTIONS"]
for f in flavours
if f + "_REGEX_SUBSTITUTIONS" in settings
}
if old_values and new_values:
raise Exception(
"Setting both {new_key} and {old_key} (or variants thereof) is "
"not permitted. Please move to only setting {new_key}.".format(
old_key="SLUG_SUBSTITUTIONS", new_key="SLUG_REGEX_SUBSTITUTIONS"
)
)
if old_values:
message = (
"{} and variants thereof are deprecated and will be "
"removed in the future. Please use {} and variants thereof "
"instead. Check {}.".format(
"SLUG_SUBSTITUTIONS", "SLUG_REGEX_SUBSTITUTIONS", url_settings_url
)
)
logger.warning(message)
if old_values.get("SLUG"):
for f in ("CATEGORY", "TAG"):
if old_values.get(f):
old_values[f] = old_values["SLUG"] + old_values[f]
old_values["AUTHOR"] = old_values.get("AUTHOR", [])
for f in flavours:
if old_values.get(f) is not None:
regex_subs = []
# by default will replace non-alphanum characters
replace = True
for tpl in old_values[f]:
try:
src, dst, skip = tpl
if skip:
replace = False
except ValueError:
src, dst = tpl
regex_subs.append((re.escape(src), dst.replace("\\", r"\\")))
if replace:
regex_subs += [
(r"[^\w\s-]", ""),
(r"(?u)\A\s*", ""),
(r"(?u)\s*\Z", ""),
(r"[-\s]+", "-"),
]
else:
regex_subs += [
(r"(?u)\A\s*", ""),
(r"(?u)\s*\Z", ""),
]
settings[f + "_REGEX_SUBSTITUTIONS"] = regex_subs
settings.pop(f + "_SUBSTITUTIONS", None)
    # `%s` -> `{slug}` or `{lang}` in FEED settings
for key in ["TRANSLATION_FEED_ATOM", "TRANSLATION_FEED_RSS"]:
if (
settings.get(key)
and not isinstance(settings[key], Path)
and "%s" in settings[key]
):
logger.warning("%%s usage in %s is deprecated, use {lang} instead.", key)
try:
settings[key] = _printf_s_to_format_field(settings[key], "lang")
except ValueError:
logger.warning(
"Failed to convert %%s to {lang} for %s. "
"Falling back to default.",
key,
)
settings[key] = DEFAULT_CONFIG[key]
for key in [
"AUTHOR_FEED_ATOM",
"AUTHOR_FEED_RSS",
"CATEGORY_FEED_ATOM",
"CATEGORY_FEED_RSS",
"TAG_FEED_ATOM",
"TAG_FEED_RSS",
]:
if (
settings.get(key)
and not isinstance(settings[key], Path)
and "%s" in settings[key]
):
logger.warning("%%s usage in %s is deprecated, use {slug} instead.", key)
try:
settings[key] = _printf_s_to_format_field(settings[key], "slug")
except ValueError:
logger.warning(
"Failed to convert %%s to {slug} for %s. "
"Falling back to default.",
key,
)
settings[key] = DEFAULT_CONFIG[key]
# CLEAN_URLS
if settings.get("CLEAN_URLS", False):
logger.warning(
"Found deprecated `CLEAN_URLS` in settings."
" Modifying the following settings for the"
" same behaviour."
)
settings["ARTICLE_URL"] = "{slug}/"
settings["ARTICLE_LANG_URL"] = "{slug}-{lang}/"
settings["PAGE_URL"] = "pages/{slug}/"
settings["PAGE_LANG_URL"] = "pages/{slug}-{lang}/"
for setting in ("ARTICLE_URL", "ARTICLE_LANG_URL", "PAGE_URL", "PAGE_LANG_URL"):
logger.warning("%s = '%s'", setting, settings[setting])
# AUTORELOAD_IGNORE_CACHE -> --ignore-cache
if settings.get("AUTORELOAD_IGNORE_CACHE"):
logger.warning(
"Found deprecated `AUTORELOAD_IGNORE_CACHE` in "
"settings. Use --ignore-cache instead."
)
settings.pop("AUTORELOAD_IGNORE_CACHE")
# ARTICLE_PERMALINK_STRUCTURE
if settings.get("ARTICLE_PERMALINK_STRUCTURE", False):
logger.warning(
"Found deprecated `ARTICLE_PERMALINK_STRUCTURE` in"
" settings. Modifying the following settings for"
" the same behaviour."
)
structure = settings["ARTICLE_PERMALINK_STRUCTURE"]
# Convert %(variable) into {variable}.
structure = re.sub(r"%\((\w+)\)s", r"{\g<1>}", structure)
# Convert %x into {date:%x} for strftime
structure = re.sub(r"(%[A-z])", r"{date:\g<1>}", structure)
# Strip a / prefix
structure = re.sub("^/", "", structure)
for setting in (
"ARTICLE_URL",
"ARTICLE_LANG_URL",
"PAGE_URL",
"PAGE_LANG_URL",
"DRAFT_URL",
"DRAFT_LANG_URL",
"ARTICLE_SAVE_AS",
"ARTICLE_LANG_SAVE_AS",
"DRAFT_SAVE_AS",
"DRAFT_LANG_SAVE_AS",
"PAGE_SAVE_AS",
"PAGE_LANG_SAVE_AS",
):
settings[setting] = os.path.join(structure, settings[setting])
logger.warning("%s = '%s'", setting, settings[setting])
# {,TAG,CATEGORY,TRANSLATION}_FEED -> {,TAG,CATEGORY,TRANSLATION}_FEED_ATOM
for new, old in [
("FEED", "FEED_ATOM"),
("TAG_FEED", "TAG_FEED_ATOM"),
("CATEGORY_FEED", "CATEGORY_FEED_ATOM"),
("TRANSLATION_FEED", "TRANSLATION_FEED_ATOM"),
]:
if settings.get(new, False):
logger.warning(
"Found deprecated `%(new)s` in settings. Modify %(new)s "
"to %(old)s in your settings and theme for the same "
"behavior. Temporarily setting %(old)s for backwards "
"compatibility.",
{"new": new, "old": old},
)
settings[old] = settings[new]
# Warn if removed WRITE_SELECTED is present
if "WRITE_SELECTED" in settings:
logger.warning(
"WRITE_SELECTED is present in settings but this functionality was removed. "
"It will have no effect."
)
return settings
def configure_settings(settings: Settings) -> Settings:
"""Provide optimizations, error checking, and warnings for the given
settings.
Also, specify the log messages to be ignored.
"""
if "PATH" not in settings or not os.path.isdir(settings["PATH"]):
raise Exception(
"You need to specify a path containing the content"
" (see pelican --help for more information)"
)
# specify the log messages to be ignored
log_filter = settings.get("LOG_FILTER", DEFAULT_CONFIG["LOG_FILTER"])
LimitFilter._ignore.update(set(log_filter))
# lookup the theme in "pelican/themes" if the given one doesn't exist
if not os.path.isdir(settings["THEME"]):
theme_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "themes", settings["THEME"]
)
if os.path.exists(theme_path):
settings["THEME"] = theme_path
else:
raise Exception("Could not find the theme {}".format(settings["THEME"]))
# standardize strings to lowercase strings
for key in ["DEFAULT_LANG"]:
if key in settings:
settings[key] = settings[key].lower()
# set defaults for Jinja environment
settings = get_jinja_environment(settings)
# standardize strings to lists
for key in ["LOCALE"]:
if key in settings and isinstance(settings[key], str):
settings[key] = [settings[key]]
# check settings that must be a particular type
for key, types in [
("OUTPUT_SOURCES_EXTENSION", str),
("FILENAME_METADATA", str),
]:
if key in settings and not isinstance(settings[key], types):
value = settings.pop(key)
            logger.warning(
                "Detected misconfigured %s (%s), falling back to the default (%s)",
                key,
                value,
                DEFAULT_CONFIG[key],
            )
            # Actually fall back to the default, as the warning promises
            settings[key] = DEFAULT_CONFIG[key]
# try to set the different locales, fallback on the default.
locales = settings.get("LOCALE", DEFAULT_CONFIG["LOCALE"])
for locale_ in locales:
try:
locale.setlocale(locale.LC_ALL, str(locale_))
break # break if it is successful
except locale.Error:
pass
else:
logger.warning(
"Locale could not be set. Check the LOCALE setting, ensuring it "
"is valid and available on your system."
)
if "SITEURL" in settings:
# If SITEURL has a trailing slash, remove it and provide a warning
siteurl = settings["SITEURL"]
if siteurl.endswith("/"):
settings["SITEURL"] = siteurl[:-1]
logger.warning("Removed extraneous trailing slash from SITEURL.")
# If SITEURL is defined but FEED_DOMAIN isn't,
# set FEED_DOMAIN to SITEURL
if "FEED_DOMAIN" not in settings:
settings["FEED_DOMAIN"] = settings["SITEURL"]
# check content caching layer and warn of incompatibilities
if (
settings.get("CACHE_CONTENT", False)
and settings.get("CONTENT_CACHING_LAYER", "") == "generator"
and not settings.get("WITH_FUTURE_DATES", True)
):
logger.warning(
"WITH_FUTURE_DATES conflicts with CONTENT_CACHING_LAYER "
"set to 'generator', use 'reader' layer instead"
)
# Warn if feeds are generated with both SITEURL & FEED_DOMAIN undefined
feed_keys = [
"FEED_ATOM",
"FEED_RSS",
"FEED_ALL_ATOM",
"FEED_ALL_RSS",
"CATEGORY_FEED_ATOM",
"CATEGORY_FEED_RSS",
"AUTHOR_FEED_ATOM",
"AUTHOR_FEED_RSS",
"TAG_FEED_ATOM",
"TAG_FEED_RSS",
"TRANSLATION_FEED_ATOM",
"TRANSLATION_FEED_RSS",
]
if any(settings.get(k) for k in feed_keys):
if not settings.get("SITEURL"):
logger.warning(
"Feeds generated without SITEURL set properly may not be valid"
)
if "TIMEZONE" not in settings:
logger.warning(
"No timezone information specified in the settings. Assuming"
" your timezone is UTC for feed generation. Check "
"https://docs.getpelican.com/en/latest/settings.html#TIMEZONE "
"for more information"
)
# fix up pagination rules
from pelican.paginator import PaginationRule
pagination_rules = [
PaginationRule(*r)
for r in settings.get(
"PAGINATION_PATTERNS",
DEFAULT_CONFIG["PAGINATION_PATTERNS"],
)
]
settings["PAGINATION_PATTERNS"] = sorted(
pagination_rules,
key=lambda r: r[0],
)
# Save people from accidentally setting a string rather than a list
path_keys = (
"ARTICLE_EXCLUDES",
"DEFAULT_METADATA",
"DIRECT_TEMPLATES",
"THEME_TEMPLATES_OVERRIDES",
"FILES_TO_COPY",
"IGNORE_FILES",
"PAGINATED_DIRECT_TEMPLATES",
"PLUGINS",
"STATIC_EXCLUDES",
"STATIC_PATHS",
"THEME_STATIC_PATHS",
"ARTICLE_PATHS",
"PAGE_PATHS",
)
for PATH_KEY in filter(lambda k: k in settings, path_keys):
if isinstance(settings[PATH_KEY], str):
logger.warning(
"Detected misconfiguration with %s setting "
"(must be a list), falling back to the default",
PATH_KEY,
)
settings[PATH_KEY] = DEFAULT_CONFIG[PATH_KEY]
# Add {PAGE,ARTICLE}_PATHS to {ARTICLE,PAGE}_EXCLUDES
mutually_exclusive = ("ARTICLE", "PAGE")
for type_1, type_2 in [mutually_exclusive, mutually_exclusive[::-1]]:
try:
includes = settings[type_1 + "_PATHS"]
excludes = settings[type_2 + "_EXCLUDES"]
for path in includes:
if path not in excludes:
excludes.append(path)
except KeyError:
continue # setting not specified, nothing to do
return settings

size: 26,023 | language: Python | extension: .py | total_lines: 662 | avg_line_length: 30.243202 | max_line_length: 88 | alphanum_fraction: 0.578379 | repo_name: getpelican/pelican | repo_stars: 12,478 | repo_forks: 1,806 | repo_open_issues: 72 | repo_license: AGPL-3.0 | repo_extraction_date: 9/5/2024, 5:09:45 PM (Europe/Amsterdam)

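A hedged sketch of how the functions above combine; the `pelicanconf.py` path and the override keys here are hypothetical:

```python
# Sketch of loading settings. read_settings() layers, in increasing priority:
# DEFAULT_CONFIG < the settings module found at `path` < the override dict,
# then configure_settings() validates paths, locale, feeds, etc.
from pelican.settings import read_settings

settings = read_settings(
    path="pelicanconf.py",  # assumed to exist next to your content
    override={"SITENAME": "Demo Blog", "RELATIVE_URLS": True},
)
print(settings["SITENAME"])  # "Demo Blog": the override wins
print(settings["THEME"])     # resolved to an absolute theme path
```
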
id: 6,203 | file_name: utils.py | file_path: getpelican_pelican/pelican/utils.py

from __future__ import annotations
import datetime
import fnmatch
import locale
import logging
import os
import pathlib
import re
import shutil
import sys
import traceback
import urllib
from collections.abc import Hashable
from contextlib import contextmanager
from functools import partial
from html import entities
from html.parser import HTMLParser
from itertools import groupby
from operator import attrgetter
from typing import (
TYPE_CHECKING,
Any,
Callable,
Collection,
Generator,
Iterable,
Sequence,
)
import dateutil.parser
from watchfiles import Change
try:
from zoneinfo import ZoneInfo
except ModuleNotFoundError:
from backports.zoneinfo import ZoneInfo
import watchfiles
from markupsafe import Markup
if TYPE_CHECKING:
from pelican.contents import Content
from pelican.settings import Settings
logger = logging.getLogger(__name__)
def sanitised_join(base_directory: str, *parts: str) -> str:
joined = posixize_path(os.path.abspath(os.path.join(base_directory, *parts)))
base = posixize_path(os.path.abspath(base_directory))
if not joined.startswith(base):
raise RuntimeError(f"Attempted to break out of output directory to {joined}")
return joined
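# Illustrative behaviour (not part of the original source), on a POSIX system:
#   sanitised_join("/output", "posts/a.html")  -> "/output/posts/a.html"
#   sanitised_join("/output", "../etc/passwd") -> raises RuntimeError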
def strftime(date: datetime.datetime, date_format: str) -> str:
"""
Enhanced replacement for built-in strftime with zero stripping
This works by 'grabbing' possible format strings (those starting with %),
formatting them with the date, stripping any leading zeros if - prefix is
used and replacing formatted output back.
"""
def strip_zeros(x):
return x.lstrip("0") or "0"
# includes ISO date parameters added by Python 3.6
c89_directives = "aAbBcdfGHIjmMpSUuVwWxXyYzZ%"
# grab candidate format options
format_options = "%[-]?."
candidates = re.findall(format_options, date_format)
# replace candidates with placeholders for later % formatting
template = re.sub(format_options, "%s", date_format)
formatted_candidates = []
for candidate in candidates:
# test for valid C89 directives only
if candidate[-1] in c89_directives:
# check for '-' prefix
if len(candidate) == 3: # noqa: PLR2004
# '-' prefix
candidate = f"%{candidate[-1]}"
conversion = strip_zeros
else:
conversion = None
# format date
if isinstance(date, SafeDatetime):
formatted = date.strftime(candidate, safe=False)
else:
formatted = date.strftime(candidate)
# strip zeros if '-' prefix is used
if conversion:
formatted = conversion(formatted)
else:
formatted = candidate
formatted_candidates.append(formatted)
# put formatted candidates back and return
return template % tuple(formatted_candidates)
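# Illustrative example (not part of the original source), under an English locale:
#   strftime(datetime.datetime(2024, 7, 5), "%-d %B %Y") -> "5 July 2024"
# The '-' prefix strips the leading zero that a plain '%d' would produce,
# and the stripping happens in Python, so it works on every platform.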
class SafeDatetime(datetime.datetime):
"""Subclass of datetime that works with utf-8 format strings on PY2"""
def strftime(self, fmt, safe=True):
"""Uses our custom strftime if supposed to be *safe*"""
if safe:
return strftime(self, fmt)
else:
return super().strftime(fmt)
class DateFormatter:
"""A date formatter object used as a jinja filter
Uses the `strftime` implementation and makes sure jinja uses the locale
defined in LOCALE setting
"""
def __init__(self) -> None:
self.locale = locale.setlocale(locale.LC_TIME)
        # Python has an issue with the Turkish_Türkiye.1254 locale; replace it
        # with an accepted alternative: Turkish
if self.locale == "Turkish_Türkiye.1254":
self.locale = "Turkish"
def __call__(self, date: datetime.datetime, date_format: str) -> str:
# on OSX, encoding from LC_CTYPE determines the unicode output in PY3
# make sure it's same as LC_TIME
with temporary_locale(self.locale, locale.LC_TIME), temporary_locale(
self.locale, locale.LC_CTYPE
):
formatted = strftime(date, date_format)
return formatted
class memoized:
"""Function decorator to cache return values.
If called later with the same arguments, the cached value is returned
(not reevaluated).
"""
def __init__(self, func: Callable) -> None:
self.func = func
self.cache: dict[Any, Any] = {}
def __call__(self, *args) -> Any:
if not isinstance(args, Hashable):
# uncacheable. a list, for instance.
# better to not cache than blow up.
return self.func(*args)
if args in self.cache:
return self.cache[args]
else:
value = self.func(*args)
self.cache[args] = value
return value
def __repr__(self) -> str | None:
return self.func.__doc__
def __get__(self, obj: Any, objtype):
"""Support instance methods."""
fn = partial(self.__call__, obj)
fn.cache = self.cache
return fn
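# Illustrative usage (not part of the original source):
#   @memoized
#   def fib(n):
#       return n if n < 2 else fib(n - 1) + fib(n - 2)
# Repeated calls with the same arguments hit the cache instead of recursing.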
def deprecated_attribute(
old: str,
new: str,
since: tuple[int, ...],
remove: tuple[int, ...] | None = None,
doc: str | None = None,
):
"""Attribute deprecation decorator for gentle upgrades
For example:
class MyClass (object):
@deprecated_attribute(
old='abc', new='xyz', since=(3, 2, 0), remove=(4, 1, 3))
def abc(): return None
def __init__(self):
xyz = 5
Note that the decorator needs a dummy method to attach to, but the
content of the dummy method is ignored.
"""
def _warn():
version = ".".join(str(x) for x in since)
message = [f"{old} has been deprecated since {version}"]
if remove:
version = ".".join(str(x) for x in remove)
message.append(f" and will be removed by version {version}")
message.append(f". Use {new} instead.")
logger.warning("".join(message))
logger.debug("".join(str(x) for x in traceback.format_stack()))
def fget(self):
_warn()
return getattr(self, new)
def fset(self, value):
_warn()
setattr(self, new, value)
def decorator(dummy):
return property(fget=fget, fset=fset, doc=doc)
return decorator
def get_date(string: str) -> datetime.datetime:
"""Return a datetime object from a string.
If no format matches the given date, raise a ValueError.
"""
string = re.sub(" +", " ", string)
default = SafeDatetime.now().replace(hour=0, minute=0, second=0, microsecond=0)
try:
return dateutil.parser.parse(string, default=default)
except (TypeError, ValueError):
raise ValueError(f"{string!r} is not a valid date") from None
@contextmanager
def pelican_open(
filename: str, mode: str = "r", strip_crs: bool = (sys.platform == "win32")
) -> Generator[str, None, None]:
"""Open a file and return its content"""
# utf-8-sig will clear any BOM if present
with open(filename, mode, encoding="utf-8-sig") as infile:
content = infile.read()
yield content
def slugify(
value: str,
regex_subs: Iterable[tuple[str, str]] = (),
preserve_case: bool = False,
use_unicode: bool = False,
) -> str:
"""
Normalizes string, converts to lowercase, removes non-alpha characters,
and converts spaces to hyphens.
    Taken from the Django sources.
For a set of sensible default regex substitutions to pass to regex_subs
look into pelican.settings.DEFAULT_CONFIG['SLUG_REGEX_SUBSTITUTIONS'].
"""
import unicodedata
import unidecode
def normalize_unicode(text: str) -> str:
# normalize text by compatibility composition
# see: https://en.wikipedia.org/wiki/Unicode_equivalence
return unicodedata.normalize("NFKC", text)
# strip tags from value
value = Markup(value).striptags()
# normalization
value = normalize_unicode(value)
if not use_unicode:
# ASCII-fy
value = unidecode.unidecode(value)
# perform regex substitutions
for src, dst in regex_subs:
value = re.sub(
normalize_unicode(src), normalize_unicode(dst), value, flags=re.IGNORECASE
)
if not preserve_case:
value = value.lower()
return value.strip()
def copy(source: str, destination: str, ignores: Iterable[str] | None = None) -> None:
"""Recursively copy source into destination.
If source is a file, destination has to be a file as well.
The function is able to copy either files or directories.
:param source: the source file or directory
:param destination: the destination file or directory
:param ignores: either None, or a list of glob patterns;
files matching those patterns will _not_ be copied.
"""
def walk_error(err):
logger.warning("While copying %s: %s: %s", source_, err.filename, err.strerror)
source_ = os.path.abspath(os.path.expanduser(source))
destination_ = os.path.abspath(os.path.expanduser(destination))
if ignores is None:
ignores = []
if any(fnmatch.fnmatch(os.path.basename(source), ignore) for ignore in ignores):
logger.info("Not copying %s due to ignores", source_)
return
if os.path.isfile(source_):
dst_dir = os.path.dirname(destination_)
if not os.path.exists(dst_dir):
logger.info("Creating directory %s", dst_dir)
os.makedirs(dst_dir)
logger.info("Copying %s to %s", source_, destination_)
copy_file(source_, destination_)
elif os.path.isdir(source_):
if not os.path.exists(destination_):
logger.info("Creating directory %s", destination_)
os.makedirs(destination_)
if not os.path.isdir(destination_):
logger.warning(
"Cannot copy %s (a directory) to %s (a file)", source_, destination_
)
return
for src_dir, subdirs, others in os.walk(source_, followlinks=True):
dst_dir = os.path.join(destination_, os.path.relpath(src_dir, source_))
subdirs[:] = (
s for s in subdirs if not any(fnmatch.fnmatch(s, i) for i in ignores)
)
others[:] = (
o for o in others if not any(fnmatch.fnmatch(o, i) for i in ignores)
)
if not os.path.isdir(dst_dir):
logger.info("Creating directory %s", dst_dir)
# Parent directories are known to exist, so 'mkdir' suffices.
os.mkdir(dst_dir)
for o in others:
src_path = os.path.join(src_dir, o)
dst_path = os.path.join(dst_dir, o)
if os.path.isfile(src_path):
logger.info("Copying %s to %s", src_path, dst_path)
copy_file(src_path, dst_path)
else:
logger.warning(
"Skipped copy %s (not a file or directory) to %s",
src_path,
dst_path,
)
def copy_file(source: str, destination: str) -> None:
"""Copy a file"""
try:
shutil.copyfile(source, destination)
except OSError as e:
logger.warning(
"A problem occurred copying file %s to %s; %s", source, destination, e
)
def clean_output_dir(path: str, retention: Iterable[str]) -> None:
"""Remove all files from output directory except those in retention list"""
if not os.path.exists(path):
logger.debug("Directory already removed: %s", path)
return
if not os.path.isdir(path):
try:
os.remove(path)
except Exception as e:
logger.error("Unable to delete file %s; %s", path, e)
return
# remove existing content from output folder unless in retention list
for filename in os.listdir(path):
file = os.path.join(path, filename)
if any(filename == retain for retain in retention):
logger.debug(
"Skipping deletion; %s is on retention list: %s", filename, file
)
elif os.path.isdir(file):
try:
shutil.rmtree(file)
logger.debug("Deleted directory %s", file)
except Exception as e:
logger.error("Unable to delete directory %s; %s", file, e)
elif os.path.isfile(file) or os.path.islink(file):
try:
os.remove(file)
logger.debug("Deleted file/link %s", file)
except Exception as e:
logger.error("Unable to delete file %s; %s", file, e)
else:
logger.error("Unable to delete %s, file type unknown", file)
def get_relative_path(path: str) -> str:
"""Return the relative path from the given path to the root path."""
components = split_all(path)
if components is None or len(components) <= 1:
return os.curdir
else:
parents = [os.pardir] * (len(components) - 1)
return os.path.join(*parents)
def path_to_url(path: str) -> str:
"""Return the URL corresponding to a given path."""
if path is not None:
path = posixize_path(path)
return path
def posixize_path(rel_path: str) -> str:
"""Use '/' as path separator, so that source references,
like '{static}/foo/bar.jpg' or 'extras/favicon.ico',
will work on Windows as well as on Mac and Linux."""
return rel_path.replace(os.sep, "/")
class _HTMLWordTruncator(HTMLParser):
_word_regex = re.compile(
r"{DBC}|(\w[\w'-]*)".format(
            # DBC means CJK-like characters. A single character can stand for a word.
DBC=(
"([\u4e00-\u9fff])|" # CJK Unified Ideographs
"([\u3400-\u4dbf])|" # CJK Unified Ideographs Extension A
"([\uf900-\ufaff])|" # CJK Compatibility Ideographs
"([\U00020000-\U0002a6df])|" # CJK Unified Ideographs Extension B
"([\U0002f800-\U0002fa1f])|" # CJK Compatibility Ideographs Supplement
"([\u3040-\u30ff])|" # Hiragana and Katakana
"([\u1100-\u11ff])|" # Hangul Jamo
"([\uac00-\ud7ff])|" # Hangul Compatibility Jamo
"([\u3130-\u318f])" # Hangul Syllables
)
),
re.UNICODE,
)
_word_prefix_regex = re.compile(r"\w", re.U)
_singlets = ("br", "col", "link", "base", "img", "param", "area", "hr", "input")
class TruncationCompleted(Exception):
def __init__(self, truncate_at: int) -> None:
super().__init__(truncate_at)
self.truncate_at = truncate_at
def __init__(self, max_words: int) -> None:
super().__init__(convert_charrefs=False)
self.max_words = max_words
self.words_found = 0
self.open_tags = []
self.last_word_end = None
self.truncate_at: int | None = None
def feed(self, *args, **kwargs) -> None:
try:
super().feed(*args, **kwargs)
except self.TruncationCompleted as exc:
self.truncate_at = exc.truncate_at
else:
self.truncate_at = None
def getoffset(self) -> int:
line_start = 0
lineno, line_offset = self.getpos()
for _ in range(lineno - 1):
line_start = self.rawdata.index("\n", line_start) + 1
return line_start + line_offset
def add_word(self, word_end: int) -> None:
self.words_found += 1
self.last_word_end = None
if self.words_found == self.max_words:
raise self.TruncationCompleted(word_end)
def add_last_word(self) -> None:
if self.last_word_end is not None:
self.add_word(self.last_word_end)
def handle_starttag(self, tag: str, attrs: Any) -> None:
self.add_last_word()
if tag not in self._singlets:
self.open_tags.insert(0, tag)
def handle_endtag(self, tag: str) -> None:
self.add_last_word()
try:
i = self.open_tags.index(tag)
except ValueError:
pass
else:
# SGML: An end tag closes, back to the matching start tag,
# all unclosed intervening start tags with omitted end tags
del self.open_tags[: i + 1]
def handle_data(self, data: str) -> None:
word_end = 0
offset = self.getoffset()
while self.words_found < self.max_words:
match = self._word_regex.search(data, word_end)
if not match:
break
if match.start(0) > 0:
self.add_last_word()
word_end = match.end(0)
self.last_word_end = offset + word_end
if word_end < len(data):
self.add_last_word()
def _handle_ref(self, name: str, char: str) -> None:
"""
Called by handle_entityref() or handle_charref() when a ref like
`—`, `—`, or `—` is found.
The arguments for this method are:
- `name`: the HTML entity name (such as `mdash` or `#8212` or `#x2014`)
- `char`: the Unicode representation of the ref (such as `—`)
This method checks whether the entity is considered to be part of a
word or not and, if not, signals the end of a word.
"""
# Compute the index of the character right after the ref.
#
# In a string like 'prefix—suffix', the end is the sum of:
#
# - `self.getoffset()` (the length of `prefix`)
# - `1` (the length of `&`)
# - `len(name)` (the length of `mdash`)
# - `1` (the length of `;`)
#
# Note that, in case of malformed HTML, the ';' character may
# not be present.
offset = self.getoffset()
ref_end = offset + len(name) + 1
try:
if self.rawdata[ref_end] == ";":
ref_end += 1
except IndexError:
# We are at the end of the string and there's no ';'
pass
if self.last_word_end is None:
if self._word_prefix_regex.match(char):
self.last_word_end = ref_end
elif self._word_regex.match(char):
self.last_word_end = ref_end
else:
self.add_last_word()
def handle_entityref(self, name: str) -> None:
"""
Called when an entity ref like '—' is found
`name` is the entity ref without ampersand and semicolon (e.g. `mdash`)
"""
try:
codepoint = entities.name2codepoint[name]
char = chr(codepoint)
except KeyError:
char = ""
self._handle_ref(name, char)
def handle_charref(self, name: str) -> None:
"""
Called when a char ref like '—' or '—' is found
`name` is the char ref without ampersand and semicolon (e.g. `#8212` or
`#x2014`)
"""
try:
if name.startswith("x"):
codepoint = int(name[1:], 16)
else:
codepoint = int(name)
char = chr(codepoint)
except (ValueError, OverflowError):
char = ""
self._handle_ref("#" + name, char)
def truncate_html_words(s: str, num: int, end_text: str = "…") -> str:
"""Truncates HTML to a certain number of words.
(not counting tags and comments). Closes opened tags if they were correctly
closed in the given html. Takes an optional argument of what should be used
to notify that the string has been truncated, defaulting to ellipsis (…).
Newlines in the HTML are preserved. (From the django framework).
"""
length = int(num)
if length <= 0:
return ""
truncator = _HTMLWordTruncator(length)
truncator.feed(s)
if truncator.truncate_at is None:
return s
out = s[: truncator.truncate_at]
if end_text:
out += " " + end_text
# Close any tags still open
for tag in truncator.open_tags:
out += f"</{tag}>"
# Return string
return out
def truncate_html_paragraphs(s, count):
"""Truncate HTML to a certain number of paragraphs.
:param count: number of paragraphs to keep
Newlines in the HTML are preserved.
"""
paragraphs = []
tag_stop = 0
substr = s[:]
for _ in range(count):
substr = substr[tag_stop:]
tag_start = substr.find("<p>")
tag_stop = substr.find("</p>") + len("</p>")
paragraphs.append(substr[tag_start:tag_stop])
return "".join(paragraphs)
def process_translations(
content_list: list[Content],
translation_id: str | Collection[str] | None = None,
) -> tuple[list[Content], list[Content]]:
"""Finds translations and returns them.
For each content_list item, populates the 'translations' attribute, and
returns a tuple with two lists (index, translations). Index list includes
items in default language or items which have no variant in default
language. Items with the `translation` metadata set to something else than
`False` or `false` will be used as translations, unless all the items in
the same group have that metadata.
Translations and original items are determined relative to one another
amongst items in the same group. Items are in the same group if they
have the same value(s) for the metadata attribute(s) specified by the
'translation_id', which must be a string or a collection of strings.
If 'translation_id' is falsy, the identification of translations is skipped
and all items are returned as originals.
"""
if not translation_id:
return content_list, []
if isinstance(translation_id, str):
translation_id = {translation_id}
index = []
try:
content_list.sort(key=attrgetter(*translation_id))
except TypeError:
raise TypeError(
f"Cannot unpack {translation_id}, 'translation_id' must be falsy, a"
" string or a collection of strings"
) from None
except AttributeError:
raise AttributeError(
f"Cannot use {translation_id} as 'translation_id', there "
"appear to be items without these metadata "
"attributes"
) from None
for id_vals, items in groupby(content_list, attrgetter(*translation_id)):
# prepare warning string
id_vals = (id_vals,) if len(translation_id) == 1 else id_vals
with_str = "with" + ", ".join([' {} "{{}}"'] * len(translation_id)).format(
*translation_id
).format(*id_vals)
items = list(items)
original_items = get_original_items(items, with_str)
index.extend(original_items)
for a in items:
a.translations = [x for x in items if x != a]
translations = [x for x in content_list if x not in index]
return index, translations
def get_original_items(items: list[Content], with_str: str) -> list[Content]:
def _warn_source_paths(msg, items, *extra):
args = [len(items)]
args.extend(extra)
args.extend(x.source_path for x in items)
logger.warning("{}: {}".format(msg, "\n%s" * len(items)), *args)
# warn if several items have the same lang
for lang, lang_items in groupby(items, attrgetter("lang")):
lang_items = list(lang_items)
if len(lang_items) > 1:
_warn_source_paths(
'There are %s items "%s" with lang %s', lang_items, with_str, lang
)
# items with `translation` metadata will be used as translations...
candidate_items = [
i for i in items if i.metadata.get("translation", "false").lower() == "false"
]
# ...unless all items with that slug are translations
if not candidate_items:
_warn_source_paths('All items ("%s") "%s" are translations', items, with_str)
candidate_items = items
# find items with default language
original_items = [i for i in candidate_items if i.in_default_lang]
# if there is no article with default language, go back one step
if not original_items:
original_items = candidate_items
# warn if there are several original items
if len(original_items) > 1:
_warn_source_paths(
"There are %s original (not translated) items %s", original_items, with_str
)
return original_items
def order_content(
content_list: list[Content],
order_by: str | Callable[[Content], Any] | None = "slug",
) -> list[Content]:
"""Sorts content.
    order_by can be a string naming an attribute, or a sorting function. If order_by
    is defined, content will be ordered by that attribute or sorting function.
By default, content is ordered by slug.
Different content types can have default order_by attributes defined
in settings, e.g. PAGES_ORDER_BY='sort-order', in which case `sort-order`
should be a defined metadata attribute in each page.
"""
if order_by:
if callable(order_by):
try:
content_list.sort(key=order_by)
except Exception:
logger.error("Error sorting with function %s", order_by)
elif isinstance(order_by, str):
if order_by.startswith("reversed-"):
order_reversed = True
order_by = order_by.replace("reversed-", "", 1)
else:
order_reversed = False
if order_by == "basename":
content_list.sort(
key=lambda x: os.path.basename(x.source_path or ""),
reverse=order_reversed,
)
else:
try:
content_list.sort(key=attrgetter(order_by), reverse=order_reversed)
except AttributeError:
for content in content_list:
try:
getattr(content, order_by)
except AttributeError:
logger.warning(
'There is no "%s" attribute in "%s". '
"Defaulting to slug order.",
order_by,
content.get_relative_source_path(),
extra={
"limit_msg": (
"More files are missing "
"the needed attribute."
)
},
)
else:
logger.warning(
"Invalid *_ORDER_BY setting (%s). "
"Valid options are strings and functions.",
order_by,
)
return content_list
def wait_for_changes(
settings_file: str,
settings: Settings,
) -> set[tuple[Change, str]]:
content_path = settings.get("PATH", "")
theme_path = settings.get("THEME", "")
ignore_files = {
fnmatch.translate(pattern) for pattern in settings.get("IGNORE_FILES", [])
}
candidate_paths = [
settings_file,
theme_path,
content_path,
]
candidate_paths.extend(
os.path.join(content_path, path) for path in settings.get("STATIC_PATHS", [])
)
watching_paths = []
for path in candidate_paths:
if not path:
continue
path = os.path.abspath(path)
if not os.path.exists(path):
logger.warning("Unable to watch path '%s' as it does not exist.", path)
else:
watching_paths.append(path)
return next(
watchfiles.watch(
*watching_paths,
watch_filter=watchfiles.DefaultFilter(ignore_entity_patterns=ignore_files), # type: ignore
rust_timeout=0,
)
)
def set_date_tzinfo(
d: datetime.datetime, tz_name: str | None = None
) -> datetime.datetime:
"""Set the timezone for dates that don't have tzinfo"""
if tz_name and not d.tzinfo:
timezone = ZoneInfo(tz_name)
d = d.replace(tzinfo=timezone)
return SafeDatetime(
d.year, d.month, d.day, d.hour, d.minute, d.second, d.microsecond, d.tzinfo
)
return d
def mkdir_p(path: str) -> None:
os.makedirs(path, exist_ok=True)
def split_all(path: str | pathlib.Path | None) -> Sequence[str] | None:
"""Split a path into a list of components
While os.path.split() splits a single component off the back of
`path`, this function splits all components:
>>> split_all(os.path.join('a', 'b', 'c'))
['a', 'b', 'c']
"""
if isinstance(path, str):
components = []
path = path.lstrip("/")
while path:
head, tail = os.path.split(path)
if tail:
components.insert(0, tail)
elif head == path:
components.insert(0, head)
break
path = head
return components
elif isinstance(path, pathlib.Path):
return path.parts
elif path is None:
return None
else:
raise TypeError(
f'"path" was {type(path)}, must be string, None, or pathlib.Path'
)
def path_to_file_url(path: str) -> str:
"""Convert file-system path to file:// URL"""
return urllib.parse.urljoin("file://", urllib.request.pathname2url(path))
def maybe_pluralize(count: int, singular: str, plural: str) -> str:
"""
Returns a formatted string containing count and plural if count is not 1
Returns count and singular if count is 1
maybe_pluralize(0, 'Article', 'Articles') -> '0 Articles'
maybe_pluralize(1, 'Article', 'Articles') -> '1 Article'
maybe_pluralize(2, 'Article', 'Articles') -> '2 Articles'
"""
selection = plural
if count == 1:
selection = singular
return f"{count} {selection}"
@contextmanager
def temporary_locale(
temp_locale: str | None = None, lc_category: int = locale.LC_ALL
) -> Generator[None, None, None]:
"""
Enable code to run in a context with a temporary locale
Resets the locale back when exiting context.
Use tests.support.TestCaseWithCLocale if you want every unit test in a
class to use the C locale.
"""
orig_locale = locale.setlocale(lc_category)
    # Python has an issue with the Turkish_Türkiye.1254 locale; replace it
    # with an accepted alternative: Turkish
if orig_locale == "Turkish_Türkiye.1254":
orig_locale = "Turkish"
if temp_locale:
locale.setlocale(lc_category, temp_locale)
yield
locale.setlocale(lc_category, orig_locale)
def file_suffix(path: str) -> str:
"""Return the suffix of a filename in a path."""
_, ext = os.path.splitext(os.path.basename(path))
ret = ""
if len(ext) > 1:
# drop the ".", e.g., "exe", not ".exe"
ret = ext[1:]
return ret

size: 31,088 | language: Python | extension: .py | total_lines: 773 | avg_line_length: 31.311772 | max_line_length: 103 | alphanum_fraction: 0.602808 | repo_name: getpelican/pelican | repo_stars: 12,478 | repo_forks: 1,806 | repo_open_issues: 72 | repo_license: AGPL-3.0 | repo_extraction_date: 9/5/2024, 5:09:45 PM (Europe/Amsterdam)

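A short sketch (not from the source) exercising two of the helpers above, with the default slug substitutions pulled from pelican.settings:

```python
# Sketch: slugify() with the default substitutions, and HTML-aware truncation.
from pelican.settings import DEFAULT_CONFIG
from pelican.utils import slugify, truncate_html_words

subs = DEFAULT_CONFIG["SLUG_REGEX_SUBSTITUTIONS"]
print(slugify("Hello, Wörld!", regex_subs=subs))  # "hello-world"

html = "<p>one two three four five six</p>"
# Keeps three words, appends the end marker, and re-closes the open <p> tag.
print(truncate_html_words(html, 3))  # "<p>one two three …</p>"
```
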
id: 6,204 | file_name: cache.py | file_path: getpelican_pelican/pelican/cache.py

import hashlib
import logging
import os
import pickle
from pelican.utils import mkdir_p
logger = logging.getLogger(__name__)
class FileDataCacher:
"""Class that can cache data contained in files"""
def __init__(self, settings, cache_name, caching_policy, load_policy):
"""Load the specified cache within CACHE_PATH in settings
only if *load_policy* is True,
May use gzip if GZIP_CACHE ins settings is True.
Sets caching policy according to *caching_policy*.
"""
self.settings = settings
self._cache_path = os.path.join(self.settings["CACHE_PATH"], cache_name)
self._cache_data_policy = caching_policy
if self.settings["GZIP_CACHE"]:
import gzip
self._cache_open = gzip.open
else:
self._cache_open = open
if load_policy:
try:
with self._cache_open(self._cache_path, "rb") as fhandle:
self._cache = pickle.load(fhandle)
except (OSError, UnicodeDecodeError) as err:
logger.debug(
"Cannot load cache %s (this is normal on first "
"run). Proceeding with empty cache.\n%s",
self._cache_path,
err,
)
self._cache = {}
except pickle.PickleError as err:
logger.warning(
"Cannot unpickle cache %s, cache may be using "
"an incompatible protocol (see pelican "
"caching docs). "
"Proceeding with empty cache.\n%s",
self._cache_path,
err,
)
self._cache = {}
else:
self._cache = {}
def cache_data(self, filename, data):
"""Cache data for given file"""
if self._cache_data_policy:
self._cache[filename] = data
def get_cached_data(self, filename, default=None):
"""Get cached data for the given file
if no data is cached, return the default object
"""
return self._cache.get(filename, default)
def save_cache(self):
"""Save the updated cache"""
if self._cache_data_policy:
try:
mkdir_p(self.settings["CACHE_PATH"])
with self._cache_open(self._cache_path, "wb") as fhandle:
pickle.dump(self._cache, fhandle)
except (OSError, pickle.PicklingError, TypeError) as err:
logger.warning(
"Could not save cache %s\n ... %s", self._cache_path, err
)
class FileStampDataCacher(FileDataCacher):
"""Subclass that also caches the stamp of the file"""
def __init__(self, settings, cache_name, caching_policy, load_policy):
"""This subclass additionally sets filestamp function
and base path for filestamping operations
"""
super().__init__(settings, cache_name, caching_policy, load_policy)
method = self.settings["CHECK_MODIFIED_METHOD"]
if method == "mtime":
self._filestamp_func = os.path.getmtime
else:
try:
hash_func = getattr(hashlib, method)
def filestamp_func(filename):
"""return hash of file contents"""
with open(filename, "rb") as fhandle:
return hash_func(fhandle.read()).digest()
self._filestamp_func = filestamp_func
except AttributeError as err:
logger.warning("Could not get hashing function\n\t%s", err)
self._filestamp_func = None
def cache_data(self, filename, data):
"""Cache stamp and data for the given file"""
stamp = self._get_file_stamp(filename)
super().cache_data(filename, (stamp, data))
def _get_file_stamp(self, filename):
"""Check if the given file has been modified
since the previous build.
depending on CHECK_MODIFIED_METHOD
a float may be returned for 'mtime',
a hash for a function name in the hashlib module
or an empty bytes string otherwise
"""
try:
return self._filestamp_func(filename)
except (OSError, TypeError) as err:
logger.warning("Cannot get modification stamp for %s\n\t%s", filename, err)
return ""
def get_cached_data(self, filename, default=None):
"""Get the cached data for the given filename
if the file has not been modified.
If no record exists or file has been modified, return default.
Modification is checked by comparing the cached
and current file stamp.
"""
stamp, data = super().get_cached_data(filename, (None, default))
if stamp != self._get_file_stamp(filename):
return default
return data

size: 4,960 | language: Python | extension: .py | total_lines: 115 | avg_line_length: 31.408696 | max_line_length: 87 | alphanum_fraction: 0.575726 | repo_name: getpelican/pelican | repo_stars: 12,478 | repo_forks: 1,806 | repo_open_issues: 72 | repo_license: AGPL-3.0 | repo_extraction_date: 9/5/2024, 5:09:45 PM (Europe/Amsterdam)

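A hedged sketch of the stamp-based cache above; the cache name "demo" and the file path are hypothetical:

```python
# Sketch of FileStampDataCacher usage (hypothetical cache name and file path).
from pelican.cache import FileStampDataCacher

settings = {
    "CACHE_PATH": "cache",
    "GZIP_CACHE": True,
    "CHECK_MODIFIED_METHOD": "mtime",  # or any hashlib algorithm name
}
cacher = FileStampDataCacher(settings, "demo", caching_policy=True, load_policy=True)

cacher.cache_data("content/article.md", {"title": "Hello"})
# Returns the data only while the file's stamp matches the cached one.
print(cacher.get_cached_data("content/article.md"))
cacher.save_cache()  # writes the (gzip-)pickled cache to cache/demo
```
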
id: 6,205 | file_name: __init__.py | file_path: getpelican_pelican/pelican/__init__.py

import argparse
import importlib.metadata
import json
import logging
import multiprocessing
import os
import pprint
import sys
import time
import traceback
from collections.abc import Iterable
# Combines all paths to `pelican` package accessible from `sys.path`
# Makes it possible to install `pelican` and namespace plugins into different
# locations in the file system (e.g. pip with `-e` or `--user`)
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
# pelican.log has to be the first pelican module to be loaded
# because logging.setLoggerClass has to be called before logging.getLogger
from pelican.log import console, DEFAULT_LOG_HANDLER # noqa: I001
from pelican.log import init as init_logging
from pelican.generators import (
ArticlesGenerator,
PagesGenerator,
SourceFileGenerator,
StaticGenerator,
TemplatePagesGenerator,
)
from pelican.plugins import signals
from pelican.plugins._utils import get_plugin_name, load_plugins
from pelican.server import ComplexHTTPRequestHandler, RootedHTTPServer
from pelican.settings import read_settings
from pelican.utils import clean_output_dir, maybe_pluralize, wait_for_changes
from pelican.writers import Writer
try:
__version__ = importlib.metadata.version("pelican")
except Exception:
__version__ = "unknown"
DEFAULT_CONFIG_NAME = "pelicanconf.py"
logger = logging.getLogger(__name__)
class Pelican:
def __init__(self, settings):
"""Pelican initialization
Performs some checks on the environment before doing anything else.
"""
# define the default settings
self.settings = settings
self.path = settings["PATH"]
self.theme = settings["THEME"]
self.output_path = settings["OUTPUT_PATH"]
self.ignore_files = settings["IGNORE_FILES"]
self.delete_outputdir = settings["DELETE_OUTPUT_DIRECTORY"]
self.output_retention = settings["OUTPUT_RETENTION"]
self.init_path()
self.init_plugins()
signals.initialized.send(self)
def init_path(self):
if not any(p in sys.path for p in ["", os.curdir]):
logger.debug("Adding current directory to system path")
sys.path.insert(0, "")
def init_plugins(self):
self.plugins = []
for plugin in load_plugins(self.settings):
name = get_plugin_name(plugin)
logger.debug("Registering plugin `%s`", name)
try:
plugin.register()
self.plugins.append(plugin)
except Exception as e:
logger.error(
"Cannot register plugin `%s`\n%s",
name,
e,
stacklevel=2,
)
if self.settings.get("DEBUG", False):
console.print_exception()
self.settings["PLUGINS"] = [get_plugin_name(p) for p in self.plugins]
def run(self):
"""Run the generators and return"""
start_time = time.time()
context = self.settings.copy()
# Share these among all the generators and content objects
# They map source paths to Content objects or None
context["generated_content"] = {}
context["static_links"] = set()
context["static_content"] = {}
context["localsiteurl"] = self.settings["SITEURL"]
generators = [
cls(
context=context,
settings=self.settings,
path=self.path,
theme=self.theme,
output_path=self.output_path,
)
for cls in self._get_generator_classes()
]
# Delete the output directory if (1) the appropriate setting is True
# and (2) that directory is not the parent of the source directory
if self.delete_outputdir and os.path.commonpath(
[os.path.realpath(self.output_path)]
) != os.path.commonpath(
[os.path.realpath(self.output_path), os.path.realpath(self.path)]
):
clean_output_dir(self.output_path, self.output_retention)
for p in generators:
if hasattr(p, "generate_context"):
p.generate_context()
if hasattr(p, "check_disabled_readers"):
p.check_disabled_readers()
# for plugins that create/edit the summary
logger.debug("Signal all_generators_finalized.send(<generators>)")
signals.all_generators_finalized.send(generators)
# update links in the summary, etc
for p in generators:
if hasattr(p, "refresh_metadata_intersite_links"):
p.refresh_metadata_intersite_links()
writer = self._get_writer()
for p in generators:
if hasattr(p, "generate_output"):
p.generate_output(writer)
signals.finalized.send(self)
articles_generator = next(
g for g in generators if isinstance(g, ArticlesGenerator)
)
pages_generator = next(g for g in generators if isinstance(g, PagesGenerator))
pluralized_articles = maybe_pluralize(
(len(articles_generator.articles) + len(articles_generator.translations)),
"article",
"articles",
)
pluralized_drafts = maybe_pluralize(
(
len(articles_generator.drafts)
+ len(articles_generator.drafts_translations)
),
"draft",
"drafts",
)
pluralized_hidden_articles = maybe_pluralize(
(
len(articles_generator.hidden_articles)
+ len(articles_generator.hidden_translations)
),
"hidden article",
"hidden articles",
)
pluralized_pages = maybe_pluralize(
(len(pages_generator.pages) + len(pages_generator.translations)),
"page",
"pages",
)
pluralized_hidden_pages = maybe_pluralize(
(
len(pages_generator.hidden_pages)
+ len(pages_generator.hidden_translations)
),
"hidden page",
"hidden pages",
)
pluralized_draft_pages = maybe_pluralize(
(
len(pages_generator.draft_pages)
+ len(pages_generator.draft_translations)
),
"draft page",
"draft pages",
)
console.print(
f"Done: Processed {pluralized_articles}, {pluralized_drafts}, {pluralized_hidden_articles}, {pluralized_pages}, {pluralized_hidden_pages} and {pluralized_draft_pages} in {time.time() - start_time:.2f} seconds."
)
def _get_generator_classes(self):
discovered_generators = [
(ArticlesGenerator, "internal"),
(PagesGenerator, "internal"),
]
if self.settings["TEMPLATE_PAGES"]:
discovered_generators.append((TemplatePagesGenerator, "internal"))
if self.settings["OUTPUT_SOURCES"]:
discovered_generators.append((SourceFileGenerator, "internal"))
for receiver, values in signals.get_generators.send(self):
if not isinstance(values, Iterable):
values = (values,)
for generator in values:
if generator is None:
continue # plugin did not return a generator
discovered_generators.append((generator, receiver.__module__))
# StaticGenerator must run last, so it can identify files that
# were skipped by the other generators, and so static files can
# have their output paths overridden by the {attach} link syntax.
discovered_generators.append((StaticGenerator, "internal"))
generators = []
for generator, origin in discovered_generators:
if not isinstance(generator, type):
logger.error("Generator %s (%s) cannot be loaded", generator, origin)
continue
logger.debug("Found generator: %s (%s)", generator.__name__, origin)
generators.append(generator)
return generators
def _get_writer(self):
writers = [w for _, w in signals.get_writer.send(self) if isinstance(w, type)]
num_writers = len(writers)
if num_writers == 0:
return Writer(self.output_path, settings=self.settings)
if num_writers > 1:
logger.warning("%s writers found, using only first one", num_writers)
writer = writers[0]
logger.debug("Found writer: %s (%s)", writer.__name__, writer.__module__)
return writer(self.output_path, settings=self.settings)
class PrintSettings(argparse.Action):
def __call__(self, parser, namespace, values, option_string):
init_logging(name=__name__)
try:
instance, settings = get_instance(namespace)
except Exception as e:
logger.critical("%s: %s", e.__class__.__name__, e)
console.print_exception()
sys.exit(getattr(e, "exitcode", 1))
if values:
# One or more arguments provided, so only print those settings
for setting in values:
if setting in settings:
# Only add newline between setting name and value if dict
if isinstance(settings[setting], (dict, tuple, list)):
setting_format = "\n{}:\n{}"
else:
setting_format = "\n{}: {}"
console.print(
setting_format.format(
setting, pprint.pformat(settings[setting])
)
)
else:
console.print(f"\n{setting} is not a recognized setting.")
break
else:
# No argument was given to --print-settings, so print all settings
console.print(settings)
parser.exit()
class ParseOverrides(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
overrides = {}
for item in values:
try:
k, v = item.split("=", 1)
except ValueError:
raise ValueError(
"Extra settings must be specified as KEY=VALUE pairs "
f"but you specified {item}"
) from None
try:
overrides[k] = json.loads(v)
except json.decoder.JSONDecodeError:
raise ValueError(
f"Invalid JSON value: {v}. "
"Values specified via -e / --extra-settings flags "
"must be in JSON notation. "
"Use -e KEY='\"string\"' to specify a string value; "
"-e KEY=null to specify None; "
"-e KEY=false (or true) to specify False (or True)."
) from None
setattr(namespace, self.dest, overrides)
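
# Illustrative example (hypothetical invocation) of how ParseOverrides treats
# `-e` values as JSON:
#
#     pelican content -e SITENAME='"My Site"' RELATIVE_URLS=true TIMEZONE=null
#
# parses to {"SITENAME": "My Site", "RELATIVE_URLS": True, "TIMEZONE": None},
# since each VALUE is passed through json.loads().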
LOG_HANDLERS = {"plain": None, "rich": DEFAULT_LOG_HANDLER}
def parse_arguments(argv=None):
parser = argparse.ArgumentParser(
description="A tool to generate a static blog, "
" with restructured text input files.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
dest="path",
nargs="?",
help="Path where to find the content files.",
default=None,
)
parser.add_argument(
"-t",
"--theme-path",
dest="theme",
help="Path where to find the theme templates. If not "
"specified, it will use the default one included with "
"pelican.",
)
parser.add_argument(
"-o",
"--output",
dest="output",
help="Where to output the generated files. If not "
"specified, a directory will be created, named "
'"output" in the current path.',
)
parser.add_argument(
"-s",
"--settings",
dest="settings",
help="The settings of the application, this is "
f"automatically set to {DEFAULT_CONFIG_NAME} if a file exists with this "
"name.",
)
parser.add_argument(
"-d",
"--delete-output-directory",
dest="delete_outputdir",
action="store_true",
default=None,
help="Delete the output directory.",
)
parser.add_argument(
"-v",
"--verbose",
action="store_const",
const=logging.INFO,
dest="verbosity",
help="Show all messages.",
)
parser.add_argument(
"-q",
"--quiet",
action="store_const",
const=logging.CRITICAL,
dest="verbosity",
help="Show only critical errors.",
)
parser.add_argument(
"-D",
"--debug",
action="store_const",
const=logging.DEBUG,
dest="verbosity",
help="Show all messages, including debug messages.",
)
parser.add_argument(
"--version",
action="version",
version=__version__,
help="Print the pelican version and exit.",
)
parser.add_argument(
"-r",
"--autoreload",
dest="autoreload",
action="store_true",
help="Relaunch pelican each time a modification occurs"
" on the content files.",
)
parser.add_argument(
"--print-settings",
dest="print_settings",
nargs="*",
action=PrintSettings,
metavar="SETTING_NAME",
help="Print current configuration settings and exit. "
"Append one or more setting name arguments to see the "
"values for specific settings only.",
)
parser.add_argument(
"--relative-urls",
dest="relative_paths",
action="store_true",
help="Use relative urls in output, useful for site development",
)
parser.add_argument(
"--cache-path",
dest="cache_path",
help=(
"Directory in which to store cache files. "
'If not specified, defaults to "cache".'
),
)
parser.add_argument(
"--ignore-cache",
action="store_true",
dest="ignore_cache",
help="Ignore content cache from previous runs by not loading cache files.",
)
parser.add_argument(
"--fatal",
metavar="errors|warnings",
choices=("errors", "warnings"),
default="",
help=(
"Exit the program with non-zero status if any "
"errors/warnings encountered."
),
)
parser.add_argument(
"--log-handler",
default="rich",
choices=LOG_HANDLERS,
help=(
"Which handler to use to format log messages. "
"The `rich` handler prints output in columns."
),
)
parser.add_argument(
"--logs-dedup-min-level",
default="WARNING",
choices=("DEBUG", "INFO", "WARNING", "ERROR"),
help=(
"Only enable log de-duplication for levels equal"
" to or above the specified value"
),
)
parser.add_argument(
"-l",
"--listen",
dest="listen",
action="store_true",
help="Serve content files via HTTP and port 8000.",
)
parser.add_argument(
"-p",
"--port",
dest="port",
type=int,
help="Port to serve HTTP files at. (default: 8000)",
)
parser.add_argument(
"-b",
"--bind",
dest="bind",
help="IP to bind to when serving files via HTTP (default: 127.0.0.1)",
)
parser.add_argument(
"-e",
"--extra-settings",
dest="overrides",
help="Specify one or more SETTING=VALUE pairs to "
"override settings. VALUE must be in JSON notation: "
"specify string values as SETTING='\"some string\"'; "
"booleans as SETTING=true or SETTING=false; "
"None as SETTING=null.",
nargs="*",
action=ParseOverrides,
default={},
)
args = parser.parse_args(argv)
if args.port is not None and not args.listen:
logger.warning("--port without --listen has no effect")
if args.bind is not None and not args.listen:
logger.warning("--bind without --listen has no effect")
return args
def get_config(args):
"""Builds a config dictionary based on supplied `args`."""
config = {}
if args.path:
config["PATH"] = os.path.abspath(os.path.expanduser(args.path))
if args.output:
config["OUTPUT_PATH"] = os.path.abspath(os.path.expanduser(args.output))
if args.theme:
abstheme = os.path.abspath(os.path.expanduser(args.theme))
config["THEME"] = abstheme if os.path.exists(abstheme) else args.theme
if args.delete_outputdir is not None:
config["DELETE_OUTPUT_DIRECTORY"] = args.delete_outputdir
if args.ignore_cache:
config["LOAD_CONTENT_CACHE"] = False
if args.cache_path:
config["CACHE_PATH"] = args.cache_path
if args.relative_paths:
config["RELATIVE_URLS"] = args.relative_paths
if args.port is not None:
config["PORT"] = args.port
if args.bind is not None:
config["BIND"] = args.bind
config["DEBUG"] = args.verbosity == logging.DEBUG
config.update(args.overrides)
return config
def get_instance(args):
config_file = args.settings
if config_file is None and os.path.isfile(DEFAULT_CONFIG_NAME):
config_file = DEFAULT_CONFIG_NAME
args.settings = DEFAULT_CONFIG_NAME
settings = read_settings(config_file, override=get_config(args))
cls = settings["PELICAN_CLASS"]
if isinstance(cls, str):
module, cls_name = cls.rsplit(".", 1)
module = __import__(module)
cls = getattr(module, cls_name)
return cls(settings), settings
def autoreload(args, excqueue=None):
console.print(
" --- AutoReload Mode: Monitoring `content`, `theme` and"
" `settings` for changes. ---"
)
pelican, settings = get_instance(args)
settings_file = os.path.abspath(args.settings)
while True:
try:
pelican.run()
changed_files = wait_for_changes(args.settings, settings)
changed_files = {c[1] for c in changed_files}
if settings_file in changed_files:
pelican, settings = get_instance(args)
console.print(
"\n-> Modified: {}. re-generating...".format(", ".join(changed_files))
)
except KeyboardInterrupt:
if excqueue is not None:
excqueue.put(None)
return
raise
except Exception as e:
if args.verbosity == logging.DEBUG:
if excqueue is not None:
excqueue.put(traceback.format_exception_only(type(e), e)[-1])
else:
raise
logger.warning(
'Caught exception:\n"%s".', e, exc_info=settings.get("DEBUG", False)
)
def listen(server, port, output, excqueue=None):
# set logging level to at least "INFO" (so we can see the server requests)
if logger.level < logging.INFO:
logger.setLevel(logging.INFO)
RootedHTTPServer.allow_reuse_address = True
try:
httpd = RootedHTTPServer(output, (server, port), ComplexHTTPRequestHandler)
except OSError as e:
        logger.error("Could not listen on port %s, server %s.", port, server)
if excqueue is not None:
excqueue.put(traceback.format_exception_only(type(e), e)[-1])
return
try:
console.print(f"Serving site at: http://{server}:{port} - Tap CTRL-C to stop")
httpd.serve_forever()
except Exception as e:
if excqueue is not None:
excqueue.put(traceback.format_exception_only(type(e), e)[-1])
return
except KeyboardInterrupt:
httpd.socket.close()
if excqueue is not None:
return
raise
def main(argv=None):
args = parse_arguments(argv)
logs_dedup_min_level = getattr(logging, args.logs_dedup_min_level)
init_logging(
level=args.verbosity,
fatal=args.fatal,
name=__name__,
handler=LOG_HANDLERS[args.log_handler],
logs_dedup_min_level=logs_dedup_min_level,
)
logger.debug("Pelican version: %s", __version__)
logger.debug("Python version: %s", sys.version.split()[0])
try:
pelican, settings = get_instance(args)
if args.autoreload and args.listen:
excqueue = multiprocessing.Queue()
p1 = multiprocessing.Process(target=autoreload, args=(args, excqueue))
p2 = multiprocessing.Process(
target=listen,
args=(
settings.get("BIND"),
settings.get("PORT"),
settings.get("OUTPUT_PATH"),
excqueue,
),
)
try:
p1.start()
p2.start()
exc = excqueue.get()
if exc is not None:
logger.critical(exc)
finally:
p1.terminate()
p2.terminate()
elif args.autoreload:
autoreload(args)
elif args.listen:
listen(
settings.get("BIND"), settings.get("PORT"), settings.get("OUTPUT_PATH")
)
else:
with console.status("Generating..."):
pelican.run()
except KeyboardInterrupt:
logger.warning("Keyboard interrupt received. Exiting.")
except Exception as e:
logger.critical("%s: %s", e.__class__.__name__, e)
if args.verbosity == logging.DEBUG:
console.print_exception()
sys.exit(getattr(e, "exitcode", 1))
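
# A minimal programmatic sketch of what main() does, without argument parsing
# or autoreload (illustrative; assumes a pelicanconf.py exists in the current
# directory):
#
#     from pelican import Pelican
#     from pelican.settings import read_settings
#
#     settings = read_settings("pelicanconf.py")
#     Pelican(settings).run()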
| 22,040 | Python | .py | 585 | 27.694017 | 222 | 0.58418 | getpelican/pelican | 12,478 | 1,806 | 72 | AGPL-3.0 | 9/5/2024, 5:09:45 PM (Europe/Amsterdam) |

| 6,206 | log.py | getpelican_pelican/pelican/log.py |
import logging
from collections import defaultdict
from rich.console import Console
from rich.logging import RichHandler
__all__ = ["init"]
console = Console()
class LimitFilter(logging.Filter):
"""
    Remove duplicate records, and limit the number of records in the same
    group.
    Groups are specified by the message to use when the number of records in
    the same group hits the limit.
    E.g.: logger.warning('43 is not the answer', extra={'limit_msg': 'More erroneous answers'})
"""
LOGS_DEDUP_MIN_LEVEL = logging.WARNING
_ignore = set()
_raised_messages = set()
_threshold = 5
_group_count = defaultdict(int)
def filter(self, record):
# don't limit log messages for anything above "warning"
if record.levelno > self.LOGS_DEDUP_MIN_LEVEL:
return True
# extract group
group = record.__dict__.get("limit_msg", None)
group_args = record.__dict__.get("limit_args", ())
# ignore record if it was already raised
message_key = (record.levelno, record.getMessage())
if message_key in self._raised_messages:
return False
else:
self._raised_messages.add(message_key)
# ignore LOG_FILTER records by templates or messages
# when "debug" isn't enabled
logger_level = logging.getLogger().getEffectiveLevel()
if logger_level > logging.DEBUG:
template_key = (record.levelno, record.msg)
message_key = (record.levelno, record.getMessage())
if template_key in self._ignore or message_key in self._ignore:
return False
# check if we went over threshold
if group:
key = (record.levelno, group)
self._group_count[key] += 1
if self._group_count[key] == self._threshold:
record.msg = group
record.args = group_args
elif self._group_count[key] > self._threshold:
return False
return True
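
# Illustrative use of the grouping mechanism above (`image_name` is a
# placeholder): once five records share the same `limit_msg` group, the group
# message is emitted and any further records in that group are dropped:
#
#     logger.warning(
#         "Empty alt attribute for image %s", image_name,
#         extra={"limit_msg": "Other images have empty alt attributes"},
#     )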
class LimitLogger(logging.Logger):
"""
A logger which adds LimitFilter automatically
"""
limit_filter = LimitFilter()
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.enable_filter()
def disable_filter(self):
self.removeFilter(LimitLogger.limit_filter)
def enable_filter(self):
self.addFilter(LimitLogger.limit_filter)
class FatalLogger(LimitLogger):
warnings_fatal = False
errors_fatal = False
def warning(self, *args, stacklevel=1, **kwargs):
"""
Displays a logging warning.
Wrapping it here allows Pelican to filter warnings, and conditionally
make warnings fatal.
Args:
stacklevel (int): the stacklevel that would be used to display the
calling location, except for this function. Adjusting the
stacklevel allows you to see the "true" calling location of the
warning, rather than this wrapper location.
"""
stacklevel += 1
super().warning(*args, stacklevel=stacklevel, **kwargs)
if FatalLogger.warnings_fatal:
raise RuntimeError("Warning encountered")
def error(self, *args, stacklevel=1, **kwargs):
"""
Displays a logging error.
Wrapping it here allows Pelican to filter errors, and conditionally
        make errors fatal.
Args:
stacklevel (int): the stacklevel that would be used to display the
calling location, except for this function. Adjusting the
stacklevel allows you to see the "true" calling location of the
error, rather than this wrapper location.
"""
stacklevel += 1
super().error(*args, stacklevel=stacklevel, **kwargs)
if FatalLogger.errors_fatal:
raise RuntimeError("Error encountered")
logging.setLoggerClass(FatalLogger)
# force root logger to be of our preferred class
logging.getLogger().__class__ = FatalLogger
DEFAULT_LOG_HANDLER = RichHandler(console=console)
def init(
level=None,
fatal="",
handler=DEFAULT_LOG_HANDLER,
name=None,
logs_dedup_min_level=None,
):
FatalLogger.warnings_fatal = fatal.startswith("warning")
FatalLogger.errors_fatal = bool(fatal)
LOG_FORMAT = "%(message)s"
logging.basicConfig(
level=level,
format=LOG_FORMAT,
datefmt="[%H:%M:%S]",
handlers=[handler] if handler else [],
)
logger = logging.getLogger(name)
if level:
logger.setLevel(level)
if logs_dedup_min_level:
LimitFilter.LOGS_DEDUP_MIN_LEVEL = logs_dedup_min_level
def log_warnings():
import warnings
logging.captureWarnings(True)
warnings.simplefilter("default", DeprecationWarning)
init(logging.DEBUG, name="py.warnings")
if __name__ == "__main__":
init(level=logging.DEBUG, name=__name__)
root_logger = logging.getLogger(__name__)
root_logger.debug("debug")
root_logger.info("info")
root_logger.warning("warning")
root_logger.error("error")
root_logger.critical("critical")
| 5,136 | Python | .py | 133 | 30.932331 | 78 | 0.650343 | getpelican/pelican | 12,478 | 1,806 | 72 | AGPL-3.0 | 9/5/2024, 5:09:45 PM (Europe/Amsterdam) |

| 6,207 | contents.py | getpelican_pelican/pelican/contents.py |
import copy
import datetime
import locale
import logging
import os
import re
from datetime import timezone
from html import unescape
from typing import Any, Dict, Optional, Set, Tuple
from urllib.parse import ParseResult, unquote, urljoin, urlparse, urlunparse
try:
from zoneinfo import ZoneInfo
except ModuleNotFoundError:
from backports.zoneinfo import ZoneInfo
from pelican.plugins import signals
from pelican.settings import DEFAULT_CONFIG, Settings
# Import these so that they're available when you import from pelican.contents.
from pelican.urlwrappers import Author, Category, Tag, URLWrapper # NOQA
from pelican.utils import (
deprecated_attribute,
memoized,
path_to_url,
posixize_path,
sanitised_join,
set_date_tzinfo,
slugify,
truncate_html_paragraphs,
truncate_html_words,
)
logger = logging.getLogger(__name__)
class Content:
"""Represents a content.
:param content: the string to parse, containing the original content.
:param metadata: the metadata associated to this page (optional).
:param settings: the settings dictionary (optional).
:param source_path: The location of the source of this content (if any).
:param context: The shared context between generators.
"""
default_template: Optional[str] = None
mandatory_properties: Tuple[str, ...] = ()
@deprecated_attribute(old="filename", new="source_path", since=(3, 2, 0))
def filename():
return None
def __init__(
self,
content: str,
metadata: Optional[Dict[str, Any]] = None,
settings: Optional[Settings] = None,
source_path: Optional[str] = None,
context: Optional[Dict[Any, Any]] = None,
):
if metadata is None:
metadata = {}
if settings is None:
settings = copy.deepcopy(DEFAULT_CONFIG)
self.settings = settings
self._content = content
if context is None:
context = {}
self._context = context
self.translations = []
local_metadata = {}
local_metadata.update(metadata)
# set metadata as attributes
for key, value in local_metadata.items():
if key in ("save_as", "url"):
key = "override_" + key
setattr(self, key.lower(), value)
# also keep track of the metadata attributes available
self.metadata = local_metadata
# default template if it's not defined in page
self.template = self._get_template()
# First, read the authors from "authors", if not, fallback to "author"
# and if not use the settings defined one, if any.
if not hasattr(self, "author"):
if hasattr(self, "authors"):
self.author = self.authors[0]
elif "AUTHOR" in settings:
self.author = Author(settings["AUTHOR"], settings)
if not hasattr(self, "authors") and hasattr(self, "author"):
self.authors = [self.author]
# XXX Split all the following code into pieces, there is too much here.
# manage languages
self.in_default_lang = True
if "DEFAULT_LANG" in settings:
default_lang = settings["DEFAULT_LANG"].lower()
if not hasattr(self, "lang"):
self.lang = default_lang
self.in_default_lang = self.lang == default_lang
        # create the slug if not set, generating it according to
        # the SLUGIFY_SOURCE setting
if not hasattr(self, "slug"):
if settings["SLUGIFY_SOURCE"] == "title" and hasattr(self, "title"):
value = self.title
elif settings["SLUGIFY_SOURCE"] == "basename" and source_path is not None:
value = os.path.basename(os.path.splitext(source_path)[0])
else:
value = None
if value is not None:
self.slug = slugify(
value,
regex_subs=settings.get("SLUG_REGEX_SUBSTITUTIONS", []),
preserve_case=settings.get("SLUGIFY_PRESERVE_CASE", False),
use_unicode=settings.get("SLUGIFY_USE_UNICODE", False),
)
self.source_path = source_path
self.relative_source_path = self.get_relative_source_path()
# manage the date format
if not hasattr(self, "date_format"):
if hasattr(self, "lang") and self.lang in settings["DATE_FORMATS"]:
self.date_format = settings["DATE_FORMATS"][self.lang]
else:
self.date_format = settings["DEFAULT_DATE_FORMAT"]
if isinstance(self.date_format, tuple):
locale_string = self.date_format[0]
locale.setlocale(locale.LC_ALL, locale_string)
self.date_format = self.date_format[1]
# manage timezone
default_timezone = settings.get("TIMEZONE", "UTC")
timezone = getattr(self, "timezone", default_timezone)
self.timezone = ZoneInfo(timezone)
if hasattr(self, "date"):
self.date = set_date_tzinfo(self.date, timezone)
self.locale_date = self.date.strftime(self.date_format)
if hasattr(self, "modified"):
self.modified = set_date_tzinfo(self.modified, timezone)
self.locale_modified = self.modified.strftime(self.date_format)
# manage status
if not hasattr(self, "status"):
# Previous default of None broke comment plugins and perhaps others
self.status = getattr(self, "default_status", "")
# store the summary metadata if it is set
if "summary" in metadata:
self._summary = metadata["summary"]
signals.content_object_init.send(self)
def __str__(self) -> str:
return self.source_path or repr(self)
def _has_valid_mandatory_properties(self) -> bool:
"""Test mandatory properties are set."""
for prop in self.mandatory_properties:
if not hasattr(self, prop):
logger.error(
"Skipping %s: could not find information about '%s'", self, prop
)
return False
return True
def _has_valid_save_as(self) -> bool:
"""Return true if save_as doesn't write outside output path, false
otherwise."""
try:
output_path = self.settings["OUTPUT_PATH"]
except KeyError:
# we cannot check
return True
try:
sanitised_join(output_path, self.save_as)
except RuntimeError: # outside output_dir
logger.error(
"Skipping %s: file %r would be written outside output path",
self,
self.save_as,
)
return False
return True
def _has_valid_status(self) -> bool:
if hasattr(self, "allowed_statuses"):
if self.status not in self.allowed_statuses:
logger.error(
"Unknown status '%s' for file %s, skipping it. (Not in %s)",
self.status,
self,
self.allowed_statuses,
)
return False
# if undefined we allow all
return True
def is_valid(self) -> bool:
"""Validate Content"""
# Use all() to not short circuit and get results of all validations
return all(
[
self._has_valid_mandatory_properties(),
self._has_valid_save_as(),
self._has_valid_status(),
]
)
@property
def url_format(self) -> Dict[str, Any]:
"""Returns the URL, formatted with the proper values"""
metadata = copy.copy(self.metadata)
path = self.metadata.get("path", self.get_relative_source_path())
metadata.update(
{
"path": path_to_url(path),
"slug": getattr(self, "slug", ""),
"lang": getattr(self, "lang", "en"),
"date": getattr(self, "date", datetime.datetime.now()),
"author": self.author.slug if hasattr(self, "author") else "",
"category": self.category.slug if hasattr(self, "category") else "",
}
)
return metadata
def _expand_settings(self, key: str, klass: Optional[str] = None) -> str:
if not klass:
klass = self.__class__.__name__
fq_key = (f"{klass}_{key}").upper()
return str(self.settings[fq_key]).format(**self.url_format)
def get_url_setting(self, key: str) -> str:
if hasattr(self, "override_" + key):
return getattr(self, "override_" + key)
key = key if self.in_default_lang else f"lang_{key}"
return self._expand_settings(key)
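
    # For example (hypothetical settings): with PAGE_URL = "pages/{slug}.html",
    # get_url_setting("url") on a Page whose slug is "about" expands to
    # "pages/about.html" via _expand_settings().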
def _link_replacer(self, siteurl: str, m: re.Match) -> str:
what = m.group("what")
value = urlparse(m.group("value"))
path = value.path
origin = m.group("path")
# urllib.parse.urljoin() produces `a.html` for urljoin("..", "a.html")
# so if RELATIVE_URLS are enabled, we fall back to os.path.join() to
# properly get `../a.html`. However, os.path.join() produces
# `baz/http://foo/bar.html` for join("baz", "http://foo/bar.html")
# instead of correct "http://foo/bar.html", so one has to pick a side
# as there is no silver bullet.
if self.settings["RELATIVE_URLS"]:
joiner = os.path.join
else:
joiner = urljoin
# However, it's not *that* simple: urljoin("blog", "index.html")
# produces just `index.html` instead of `blog/index.html` (unlike
# os.path.join()), so in order to get a correct answer one needs to
# append a trailing slash to siteurl in that case. This also makes
# the new behavior fully compatible with Pelican 3.7.1.
if not siteurl.endswith("/"):
siteurl += "/"
# XXX Put this in a different location.
if what in {"filename", "static", "attach"}:
def _get_linked_content(key: str, url: ParseResult) -> Optional[Content]:
nonlocal value
def _find_path(path: str) -> Optional[Content]:
if path.startswith("/"):
path = path[1:]
else:
# relative to the source path of this content
path = self.get_relative_source_path( # type: ignore
os.path.join(self.relative_dir, path)
)
return self._context[key].get(path, None)
# try path
result = _find_path(url.path)
if result is not None:
return result
# try unquoted path
result = _find_path(unquote(url.path))
if result is not None:
return result
# try html unescaped url
unescaped_url = urlparse(unescape(url.geturl()))
result = _find_path(unescaped_url.path)
if result is not None:
value = unescaped_url
return result
# check if a static file is linked with {filename}
if what == "filename" and key == "generated_content":
linked_content = _get_linked_content("static_content", value)
if linked_content:
logger.warning(
"{filename} used for linking to static"
" content %s in %s. Use {static} instead",
value.path,
self.get_relative_source_path(),
)
return linked_content
return None
if what == "filename":
key = "generated_content"
else:
key = "static_content"
linked_content = _get_linked_content(key, value)
if linked_content:
if what == "attach":
linked_content.attach_to(self) # type: ignore
origin = joiner(siteurl, linked_content.url)
origin = origin.replace("\\", "/") # for Windows paths.
else:
logger.warning(
"Unable to find '%s', skipping url replacement.",
value.geturl(),
extra={
"limit_msg": (
"Other resources were not found "
"and their urls not replaced"
)
},
)
elif what == "category":
origin = joiner(siteurl, Category(path, self.settings).url)
elif what == "tag":
origin = joiner(siteurl, Tag(path, self.settings).url)
elif what == "index":
origin = joiner(siteurl, self.settings["INDEX_SAVE_AS"])
elif what == "author":
origin = joiner(siteurl, Author(path, self.settings).url)
else:
logger.warning(
"Replacement Indicator '%s' not recognized, skipping replacement",
what,
)
# keep all other parts, such as query, fragment, etc.
parts = list(value)
parts[2] = origin
origin = urlunparse(parts)
return "".join((m.group("markup"), m.group("quote"), origin, m.group("quote")))
def _get_intrasite_link_regex(self) -> re.Pattern:
intrasite_link_regex = self.settings["INTRASITE_LINK_REGEX"]
regex = rf"""
(?P<markup><[^\>]+ # match tag with all url-value attributes
(?:href|src|poster|data|cite|formaction|action|content)\s*=\s*)
(?P<quote>["\']) # require value to be quoted
(?P<path>{intrasite_link_regex}(?P<value>.*?)) # the url value
(?P=quote)"""
return re.compile(regex, re.X)
def _update_content(self, content: str, siteurl: str) -> str:
"""Update the content attribute.
Change all the relative paths of the content to relative paths
suitable for the output content.
:param content: content resource that will be passed to the templates.
:param siteurl: siteurl which is locally generated by the writer in
case of RELATIVE_URLS.
"""
if not content:
return content
hrefs = self._get_intrasite_link_regex()
return hrefs.sub(lambda m: self._link_replacer(siteurl, m), content)
def get_static_links(self) -> Set[str]:
static_links = set()
hrefs = self._get_intrasite_link_regex()
for m in hrefs.finditer(self._content):
what = m.group("what")
value = urlparse(m.group("value"))
path = value.path
if what not in {"static", "attach"}:
continue
if path.startswith("/"):
path = path[1:]
else:
# relative to the source path of this content
path = self.get_relative_source_path(
os.path.join(self.relative_dir, path)
)
path = path.replace("%20", " ") # type: ignore
static_links.add(path)
return static_links
def get_siteurl(self) -> str:
return self._context.get("localsiteurl", "")
@memoized
def get_content(self, siteurl: str) -> str:
if hasattr(self, "_get_content"):
content = self._get_content()
else:
content = self._content
return self._update_content(content, siteurl)
@property
def content(self) -> str:
return self.get_content(self.get_siteurl())
@memoized
def get_summary(self, siteurl: str) -> str:
"""Returns the summary of an article.
        This is based on the summary metadata if set; otherwise the
        content is truncated.
"""
if "summary" in self.metadata:
return self.metadata["summary"]
content = self.content
max_paragraphs = self.settings.get("SUMMARY_MAX_PARAGRAPHS")
if max_paragraphs is not None:
content = truncate_html_paragraphs(self.content, max_paragraphs)
if self.settings["SUMMARY_MAX_LENGTH"] is None:
return content
return truncate_html_words(
            content,
self.settings["SUMMARY_MAX_LENGTH"],
self.settings["SUMMARY_END_SUFFIX"],
)
@property
def summary(self) -> str:
return self.get_summary(self.get_siteurl())
def _get_summary(self) -> str:
"""deprecated function to access summary"""
logger.warning(
"_get_summary() has been deprecated since 3.6.4. "
"Use the summary decorator instead"
)
return self.summary
@summary.setter
def summary(self, value: str):
"""Dummy function"""
@property
def status(self) -> str:
return self._status
@status.setter
def status(self, value: str) -> None:
# TODO maybe typecheck
self._status = value.lower()
@property
def url(self) -> str:
return self.get_url_setting("url")
@property
def save_as(self) -> str:
return self.get_url_setting("save_as")
def _get_template(self) -> str:
if hasattr(self, "template") and self.template is not None:
return self.template
else:
return self.default_template
def get_relative_source_path(
self, source_path: Optional[str] = None
) -> Optional[str]:
"""Return the relative path (from the content path) to the given
source_path.
If no source path is specified, use the source path of this
content object.
"""
if not source_path:
source_path = self.source_path
if source_path is None:
return None
return posixize_path(
os.path.relpath(
os.path.abspath(os.path.join(self.settings["PATH"], source_path)),
os.path.abspath(self.settings["PATH"]),
)
)
@property
def relative_dir(self) -> str:
return posixize_path(
os.path.dirname(
os.path.relpath(
os.path.abspath(self.source_path),
os.path.abspath(self.settings["PATH"]),
)
)
)
def refresh_metadata_intersite_links(self) -> None:
for key in self.settings["FORMATTED_FIELDS"]:
if key in self.metadata and key != "summary":
value = self._update_content(self.metadata[key], self.get_siteurl())
self.metadata[key] = value
setattr(self, key.lower(), value)
# _summary is an internal variable that some plugins may be writing to,
# so ensure changes to it are picked up, and write summary back to it
if "summary" in self.settings["FORMATTED_FIELDS"]:
if hasattr(self, "_summary"):
self.metadata["summary"] = self._summary
if "summary" in self.metadata:
self.metadata["summary"] = self._update_content(
self.metadata["summary"], self.get_siteurl()
)
self._summary = self.metadata["summary"]
class SkipStub(Content):
"""Stub class representing content that should not be processed in any way."""
def __init__(
self, content, metadata=None, settings=None, source_path=None, context=None
):
self.source_path = source_path
def is_valid(self):
return False
@property
def content(self):
raise NotImplementedError("Stub content should not be read")
@property
def save_as(self):
raise NotImplementedError("Stub content cannot be saved")
class Page(Content):
mandatory_properties = ("title",)
allowed_statuses = ("published", "hidden", "draft", "skip")
default_status = "published"
default_template = "page"
def _expand_settings(self, key: str) -> str:
klass = "draft_page" if self.status == "draft" else None
return super()._expand_settings(key, klass)
class Article(Content):
mandatory_properties = ("title", "date", "category")
allowed_statuses = ("published", "hidden", "draft", "skip")
default_status = "published"
default_template = "article"
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# handle WITH_FUTURE_DATES (designate article to draft based on date)
if not self.settings["WITH_FUTURE_DATES"] and hasattr(self, "date"):
if self.date.tzinfo is None:
now = datetime.datetime.now()
else:
now = datetime.datetime.utcnow().replace(tzinfo=timezone.utc)
if self.date > now:
self.status = "draft"
# if we are a draft and there is no date provided, set max datetime
if not hasattr(self, "date") and self.status == "draft":
self.date = datetime.datetime.max.replace(tzinfo=self.timezone)
def _expand_settings(self, key: str) -> str:
klass = "draft" if self.status == "draft" else "article"
return super()._expand_settings(key, klass)
class Static(Content):
mandatory_properties = ("title",)
default_status = "published"
default_template = None
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self._output_location_referenced = False
@deprecated_attribute(old="filepath", new="source_path", since=(3, 2, 0))
def filepath():
return None
@deprecated_attribute(old="src", new="source_path", since=(3, 2, 0))
def src():
return None
@deprecated_attribute(old="dst", new="save_as", since=(3, 2, 0))
def dst():
return None
@property
def url(self) -> str:
# Note when url has been referenced, so we can avoid overriding it.
self._output_location_referenced = True
return super().url
@property
def save_as(self) -> str:
# Note when save_as has been referenced, so we can avoid overriding it.
self._output_location_referenced = True
return super().save_as
def attach_to(self, content: Content) -> None:
"""Override our output directory with that of the given content object."""
# Determine our file's new output path relative to the linking
# document. If it currently lives beneath the linking
# document's source directory, preserve that relationship on output.
# Otherwise, make it a sibling.
linking_source_dir = os.path.dirname(content.source_path)
tail_path = os.path.relpath(self.source_path, linking_source_dir)
if tail_path.startswith(os.pardir + os.sep):
tail_path = os.path.basename(tail_path)
new_save_as = os.path.join(os.path.dirname(content.save_as), tail_path)
# We do not build our new url by joining tail_path with the linking
# document's url, because we cannot know just by looking at the latter
# whether it points to the document itself or to its parent directory.
# (An url like 'some/content' might mean a directory named 'some'
# with a file named 'content', or it might mean a directory named
# 'some/content' with a file named 'index.html'.) Rather than trying
# to figure it out by comparing the linking document's url and save_as
# path, we simply build our new url from our new save_as path.
new_url = path_to_url(new_save_as)
def _log_reason(reason: str) -> None:
logger.warning(
"The {attach} link in %s cannot relocate "
"%s because %s. Falling back to "
"{filename} link behavior instead.",
content.get_relative_source_path(),
self.get_relative_source_path(),
reason,
extra={"limit_msg": "More {attach} warnings silenced."},
)
# We never override an override, because we don't want to interfere
# with user-defined overrides that might be in EXTRA_PATH_METADATA.
if hasattr(self, "override_save_as") or hasattr(self, "override_url"):
if new_save_as != self.save_as or new_url != self.url:
_log_reason("its output location was already overridden")
return
# We never change an output path that has already been referenced,
# because we don't want to break links that depend on that path.
if self._output_location_referenced:
if new_save_as != self.save_as or new_url != self.url:
_log_reason("another link already referenced its location")
return
self.override_save_as = new_save_as
self.override_url = new_url
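
# Illustrative source markup that the intrasite-link machinery above resolves
# (paths are hypothetical):
#
#     <a href="{filename}/posts/other-article.rst">link to a generated page</a>
#     <img src="{static}/images/diagram.png" alt="diagram">
#     <a href="{attach}downloads/report.pdf">file relocated next to this article</a>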
| 25,242 | Python | .py | 571 | 33.005254 | 87 | 0.578941 | getpelican/pelican | 12,478 | 1,806 | 72 | AGPL-3.0 | 9/5/2024, 5:09:45 PM (Europe/Amsterdam) |

| 6,208 | readers.py | getpelican_pelican/pelican/readers.py |
import datetime
import logging
import os
import re
from collections import OrderedDict
from html import escape
from html.parser import HTMLParser
from io import StringIO
import docutils
import docutils.core
import docutils.io
from docutils.parsers.rst.languages import get_language as get_docutils_lang
from docutils.writers.html4css1 import HTMLTranslator, Writer
from pelican import rstdirectives # NOQA
from pelican.cache import FileStampDataCacher
from pelican.contents import Author, Category, Page, SkipStub, Tag
from pelican.plugins import signals
from pelican.utils import file_suffix, get_date, pelican_open, posixize_path
try:
from markdown import Markdown
except ImportError:
Markdown = False
# Metadata processors have no way to discard an unwanted value, so we have
# them return this value instead to signal that it should be discarded later.
# This means that _filter_discardable_metadata() must be called on processed
# metadata dicts before use, to remove the items with the special value.
_DISCARD = object()
DUPLICATES_DEFINITIONS_ALLOWED = {
"tags": False,
"date": False,
"modified": False,
"status": False,
"category": False,
"author": False,
"save_as": False,
"url": False,
"authors": False,
"slug": False,
}
METADATA_PROCESSORS = {
"tags": lambda x, y: ([Tag(tag, y) for tag in ensure_metadata_list(x)] or _DISCARD),
"date": lambda x, y: get_date(x.replace("_", " ")),
"modified": lambda x, y: get_date(x),
"status": lambda x, y: x.strip() or _DISCARD,
"category": lambda x, y: _process_if_nonempty(Category, x, y),
"author": lambda x, y: _process_if_nonempty(Author, x, y),
"authors": lambda x, y: (
[Author(author, y) for author in ensure_metadata_list(x)] or _DISCARD
),
"slug": lambda x, y: x.strip() or _DISCARD,
}
logger = logging.getLogger(__name__)
def ensure_metadata_list(text):
"""Canonicalize the format of a list of authors or tags. This works
the same way as Docutils' "authors" field: if it's already a list,
those boundaries are preserved; otherwise, it must be a string;
if the string contains semicolons, it is split on semicolons;
otherwise, it is split on commas. This allows you to write
author lists in either "Jane Doe, John Doe" or "Doe, Jane; Doe, John"
format.
Regardless, all list items undergo .strip() before returning, and
empty items are discarded.
"""
if isinstance(text, str):
if ";" in text:
text = text.split(";")
else:
text = text.split(",")
return list(OrderedDict.fromkeys([v for v in (w.strip() for w in text) if v]))
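
# For example (illustrative):
#
#     ensure_metadata_list("Doe, Jane; Doe, John")  # ['Doe, Jane', 'Doe, John']
#     ensure_metadata_list("Jane Doe, John Doe")    # ['Jane Doe', 'John Doe']
#     ensure_metadata_list(["a", "a", " b "])       # ['a', 'b']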
def _process_if_nonempty(processor, name, settings):
"""Removes extra whitespace from name and applies a metadata processor.
If name is empty or all whitespace, returns _DISCARD instead.
"""
name = name.strip()
return processor(name, settings) if name else _DISCARD
def _filter_discardable_metadata(metadata):
"""Return a copy of a dict, minus any items marked as discardable."""
return {name: val for name, val in metadata.items() if val is not _DISCARD}
class BaseReader:
"""Base class to read files.
This class is used to process static files, and it can be inherited for
other types of file. A Reader class must have the following attributes:
- enabled: (boolean) tell if the Reader class is enabled. It
generally depends on the import of some dependency.
- file_extensions: a list of file extensions that the Reader will process.
- extensions: a list of extensions to use in the reader (typical use is
Markdown).
"""
enabled = True
file_extensions = ["static"]
extensions = None
def __init__(self, settings):
self.settings = settings
def process_metadata(self, name, value):
if name in METADATA_PROCESSORS:
return METADATA_PROCESSORS[name](value, self.settings)
return value
def read(self, source_path):
"No-op parser"
content = None
metadata = {}
return content, metadata
def disabled_message(self) -> str:
"""Message about why this plugin was disabled."""
return ""
class _FieldBodyTranslator(HTMLTranslator):
def __init__(self, document):
super().__init__(document)
self.compact_p = None
def astext(self):
return "".join(self.body)
def visit_field_body(self, node):
pass
def depart_field_body(self, node):
pass
def render_node_to_html(document, node, field_body_translator_class):
visitor = field_body_translator_class(document)
node.walkabout(visitor)
return visitor.astext()
class PelicanHTMLWriter(Writer):
def __init__(self):
super().__init__()
self.translator_class = PelicanHTMLTranslator
class PelicanHTMLTranslator(HTMLTranslator):
def visit_abbreviation(self, node):
attrs = {}
if node.hasattr("explanation"):
attrs["title"] = node["explanation"]
self.body.append(self.starttag(node, "abbr", "", **attrs))
def depart_abbreviation(self, node):
self.body.append("</abbr>")
def visit_image(self, node):
# set an empty alt if alt is not specified
# avoids that alt is taken from src
node["alt"] = node.get("alt", "")
return HTMLTranslator.visit_image(self, node)
class RstReader(BaseReader):
"""Reader for reStructuredText files
By default the output HTML is written using
docutils.writers.html4css1.Writer and translated using a subclass of
docutils.writers.html4css1.HTMLTranslator. If you want to override it with
    your own writer/translator (e.g. an HTML5-based one), pass your classes to
these two attributes. Look in the source code for details.
writer_class Used for writing contents
field_body_translator_class Used for translating metadata such
as article summary
"""
enabled = bool(docutils)
file_extensions = ["rst"]
writer_class = PelicanHTMLWriter
field_body_translator_class = _FieldBodyTranslator
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
lang_code = self.settings.get("DEFAULT_LANG", "en")
if get_docutils_lang(lang_code):
self._language_code = lang_code
else:
logger.warning(
"Docutils has no localization for '%s'. Using 'en' instead.",
lang_code,
)
self._language_code = "en"
def _parse_metadata(self, document, source_path):
"""Return the dict containing document metadata"""
formatted_fields = self.settings["FORMATTED_FIELDS"]
output = {}
if document.first_child_matching_class(docutils.nodes.title) is None:
logger.warning(
"Document title missing in file %s: "
"Ensure exactly one top level section",
source_path,
)
try:
# docutils 0.18.1+
nodes = document.findall(docutils.nodes.docinfo)
except AttributeError:
# docutils 0.18.0 or before
nodes = document.traverse(docutils.nodes.docinfo)
for docinfo in nodes:
for element in docinfo.children:
if element.tagname == "field": # custom fields (e.g. summary)
name_elem, body_elem = element.children
name = name_elem.astext()
if name.lower() in formatted_fields:
value = render_node_to_html(
document, body_elem, self.field_body_translator_class
)
else:
value = body_elem.astext()
elif element.tagname == "authors": # author list
name = element.tagname
value = [element.astext() for element in element.children]
else: # standard fields (e.g. address)
name = element.tagname
value = element.astext()
name = name.lower()
output[name] = self.process_metadata(name, value)
return output
def _get_publisher(self, source_path):
extra_params = {
"initial_header_level": "2",
"syntax_highlight": "short",
"input_encoding": "utf-8",
"language_code": self._language_code,
"halt_level": 2,
"traceback": True,
"warning_stream": StringIO(),
"embed_stylesheet": False,
}
user_params = self.settings.get("DOCUTILS_SETTINGS")
if user_params:
extra_params.update(user_params)
pub = docutils.core.Publisher(
writer=self.writer_class(), destination_class=docutils.io.StringOutput
)
pub.set_components("standalone", "restructuredtext", "html")
pub.process_programmatic_settings(None, extra_params, None)
pub.set_source(source_path=source_path)
pub.publish()
return pub
def read(self, source_path):
"""Parses restructured text"""
pub = self._get_publisher(source_path)
parts = pub.writer.parts
content = parts.get("body")
metadata = self._parse_metadata(pub.document, source_path)
metadata.setdefault("title", parts.get("title"))
return content, metadata
class MarkdownReader(BaseReader):
"""Reader for Markdown files"""
enabled = bool(Markdown)
file_extensions = ["md", "markdown", "mkd", "mdown"]
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
settings = self.settings["MARKDOWN"]
settings.setdefault("extension_configs", {})
settings.setdefault("extensions", [])
for extension in settings["extension_configs"].keys():
if extension not in settings["extensions"]:
settings["extensions"].append(extension)
if "markdown.extensions.meta" not in settings["extensions"]:
settings["extensions"].append("markdown.extensions.meta")
self._source_path = None
def _parse_metadata(self, meta):
"""Return the dict containing document metadata"""
formatted_fields = self.settings["FORMATTED_FIELDS"]
# prevent metadata extraction in fields
self._md.preprocessors.deregister("meta")
output = {}
for name, value in meta.items():
name = name.lower()
if name in formatted_fields:
                # formatted metadata is a special case: join all list values
formatted_values = "\n".join(value)
# reset the markdown instance to clear any state
self._md.reset()
formatted = self._md.convert(formatted_values)
output[name] = self.process_metadata(name, formatted)
elif not DUPLICATES_DEFINITIONS_ALLOWED.get(name, True):
if len(value) > 1:
logger.warning(
"Duplicate definition of `%s` for %s. Using first one.",
name,
self._source_path,
)
output[name] = self.process_metadata(name, value[0])
elif len(value) > 1:
# handle list metadata as list of string
output[name] = self.process_metadata(name, value)
else:
# otherwise, handle metadata as single string
output[name] = self.process_metadata(name, value[0])
return output
def read(self, source_path):
"""Parse content and metadata of markdown files"""
self._source_path = source_path
self._md = Markdown(**self.settings["MARKDOWN"])
with pelican_open(source_path) as text:
content = self._md.convert(text)
if hasattr(self._md, "Meta"):
metadata = self._parse_metadata(self._md.Meta)
else:
metadata = {}
return content, metadata
def disabled_message(self) -> str:
return (
"Could not import 'markdown.Markdown'. "
"Have you installed the 'markdown' package?"
)
class HTMLReader(BaseReader):
"""Parses HTML files as input, looking for meta, title, and body tags"""
file_extensions = ["htm", "html"]
enabled = True
class _HTMLParser(HTMLParser):
def __init__(self, settings, filename):
super().__init__(convert_charrefs=False)
self.body = ""
self.metadata = {}
self.settings = settings
self._data_buffer = ""
self._filename = filename
self._in_top_level = True
self._in_head = False
self._in_title = False
self._in_body = False
self._in_tags = False
def handle_starttag(self, tag, attrs):
if tag == "head" and self._in_top_level:
self._in_top_level = False
self._in_head = True
elif tag == "title" and self._in_head:
self._in_title = True
self._data_buffer = ""
elif tag == "body" and self._in_top_level:
self._in_top_level = False
self._in_body = True
self._data_buffer = ""
elif tag == "meta" and self._in_head:
self._handle_meta_tag(attrs)
elif self._in_body:
self._data_buffer += self.build_tag(tag, attrs, False)
def handle_endtag(self, tag):
if tag == "head":
if self._in_head:
self._in_head = False
self._in_top_level = True
elif self._in_head and tag == "title":
self._in_title = False
self.metadata["title"] = self._data_buffer
elif tag == "body":
self.body = self._data_buffer
self._in_body = False
self._in_top_level = True
elif self._in_body:
self._data_buffer += f"</{escape(tag)}>"
def handle_startendtag(self, tag, attrs):
if tag == "meta" and self._in_head:
self._handle_meta_tag(attrs)
if self._in_body:
self._data_buffer += self.build_tag(tag, attrs, True)
def handle_comment(self, data):
self._data_buffer += f"<!--{data}-->"
def handle_data(self, data):
self._data_buffer += data
def handle_entityref(self, data):
self._data_buffer += f"&{data};"
def handle_charref(self, data):
self._data_buffer += f"&#{data};"
def build_tag(self, tag, attrs, close_tag):
result = f"<{escape(tag)}"
for k, v in attrs:
result += " " + escape(k)
if v is not None:
# If the attribute value contains a double quote, surround
# with single quotes, otherwise use double quotes.
if '"' in v:
result += f"='{escape(v, quote=False)}'"
else:
result += f'="{escape(v, quote=False)}"'
if close_tag:
return result + " />"
return result + ">"
def _handle_meta_tag(self, attrs):
name = self._attr_value(attrs, "name")
if name is None:
attr_list = [f'{k}="{v}"' for k, v in attrs]
attr_serialized = ", ".join(attr_list)
logger.warning(
"Meta tag in file %s does not have a 'name' "
"attribute, skipping. Attributes: %s",
self._filename,
attr_serialized,
)
return
name = name.lower()
contents = self._attr_value(attrs, "content", "")
if not contents:
contents = self._attr_value(attrs, "contents", "")
if contents:
logger.warning(
"Meta tag attribute 'contents' used in file %s, should"
" be changed to 'content'",
self._filename,
extra={
"limit_msg": "Other files have meta tag "
"attribute 'contents' that should "
"be changed to 'content'"
},
)
if name == "keywords":
name = "tags"
if name in self.metadata:
                # if this metadata already exists (i.e. a previous tag with
                # the same name has already been specified), then either
                # convert it to a list or append to the existing list
if isinstance(self.metadata[name], list):
self.metadata[name].append(contents)
else:
self.metadata[name] = [self.metadata[name], contents]
else:
self.metadata[name] = contents
@classmethod
def _attr_value(cls, attrs, name, default=None):
return next((x[1] for x in attrs if x[0] == name), default)
def read(self, filename):
"""Parse content and metadata of HTML files"""
with pelican_open(filename) as content:
parser = self._HTMLParser(self.settings, filename)
parser.feed(content)
parser.close()
metadata = {}
for k in parser.metadata:
metadata[k] = self.process_metadata(k, parser.metadata[k])
return parser.body, metadata
class Readers(FileStampDataCacher):
"""Interface for all readers.
This class contains a mapping of file extensions / Reader classes, to know
which Reader class must be used to read a file (based on its extension).
    This is customizable both with the 'READERS' setting and with the
    'readers_init' signal for plugins.
"""
def __init__(self, settings=None, cache_name=""):
self.settings = settings or {}
self.readers = {}
self.disabled_readers = {}
# extension => reader for readers that are enabled
self.reader_classes = {}
# extension => reader for readers that are not enabled
disabled_reader_classes = {}
for cls in [BaseReader] + BaseReader.__subclasses__():
if not cls.enabled:
logger.debug(
"Missing dependencies for %s", ", ".join(cls.file_extensions)
)
for ext in cls.file_extensions:
if cls.enabled:
self.reader_classes[ext] = cls
else:
disabled_reader_classes[ext] = cls
if self.settings["READERS"]:
self.reader_classes.update(self.settings["READERS"])
signals.readers_init.send(self)
for fmt, reader_class in self.reader_classes.items():
if not reader_class:
continue
self.readers[fmt] = reader_class(self.settings)
for fmt, reader_class in disabled_reader_classes.items():
self.disabled_readers[fmt] = reader_class(self.settings)
# set up caching
cache_this_level = (
cache_name != "" and self.settings["CONTENT_CACHING_LAYER"] == "reader"
)
caching_policy = cache_this_level and self.settings["CACHE_CONTENT"]
load_policy = cache_this_level and self.settings["LOAD_CONTENT_CACHE"]
super().__init__(settings, cache_name, caching_policy, load_policy)
@property
def extensions(self):
"""File extensions that will be processed by a reader."""
return self.readers.keys()
@property
def disabled_extensions(self):
return self.disabled_readers.keys()
def read_file(
self,
base_path,
path,
content_class=Page,
fmt=None,
context=None,
preread_signal=None,
preread_sender=None,
context_signal=None,
context_sender=None,
):
"""Return a content object parsed with the given format."""
path = os.path.abspath(os.path.join(base_path, path))
source_path = posixize_path(os.path.relpath(path, base_path))
logger.debug("Read file %s -> %s", source_path, content_class.__name__)
if not fmt:
fmt = file_suffix(path)
if fmt not in self.readers:
raise TypeError("Pelican does not know how to parse %s", path)
if preread_signal:
logger.debug("Signal %s.send(%s)", preread_signal.name, preread_sender)
preread_signal.send(preread_sender)
reader = self.readers[fmt]
metadata = _filter_discardable_metadata(
default_metadata(settings=self.settings, process=reader.process_metadata)
)
metadata.update(
path_metadata(
full_path=path, source_path=source_path, settings=self.settings
)
)
metadata.update(
_filter_discardable_metadata(
parse_path_metadata(
source_path=source_path,
settings=self.settings,
process=reader.process_metadata,
)
)
)
reader_name = reader.__class__.__name__
metadata["reader"] = reader_name.replace("Reader", "").lower()
content, reader_metadata = self.get_cached_data(path, (None, None))
if content is None:
content, reader_metadata = reader.read(path)
reader_metadata = _filter_discardable_metadata(reader_metadata)
self.cache_data(path, (content, reader_metadata))
metadata.update(reader_metadata)
if content:
# find images with empty alt
find_empty_alt(content, path)
            # optionally filter the content with typogrify, if enabled
if self.settings["TYPOGRIFY"]:
import smartypants
from typogrify.filters import typogrify
typogrify_dashes = self.settings["TYPOGRIFY_DASHES"]
if typogrify_dashes == "oldschool":
smartypants.Attr.default = smartypants.Attr.set2
elif typogrify_dashes == "oldschool_inverted":
smartypants.Attr.default = smartypants.Attr.set3
else:
smartypants.Attr.default = smartypants.Attr.set1
# Tell `smartypants` to also replace " HTML entities with
# smart quotes. This is necessary because Docutils has already
# replaced double quotes with said entities by the time we run
# this filter.
smartypants.Attr.default |= smartypants.Attr.w
def typogrify_wrapper(text):
"""Ensures ignore_tags feature is backward compatible"""
try:
return typogrify(text, self.settings["TYPOGRIFY_IGNORE_TAGS"])
except TypeError:
return typogrify(text)
if content:
content = typogrify_wrapper(content)
if "title" in metadata:
metadata["title"] = typogrify_wrapper(metadata["title"])
if "summary" in metadata:
metadata["summary"] = typogrify_wrapper(metadata["summary"])
if context_signal:
logger.debug(
"Signal %s.send(%s, <metadata>)", context_signal.name, context_sender
)
context_signal.send(context_sender, metadata=metadata)
if metadata.get("status") == "skip":
content_class = SkipStub
return content_class(
content=content,
metadata=metadata,
settings=self.settings,
source_path=path,
context=context,
)
def check_file(self, source_path: str) -> None:
"""Log a warning if a file is processed by a disabled reader."""
reader = self.disabled_readers.get(file_suffix(source_path), None)
if reader:
logger.warning(f"{source_path}: {reader.disabled_message()}")
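# Sketch of the read_file() call a generator typically makes (paths are
# illustrative); the result is a content object carrying the parsed body and
# processed metadata, including the "reader" key set above:
#
#     page = readers.read_file(base_path="content", path="posts/hello.md",
#                              content_class=Page)
#     page.metadata["reader"]  # -> "markdown", assuming the Markdown reader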
def find_empty_alt(content, path):
"""Find images with empty alt
Create warnings for all images with empty alt (up to a certain number),
as they are really likely to be accessibility flaws.
"""
imgs = re.compile(
r"""
(?:
# src before alt
<img
[^\>]*
src=(['"])(.*?)\1
[^\>]*
alt=(['"])\3
)|(?:
# alt before src
<img
[^\>]*
alt=(['"])\4
[^\>]*
src=(['"])(.*?)\5
)
""",
re.X,
)
for match in re.findall(imgs, content):
logger.warning(
"Empty alt attribute for image %s in %s",
os.path.basename(match[1] + match[5]),
path,
extra={"limit_msg": "Other images have empty alt attributes"},
)
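# For reference, the kind of markup the regex above flags vs. ignores
# (illustrative HTML):
#
#     <img src="cat.png" alt="">         -> warned about
#     <img src="cat.png" alt="A cat">    -> fine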
def default_metadata(settings=None, process=None):
metadata = {}
if settings:
for name, value in dict(settings.get("DEFAULT_METADATA", {})).items():
if process:
value = process(name, value)
metadata[name] = value
if "DEFAULT_CATEGORY" in settings:
value = settings["DEFAULT_CATEGORY"]
if process:
value = process("category", value)
metadata["category"] = value
if settings.get("DEFAULT_DATE", None) and settings["DEFAULT_DATE"] != "fs":
if isinstance(settings["DEFAULT_DATE"], str):
metadata["date"] = get_date(settings["DEFAULT_DATE"])
else:
metadata["date"] = datetime.datetime(*settings["DEFAULT_DATE"])
return metadata
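# A small, verifiable sketch of the defaults above (settings illustrative,
# and assuming no `process` callback is passed):
#
#     default_metadata({"DEFAULT_METADATA": {"author": "jane"},
#                       "DEFAULT_CATEGORY": "misc"})
#     # -> {"author": "jane", "category": "misc"}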
def path_metadata(full_path, source_path, settings=None):
metadata = {}
if settings:
if settings.get("DEFAULT_DATE", None) == "fs":
metadata["date"] = datetime.datetime.fromtimestamp(
os.stat(full_path).st_mtime
)
metadata["modified"] = metadata["date"]
# Apply EXTRA_PATH_METADATA for the source path and the paths of any
# parent directories. Sorting EPM first ensures that the most specific
# path wins conflicts.
epm = settings.get("EXTRA_PATH_METADATA", {})
for path, meta in sorted(epm.items()):
# Enforce a trailing slash when checking for parent directories.
# This prevents false positives when one file or directory's name
# is a prefix of another's.
dirpath = posixize_path(os.path.join(path, ""))
if source_path == path or source_path.startswith(dirpath):
metadata.update(meta)
return metadata
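# Precedence sketch for EXTRA_PATH_METADATA (values illustrative): because
# the items are applied in sorted order, the more specific path wins
# conflicts:
#
#     EXTRA_PATH_METADATA = {"docs": {"status": "hidden"},
#                            "docs/deep": {"status": "published"}}
#     # for source_path "docs/deep/a.md" the resulting status is "published"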
def parse_path_metadata(source_path, settings=None, process=None):
r"""Extract a metadata dictionary from a file's path
>>> import pprint
>>> settings = {
... 'FILENAME_METADATA': r'(?P<slug>[^.]*).*',
... 'PATH_METADATA':
... r'(?P<category>[^/]*)/(?P<date>\d{4}-\d{2}-\d{2})/.*',
... }
>>> reader = BaseReader(settings=settings)
>>> metadata = parse_path_metadata(
... source_path='my-cat/2013-01-01/my-slug.html',
... settings=settings,
... process=reader.process_metadata)
>>> pprint.pprint(metadata) # doctest: +ELLIPSIS
{'category': <pelican.urlwrappers.Category object at ...>,
'date': datetime.datetime(2013, 1, 1, 0, 0),
'slug': 'my-slug'}
"""
metadata = {}
dirname, basename = os.path.split(source_path)
base, ext = os.path.splitext(basename)
subdir = os.path.basename(dirname)
if settings:
checks = []
for key, data in [("FILENAME_METADATA", base), ("PATH_METADATA", source_path)]:
checks.append((settings.get(key, None), data))
if settings.get("USE_FOLDER_AS_CATEGORY", None):
checks.append(("(?P<category>.*)", subdir))
for regexp, data in checks:
if regexp and data:
match = re.match(regexp, data)
if match:
# .items() for py3k compat.
for k, v in match.groupdict().items():
k = k.lower() # metadata must be lowercase
if v is not None and k not in metadata:
if process:
v = process(k, v)
metadata[k] = v
return metadata
| 28,847 | Python | .py | 671 | 32.038748 | 88 | 0.578257 | getpelican/pelican | 12,478 | 1,806 | 72 | AGPL-3.0 | 9/5/2024, 5:09:45 PM (Europe/Amsterdam) |
6,209 | server.py | getpelican_pelican/pelican/server.py |
import argparse
import logging
import os
import posixpath
import ssl
import sys
import urllib
from http import server
try:
from magic import from_file as magic_from_file
except ImportError:
magic_from_file = None
from pelican.log import console # noqa: F401
from pelican.log import init as init_logging
logger = logging.getLogger(__name__)
def parse_arguments():
parser = argparse.ArgumentParser(
description="Pelican Development Server",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"port", default=8000, type=int, nargs="?", help="Port to Listen On"
)
parser.add_argument("server", default="", nargs="?", help="Interface to Listen On")
parser.add_argument("--ssl", action="store_true", help="Activate SSL listener")
parser.add_argument(
"--cert",
default="./cert.pem",
nargs="?",
help="Path to certificate file. Relative to current directory",
)
parser.add_argument(
"--key",
default="./key.pem",
nargs="?",
help="Path to certificate key file. Relative to current directory",
)
parser.add_argument(
"--path",
default=".",
help="Path to pelican source directory to serve. Relative to current directory",
)
return parser.parse_args()
class ComplexHTTPRequestHandler(server.SimpleHTTPRequestHandler):
SUFFIXES = [".html", "/index.html", "/", ""]
extensions_map = {
**server.SimpleHTTPRequestHandler.extensions_map,
# web fonts
        ".otf": "font/otf",
".sfnt": "font/sfnt",
".ttf": "font/ttf",
".woff": "font/woff",
".woff2": "font/woff2",
}
def translate_path(self, path):
# abandon query parameters
path = path.split("?", 1)[0]
path = path.split("#", 1)[0]
# Don't forget explicit trailing slash when normalizing. Issue17324
trailing_slash = path.rstrip().endswith("/")
path = urllib.parse.unquote(path)
path = posixpath.normpath(path)
words = path.split("/")
words = filter(None, words)
path = self.base_path
for word in words:
if os.path.dirname(word) or word in (os.curdir, os.pardir):
# Ignore components that are not a simple file/directory name
continue
path = os.path.join(path, word)
if trailing_slash:
path += "/"
return path
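    # Translation sketch (paths illustrative): with base_path "/site/output",
    # a request for "/blog/post/?x=1" maps to "/site/output/blog/post/"; the
    # query string is dropped, the trailing slash is preserved, and any ".."
    # components are ignored.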
def do_GET(self):
# cut off a query string
original_path = self.path.split("?", 1)[0]
# try to find file
self.path = self.get_path_that_exists(original_path)
if not self.path:
return
server.SimpleHTTPRequestHandler.do_GET(self)
def get_path_that_exists(self, original_path):
# Try to strip trailing slash
trailing_slash = original_path.endswith("/")
original_path = original_path.rstrip("/")
# Try to detect file by applying various suffixes
tries = []
for suffix in self.SUFFIXES:
if not trailing_slash and suffix == "/":
# if original request does not have trailing slash, skip the '/' suffix
# so that base class can redirect if needed
continue
path = original_path + suffix
if os.path.exists(self.translate_path(path)):
return path
tries.append(path)
logger.warning(
"Unable to find `%s` or variations:\n%s", original_path, "\n".join(tries)
)
return None
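    # Resolution sketch: a request for "/about" (no trailing slash) is tried
    # as "/about.html", "/about/index.html", and "/about" in turn; the "/"
    # suffix is skipped so the base class can redirect. The first variant
    # that exists on disk wins, otherwise a warning lists all the tries.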
def guess_type(self, path):
"""Guess at the mime type for the specified file."""
mimetype = server.SimpleHTTPRequestHandler.guess_type(self, path)
# If the default guess is too generic, try the python-magic library
if mimetype == "application/octet-stream" and magic_from_file:
mimetype = magic_from_file(path, mime=True)
return mimetype
def log_message(self, format, *args):
logger.info(format, *args)
class RootedHTTPServer(server.HTTPServer):
def __init__(self, base_path, *args, **kwargs):
server.HTTPServer.__init__(self, *args, **kwargs)
self.RequestHandlerClass.base_path = base_path
if __name__ == "__main__":
init_logging(level=logging.INFO)
logger.warning(
"'python -m pelican.server' is deprecated.\nThe "
"Pelican development server should be run via "
"'pelican --listen' or 'pelican -l'.\nThis can be combined "
"with regeneration as 'pelican -lr'.\nRerun 'pelican-"
"quickstart' to get new Makefile and tasks.py files."
)
args = parse_arguments()
RootedHTTPServer.allow_reuse_address = True
try:
httpd = RootedHTTPServer(
args.path, (args.server, args.port), ComplexHTTPRequestHandler
)
        if args.ssl:
            # ssl.wrap_socket() was deprecated and removed in Python 3.12;
            # use an SSLContext instead
            context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
            context.load_cert_chain(certfile=args.cert, keyfile=args.key)
            httpd.socket = context.wrap_socket(httpd.socket, server_side=True)
except ssl.SSLError as e:
logger.error(
"Couldn't open certificate file %s or key file %s", args.cert, args.key
)
logger.error("Could not listen on port %s, server %s.", args.port, args.server)
sys.exit(getattr(e, "exitcode", 1))
logger.info("Serving at port %s, server %s.", args.port, args.server)
try:
httpd.serve_forever()
except KeyboardInterrupt:
logger.info("Shutting down server.")
httpd.socket.close()
| 5,568 | Python | .py | 144 | 30.625 | 88 | 0.621622 | getpelican/pelican | 12,478 | 1,806 | 72 | AGPL-3.0 | 9/5/2024, 5:09:45 PM (Europe/Amsterdam) |
6,210 | rstdirectives.py | getpelican_pelican/pelican/rstdirectives.py |
import re
from docutils import nodes, utils
from docutils.parsers.rst import Directive, directives, roles
from pygments import highlight
from pygments.formatters import HtmlFormatter
from pygments.lexers import TextLexer, get_lexer_by_name
import pelican.settings as pys
class Pygments(Directive):
"""Source code syntax highlighting."""
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
option_spec = {
"anchorlinenos": directives.flag,
"classprefix": directives.unchanged,
"hl_lines": directives.unchanged,
"lineanchors": directives.unchanged,
"linenos": directives.unchanged,
"linenospecial": directives.nonnegative_int,
"linenostart": directives.nonnegative_int,
"linenostep": directives.nonnegative_int,
"lineseparator": directives.unchanged,
"linespans": directives.unchanged,
"nobackground": directives.flag,
"nowrap": directives.flag,
"tagsfile": directives.unchanged,
"tagurlformat": directives.unchanged,
}
has_content = True
def run(self):
self.assert_has_content()
try:
lexer = get_lexer_by_name(self.arguments[0])
except ValueError:
# no lexer found - use the text one instead of an exception
lexer = TextLexer()
# Fetch the defaults
if pys.PYGMENTS_RST_OPTIONS is not None:
for k, v in pys.PYGMENTS_RST_OPTIONS.items():
# Locally set options overrides the defaults
if k not in self.options:
self.options[k] = v
if "linenos" in self.options and self.options["linenos"] not in (
"table",
"inline",
):
if self.options["linenos"] == "none":
self.options.pop("linenos")
else:
self.options["linenos"] = "table"
for flag in ("nowrap", "nobackground", "anchorlinenos"):
if flag in self.options:
self.options[flag] = True
# noclasses should already default to False, but just in case...
formatter = HtmlFormatter(noclasses=False, **self.options)
parsed = highlight("\n".join(self.content), lexer, formatter)
return [nodes.raw("", parsed, format="html")]
directives.register_directive("code-block", Pygments)
directives.register_directive("sourcecode", Pygments)
_abbr_re = re.compile(r"\((.*)\)$", re.DOTALL)
class abbreviation(nodes.Inline, nodes.TextElement):
pass
def abbr_role(typ, rawtext, text, lineno, inliner, options=None, content=None):
text = utils.unescape(text)
m = _abbr_re.search(text)
if m is None:
return [abbreviation(text, text)], []
abbr = text[: m.start()].strip()
expl = m.group(1)
return [abbreviation(abbr, abbr, explanation=expl)], []
roles.register_local_role("abbr", abbr_role)
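# Usage sketch in reST: the role text ":abbr:`HTML (HyperText Markup
# Language)`" yields an abbreviation node whose explanation is the
# parenthesised part; without a trailing "(...)" the text passes through as
# a plain abbreviation node.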
| 2,942 | Python | .py | 71 | 33.338028 | 79 | 0.647141 | getpelican/pelican | 12,478 | 1,806 | 72 | AGPL-3.0 | 9/5/2024, 5:09:45 PM (Europe/Amsterdam) |
6,211 | generators.py | getpelican_pelican/pelican/generators.py |
import calendar
import errno
import fnmatch
import logging
import os
from collections import defaultdict
from functools import partial
from itertools import chain, groupby
from operator import attrgetter
from typing import List, Optional, Set
from jinja2 import (
BaseLoader,
ChoiceLoader,
Environment,
FileSystemLoader,
PrefixLoader,
TemplateNotFound,
)
from pelican.cache import FileStampDataCacher
from pelican.contents import Article, Page, SkipStub, Static
from pelican.plugins import signals
from pelican.plugins._utils import plugin_enabled
from pelican.readers import Readers
from pelican.utils import (
DateFormatter,
copy,
mkdir_p,
order_content,
posixize_path,
process_translations,
)
logger = logging.getLogger(__name__)
class PelicanTemplateNotFound(Exception):
pass
class Generator:
"""Baseclass generator"""
def __init__(
self,
context,
settings,
path,
theme,
output_path,
readers_cache_name="",
**kwargs,
):
self.context = context
self.settings = settings
self.path = path
self.theme = theme
self.output_path = output_path
for arg, value in kwargs.items():
setattr(self, arg, value)
self.readers = Readers(self.settings, readers_cache_name)
# templates cache
self._templates = {}
self._templates_path = list(self.settings["THEME_TEMPLATES_OVERRIDES"])
theme_templates_path = os.path.expanduser(os.path.join(self.theme, "templates"))
self._templates_path.append(theme_templates_path)
theme_loader = FileSystemLoader(theme_templates_path)
simple_theme_path = os.path.dirname(os.path.abspath(__file__))
simple_loader = FileSystemLoader(
os.path.join(simple_theme_path, "themes", "simple", "templates")
)
self.env = Environment(
loader=ChoiceLoader(
[
FileSystemLoader(self._templates_path),
simple_loader, # implicit inheritance
PrefixLoader(
{"!simple": simple_loader, "!theme": theme_loader}
), # explicit ones
]
),
**self.settings["JINJA_ENVIRONMENT"],
)
logger.debug("Template list: %s", self.env.list_templates())
# provide utils.strftime as a jinja filter
self.env.filters.update({"strftime": DateFormatter()})
# get custom Jinja filters from user settings
custom_filters = self.settings["JINJA_FILTERS"]
self.env.filters.update(custom_filters)
# get custom Jinja globals from user settings
custom_globals = self.settings["JINJA_GLOBALS"]
self.env.globals.update(custom_globals)
# get custom Jinja tests from user settings
custom_tests = self.settings["JINJA_TESTS"]
self.env.tests["plugin_enabled"] = partial(
plugin_enabled, plugin_list=self.settings["PLUGINS"]
)
self.env.tests.update(custom_tests)
signals.generator_init.send(self)
def get_template(self, name):
"""Return the template by name.
Use self.theme to get the templates to use, and return a list of
templates ready to use with Jinja2.
"""
if name not in self._templates:
for ext in self.settings["TEMPLATE_EXTENSIONS"]:
try:
self._templates[name] = self.env.get_template(name + ext)
break
except TemplateNotFound:
continue
if name not in self._templates:
raise PelicanTemplateNotFound(
"[templates] unable to load {}[{}] from {}".format(
name,
", ".join(self.settings["TEMPLATE_EXTENSIONS"]),
self._templates_path,
)
)
return self._templates[name]
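    # Lookup sketch: get_template("article") tries "article" plus each entry
    # in TEMPLATE_EXTENSIONS (e.g. ".html") against THEME_TEMPLATES_OVERRIDES,
    # the theme's templates, and the bundled "simple" theme, and raises
    # PelicanTemplateNotFound only if every candidate misses.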
def _include_path(self, path, extensions=None):
"""Inclusion logic for .get_files(), returns True/False
:param path: potential path to include (relative to content root)
:param extensions: the list of allowed extensions, or False if all
extensions are allowed
"""
if extensions is None:
extensions = tuple(self.readers.extensions)
basename = os.path.basename(path)
# check IGNORE_FILES
ignores = self.settings["IGNORE_FILES"]
if any(fnmatch.fnmatch(basename, ignore) for ignore in ignores):
return False
ext = os.path.splitext(basename)[1][1:]
if extensions is False or ext in extensions:
return True
return False
def get_files(
self, paths, exclude: Optional[List[str]] = None, extensions=None
) -> Set[str]:
"""Return a list of files to use, based on rules
        :param paths: the list of paths to search (relative to self.path)
        :param exclude: the list of paths to exclude
:param extensions: the list of allowed extensions (if False, all
extensions are allowed)
"""
if exclude is None:
exclude = []
# backward compatibility for older generators
if isinstance(paths, str):
paths = [paths]
# group the exclude dir names by parent path, for use with os.walk()
exclusions_by_dirpath = {}
for e in exclude:
parent_path, subdir = os.path.split(os.path.join(self.path, e))
exclusions_by_dirpath.setdefault(parent_path, set()).add(subdir)
files = set()
ignores = self.settings["IGNORE_FILES"]
for path in paths:
# careful: os.path.join() will add a slash when path == ''.
root = os.path.join(self.path, path) if path else self.path
if os.path.isdir(root):
for dirpath, dirs, temp_files in os.walk(
root, topdown=True, followlinks=True
):
excl = exclusions_by_dirpath.get(dirpath, ())
# We copy the `dirs` list as we will modify it in the loop:
for d in list(dirs):
if d in excl or any(
fnmatch.fnmatch(d, ignore) for ignore in ignores
):
if d in dirs:
dirs.remove(d)
reldir = os.path.relpath(dirpath, self.path)
for f in temp_files:
fp = os.path.join(reldir, f)
if self._include_path(fp, extensions):
files.add(fp)
elif os.path.exists(root) and self._include_path(path, extensions):
files.add(path) # can't walk non-directories
return files
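    # Sketch (settings illustrative): with ARTICLE_PATHS = ["posts"] and
    # ARTICLE_EXCLUDES = ["posts/old"], a call like
    # get_files(["posts"], exclude=["posts/old"]) walks <content>/posts,
    # prunes the "old" subtree and anything matching IGNORE_FILES, and
    # returns paths relative to self.path for extensions the enabled
    # readers handle.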
def add_source_path(self, content, static=False):
"""Record a source file path that a Generator found and processed.
Store a reference to its Content object, for url lookups later.
"""
location = content.get_relative_source_path()
key = "static_content" if static else "generated_content"
self.context[key][location] = content
def _add_failed_source_path(self, path, static=False):
"""Record a source file path that a Generator failed to process.
(For example, one that was missing mandatory metadata.)
The path argument is expected to be relative to self.path.
"""
key = "static_content" if static else "generated_content"
self.context[key][posixize_path(os.path.normpath(path))] = None
def _is_potential_source_path(self, path, static=False):
"""Return True if path was supposed to be used as a source file.
(This includes all source files that have been found by generators
before this method is called, even if they failed to process.)
The path argument is expected to be relative to self.path.
"""
key = "static_content" if static else "generated_content"
return posixize_path(os.path.normpath(path)) in self.context[key]
def add_static_links(self, content):
"""Add file links in content to context to be processed as Static
content.
"""
self.context["static_links"] |= content.get_static_links()
def _update_context(self, items):
"""Update the context with the given items from the current processor.
Note that dictionary arguments will be converted to a list of tuples.
"""
for item in items:
value = getattr(self, item)
if hasattr(value, "items"):
value = list(value.items()) # py3k safeguard for iterators
self.context[item] = value
def __str__(self):
# return the name of the class for logging purposes
return self.__class__.__name__
def _check_disabled_readers(self, paths, exclude: Optional[List[str]]) -> None:
"""Log warnings for files that would have been processed by disabled readers."""
for fil in self.get_files(
paths, exclude=exclude, extensions=self.readers.disabled_extensions
):
self.readers.check_file(fil)
class CachingGenerator(Generator, FileStampDataCacher):
"""Subclass of Generator and FileStampDataCacher classes
enables content caching, either at the generator or reader level
"""
def __init__(self, *args, **kwargs):
"""Initialize the generator, then set up caching
note the multiple inheritance structure
"""
cls_name = self.__class__.__name__
Generator.__init__(
self, *args, readers_cache_name=(cls_name + "-Readers"), **kwargs
)
cache_this_level = self.settings["CONTENT_CACHING_LAYER"] == "generator"
caching_policy = cache_this_level and self.settings["CACHE_CONTENT"]
load_policy = cache_this_level and self.settings["LOAD_CONTENT_CACHE"]
FileStampDataCacher.__init__(
self, self.settings, cls_name, caching_policy, load_policy
)
def _get_file_stamp(self, filename):
"""Get filestamp for path relative to generator.path"""
filename = os.path.join(self.path, filename)
return super()._get_file_stamp(filename)
class _FileLoader(BaseLoader):
def __init__(self, path, basedir):
self.path = path
self.fullpath = os.path.join(basedir, path)
def get_source(self, environment, template):
if template != self.path or not os.path.exists(self.fullpath):
raise TemplateNotFound(template)
mtime = os.path.getmtime(self.fullpath)
with open(self.fullpath, encoding="utf-8") as f:
source = f.read()
return (source, self.fullpath, lambda: mtime == os.path.getmtime(self.fullpath))
class TemplatePagesGenerator(Generator):
def generate_output(self, writer):
for source, dest in self.settings["TEMPLATE_PAGES"].items():
self.env.loader.loaders.insert(0, _FileLoader(source, self.path))
try:
template = self.env.get_template(source)
rurls = self.settings["RELATIVE_URLS"]
writer.write_file(
dest, template, self.context, rurls, override_output=True, url=""
)
finally:
del self.env.loader.loaders[0]
class ArticlesGenerator(CachingGenerator):
"""Generate blog articles"""
def __init__(self, *args, **kwargs):
"""initialize properties"""
# Published, listed articles
self.articles = [] # only articles in default language
self.translations = []
# Published, unlisted articles
self.hidden_articles = []
self.hidden_translations = []
# Draft articles
self.drafts = [] # only drafts in default language
self.drafts_translations = []
self.dates = {}
self.period_archives = defaultdict(list)
self.tags = defaultdict(list)
self.categories = defaultdict(list)
self.related_posts = []
self.authors = defaultdict(list)
super().__init__(*args, **kwargs)
signals.article_generator_init.send(self)
def generate_feeds(self, writer):
"""Generate the feeds from the current context, and output files."""
if self.settings.get("FEED_ATOM"):
writer.write_feed(
self.articles,
self.context,
self.settings["FEED_ATOM"],
self.settings.get("FEED_ATOM_URL", self.settings["FEED_ATOM"]),
)
if self.settings.get("FEED_RSS"):
writer.write_feed(
self.articles,
self.context,
self.settings["FEED_RSS"],
self.settings.get("FEED_RSS_URL", self.settings["FEED_RSS"]),
feed_type="rss",
)
if self.settings.get("FEED_ALL_ATOM") or self.settings.get("FEED_ALL_RSS"):
all_articles = list(self.articles)
for article in self.articles:
all_articles.extend(article.translations)
order_content(all_articles, order_by=self.settings["ARTICLE_ORDER_BY"])
if self.settings.get("FEED_ALL_ATOM"):
writer.write_feed(
all_articles,
self.context,
self.settings["FEED_ALL_ATOM"],
self.settings.get(
"FEED_ALL_ATOM_URL", self.settings["FEED_ALL_ATOM"]
),
)
if self.settings.get("FEED_ALL_RSS"):
writer.write_feed(
all_articles,
self.context,
self.settings["FEED_ALL_RSS"],
self.settings.get(
"FEED_ALL_RSS_URL", self.settings["FEED_ALL_RSS"]
),
feed_type="rss",
)
for cat, arts in self.categories:
if self.settings.get("CATEGORY_FEED_ATOM"):
writer.write_feed(
arts,
self.context,
str(self.settings["CATEGORY_FEED_ATOM"]).format(slug=cat.slug),
self.settings.get(
"CATEGORY_FEED_ATOM_URL",
str(self.settings["CATEGORY_FEED_ATOM"]),
).format(slug=cat.slug),
feed_title=cat.name,
)
if self.settings.get("CATEGORY_FEED_RSS"):
writer.write_feed(
arts,
self.context,
str(self.settings["CATEGORY_FEED_RSS"]).format(slug=cat.slug),
self.settings.get(
"CATEGORY_FEED_RSS_URL",
str(self.settings["CATEGORY_FEED_RSS"]),
).format(slug=cat.slug),
feed_title=cat.name,
feed_type="rss",
)
for auth, arts in self.authors:
if self.settings.get("AUTHOR_FEED_ATOM"):
writer.write_feed(
arts,
self.context,
str(self.settings["AUTHOR_FEED_ATOM"]).format(slug=auth.slug),
self.settings.get(
"AUTHOR_FEED_ATOM_URL",
str(self.settings["AUTHOR_FEED_ATOM"]),
).format(slug=auth.slug),
feed_title=auth.name,
)
if self.settings.get("AUTHOR_FEED_RSS"):
writer.write_feed(
arts,
self.context,
str(self.settings["AUTHOR_FEED_RSS"]).format(slug=auth.slug),
self.settings.get(
"AUTHOR_FEED_RSS_URL",
str(self.settings["AUTHOR_FEED_RSS"]),
).format(slug=auth.slug),
feed_title=auth.name,
feed_type="rss",
)
if self.settings.get("TAG_FEED_ATOM") or self.settings.get("TAG_FEED_RSS"):
for tag, arts in self.tags.items():
if self.settings.get("TAG_FEED_ATOM"):
writer.write_feed(
arts,
self.context,
str(self.settings["TAG_FEED_ATOM"]).format(slug=tag.slug),
self.settings.get(
"TAG_FEED_ATOM_URL",
str(self.settings["TAG_FEED_ATOM"]),
).format(slug=tag.slug),
feed_title=tag.name,
)
if self.settings.get("TAG_FEED_RSS"):
writer.write_feed(
arts,
self.context,
str(self.settings["TAG_FEED_RSS"]).format(slug=tag.slug),
self.settings.get(
"TAG_FEED_RSS_URL",
str(self.settings["TAG_FEED_RSS"]),
).format(slug=tag.slug),
feed_title=tag.name,
feed_type="rss",
)
if self.settings.get("TRANSLATION_FEED_ATOM") or self.settings.get(
"TRANSLATION_FEED_RSS"
):
translations_feeds = defaultdict(list)
for article in chain(self.articles, self.translations):
translations_feeds[article.lang].append(article)
for lang, items in translations_feeds.items():
items = order_content(items, order_by=self.settings["ARTICLE_ORDER_BY"])
if self.settings.get("TRANSLATION_FEED_ATOM"):
writer.write_feed(
items,
self.context,
str(self.settings["TRANSLATION_FEED_ATOM"]).format(lang=lang),
self.settings.get(
"TRANSLATION_FEED_ATOM_URL",
str(self.settings["TRANSLATION_FEED_ATOM"]),
).format(lang=lang),
)
if self.settings.get("TRANSLATION_FEED_RSS"):
writer.write_feed(
items,
self.context,
str(self.settings["TRANSLATION_FEED_RSS"]).format(lang=lang),
self.settings.get(
"TRANSLATION_FEED_RSS_URL",
str(self.settings["TRANSLATION_FEED_RSS"]),
).format(lang=lang),
feed_type="rss",
)
def generate_articles(self, write):
"""Generate the articles."""
for article in chain(
self.translations,
self.articles,
self.hidden_translations,
self.hidden_articles,
):
signals.article_generator_write_article.send(self, content=article)
write(
article.save_as,
self.get_template(article.template),
self.context,
article=article,
category=article.category,
override_output=hasattr(article, "override_save_as"),
url=article.url,
blog=True,
)
def generate_period_archives(self, write):
"""Generate per-year, per-month, and per-day archives."""
try:
template = self.get_template("period_archives")
except PelicanTemplateNotFound:
template = self.get_template("archives")
for granularity in self.period_archives:
for period in self.period_archives[granularity]:
context = self.context.copy()
context["period"] = period["period"]
context["period_num"] = period["period_num"]
write(
period["save_as"],
template,
context,
articles=period["articles"],
dates=period["dates"],
template_name="period_archives",
blog=True,
url=period["url"],
all_articles=self.articles,
)
def generate_direct_templates(self, write):
"""Generate direct templates pages"""
for template in self.settings["DIRECT_TEMPLATES"]:
save_as = self.settings.get(
f"{template.upper()}_SAVE_AS", f"{template}.html"
)
url = self.settings.get(f"{template.upper()}_URL", f"{template}.html")
if not save_as:
continue
write(
save_as,
self.get_template(template),
self.context,
articles=self.articles,
dates=self.dates,
blog=True,
template_name=template,
page_name=os.path.splitext(save_as)[0],
url=url,
)
def generate_tags(self, write):
"""Generate Tags pages."""
tag_template = self.get_template("tag")
for tag, articles in self.tags.items():
dates = [article for article in self.dates if article in articles]
write(
tag.save_as,
tag_template,
self.context,
tag=tag,
url=tag.url,
articles=articles,
dates=dates,
template_name="tag",
blog=True,
page_name=tag.page_name,
all_articles=self.articles,
)
def generate_categories(self, write):
"""Generate category pages."""
category_template = self.get_template("category")
for cat, articles in self.categories:
dates = [article for article in self.dates if article in articles]
write(
cat.save_as,
category_template,
self.context,
url=cat.url,
category=cat,
articles=articles,
dates=dates,
template_name="category",
blog=True,
page_name=cat.page_name,
all_articles=self.articles,
)
def generate_authors(self, write):
"""Generate Author pages."""
author_template = self.get_template("author")
for aut, articles in self.authors:
dates = [article for article in self.dates if article in articles]
write(
aut.save_as,
author_template,
self.context,
url=aut.url,
author=aut,
articles=articles,
dates=dates,
template_name="author",
blog=True,
page_name=aut.page_name,
all_articles=self.articles,
)
def generate_drafts(self, write):
"""Generate drafts pages."""
for draft in chain(self.drafts_translations, self.drafts):
write(
draft.save_as,
self.get_template(draft.template),
self.context,
article=draft,
category=draft.category,
override_output=hasattr(draft, "override_save_as"),
blog=True,
all_articles=self.articles,
url=draft.url,
)
def generate_pages(self, writer):
"""Generate the pages on the disk"""
write = partial(writer.write_file, relative_urls=self.settings["RELATIVE_URLS"])
# to minimize the number of relative path stuff modification
# in writer, articles pass first
self.generate_articles(write)
self.generate_period_archives(write)
self.generate_direct_templates(write)
# and subfolders after that
self.generate_tags(write)
self.generate_categories(write)
self.generate_authors(write)
self.generate_drafts(write)
def check_disabled_readers(self) -> None:
self._check_disabled_readers(
self.settings["ARTICLE_PATHS"], exclude=self.settings["ARTICLE_EXCLUDES"]
)
def generate_context(self):
"""Add the articles into the shared context"""
all_articles = []
all_drafts = []
hidden_articles = []
for f in self.get_files(
self.settings["ARTICLE_PATHS"], exclude=self.settings["ARTICLE_EXCLUDES"]
):
article = self.get_cached_data(f, None)
if article is None:
try:
article = self.readers.read_file(
base_path=self.path,
path=f,
content_class=Article,
context=self.context,
preread_signal=signals.article_generator_preread,
preread_sender=self,
context_signal=signals.article_generator_context,
context_sender=self,
)
except Exception as e:
logger.error(
"Could not process %s\n%s",
f,
e,
exc_info=self.settings.get("DEBUG", False),
)
self._add_failed_source_path(f)
continue
if isinstance(article, SkipStub):
logger.debug("Safely skipping %s", f)
continue
if not article.is_valid():
self._add_failed_source_path(f)
continue
self.cache_data(f, article)
if article.status == "published":
all_articles.append(article)
elif article.status == "draft":
all_drafts.append(article)
elif article.status == "hidden":
hidden_articles.append(article)
elif article.status == "skip":
raise AssertionError("Documents with 'skip' status should be skipped")
self.add_source_path(article)
self.add_static_links(article)
def _process(arts):
origs, translations = process_translations(
arts, translation_id=self.settings["ARTICLE_TRANSLATION_ID"]
)
origs = order_content(origs, self.settings["ARTICLE_ORDER_BY"])
return origs, translations
self.articles, self.translations = _process(all_articles)
self.hidden_articles, self.hidden_translations = _process(hidden_articles)
self.drafts, self.drafts_translations = _process(all_drafts)
signals.article_generator_pretaxonomy.send(self)
for article in self.articles:
# only main articles are listed in categories and tags
# not translations or hidden articles
self.categories[article.category].append(article)
if hasattr(article, "tags"):
for tag in article.tags:
self.tags[tag].append(article)
for author in getattr(article, "authors", []):
self.authors[author].append(article)
self.dates = list(self.articles)
self.dates.sort(
key=attrgetter("date"), reverse=self.context["NEWEST_FIRST_ARCHIVES"]
)
self.period_archives = self._build_period_archives(
self.dates, self.articles, self.settings
)
# and generate the output :)
# order the categories per name
self.categories = list(self.categories.items())
self.categories.sort(reverse=self.settings["REVERSE_CATEGORY_ORDER"])
self.authors = list(self.authors.items())
self.authors.sort()
self._update_context(
(
"articles",
"drafts",
"hidden_articles",
"dates",
"tags",
"categories",
"authors",
"related_posts",
)
)
# _update_context flattens dicts, which should not happen to
# period_archives, so we update the context directly for it:
self.context["period_archives"] = self.period_archives
self.save_cache()
self.readers.save_cache()
signals.article_generator_finalized.send(self)
def _build_period_archives(self, sorted_articles, articles, settings):
"""
Compute the groupings of articles, with related attributes, for
per-year, per-month, and per-day archives.
"""
period_archives = defaultdict(list)
period_archives_settings = {
"year": {
"save_as": settings["YEAR_ARCHIVE_SAVE_AS"],
"url": settings["YEAR_ARCHIVE_URL"],
},
"month": {
"save_as": settings["MONTH_ARCHIVE_SAVE_AS"],
"url": settings["MONTH_ARCHIVE_URL"],
},
"day": {
"save_as": settings["DAY_ARCHIVE_SAVE_AS"],
"url": settings["DAY_ARCHIVE_URL"],
},
}
granularity_key_func = {
"year": attrgetter("date.year"),
"month": attrgetter("date.year", "date.month"),
"day": attrgetter("date.year", "date.month", "date.day"),
}
for granularity in "year", "month", "day":
save_as_fmt = period_archives_settings[granularity]["save_as"]
url_fmt = period_archives_settings[granularity]["url"]
key_func = granularity_key_func[granularity]
if not save_as_fmt:
# the archives for this period granularity are not needed
continue
for period, group in groupby(sorted_articles, key=key_func):
archive = {}
dates = list(group)
archive["dates"] = dates
archive["articles"] = [a for a in articles if a in dates]
# use the first date to specify the period archive URL
# and save_as; the specific date used does not matter as
# they all belong to the same period
d = dates[0].date
archive["save_as"] = save_as_fmt.format(date=d)
archive["url"] = url_fmt.format(date=d)
if granularity == "year":
archive["period"] = (period,)
archive["period_num"] = (period,)
else:
month_name = calendar.month_name[period[1]]
if granularity == "month":
archive["period"] = (period[0], month_name)
else:
archive["period"] = (period[0], month_name, period[2])
archive["period_num"] = tuple(period)
period_archives[granularity].append(archive)
return period_archives
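    # Shape sketch of a single "month" entry appended above (values are
    # illustrative; save_as/url come from MONTH_ARCHIVE_SAVE_AS and
    # MONTH_ARCHIVE_URL):
    #
    #     {"dates": [...], "articles": [...],
    #      "save_as": "posts/2013/03/index.html", "url": "posts/2013/03/",
    #      "period": (2013, "March"), "period_num": (2013, 3)}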
def generate_output(self, writer):
self.generate_feeds(writer)
self.generate_pages(writer)
signals.article_writer_finalized.send(self, writer=writer)
def refresh_metadata_intersite_links(self):
for e in chain(
self.articles,
self.translations,
self.drafts,
self.drafts_translations,
self.hidden_articles,
self.hidden_translations,
):
if hasattr(e, "refresh_metadata_intersite_links"):
e.refresh_metadata_intersite_links()
class PagesGenerator(CachingGenerator):
"""Generate pages"""
def __init__(self, *args, **kwargs):
self.pages = []
self.translations = []
self.hidden_pages = []
self.hidden_translations = []
self.draft_pages = []
self.draft_translations = []
super().__init__(*args, **kwargs)
signals.page_generator_init.send(self)
def check_disabled_readers(self) -> None:
self._check_disabled_readers(
self.settings["PAGE_PATHS"], exclude=self.settings["PAGE_EXCLUDES"]
)
def generate_context(self):
all_pages = []
hidden_pages = []
draft_pages = []
for f in self.get_files(
self.settings["PAGE_PATHS"], exclude=self.settings["PAGE_EXCLUDES"]
):
page = self.get_cached_data(f, None)
if page is None:
try:
page = self.readers.read_file(
base_path=self.path,
path=f,
content_class=Page,
context=self.context,
preread_signal=signals.page_generator_preread,
preread_sender=self,
context_signal=signals.page_generator_context,
context_sender=self,
)
except Exception as e:
logger.error(
"Could not process %s\n%s",
f,
e,
exc_info=self.settings.get("DEBUG", False),
)
self._add_failed_source_path(f)
continue
if isinstance(page, SkipStub):
logger.debug("Safely skipping %s", f)
continue
if not page.is_valid():
self._add_failed_source_path(f)
continue
self.cache_data(f, page)
if page.status == "published":
all_pages.append(page)
elif page.status == "hidden":
hidden_pages.append(page)
elif page.status == "draft":
draft_pages.append(page)
elif page.status == "skip":
raise AssertionError("Documents with 'skip' status should be skipped")
self.add_source_path(page)
self.add_static_links(page)
def _process(pages):
origs, translations = process_translations(
pages, translation_id=self.settings["PAGE_TRANSLATION_ID"]
)
origs = order_content(origs, self.settings["PAGE_ORDER_BY"])
return origs, translations
self.pages, self.translations = _process(all_pages)
self.hidden_pages, self.hidden_translations = _process(hidden_pages)
self.draft_pages, self.draft_translations = _process(draft_pages)
self._update_context(("pages", "hidden_pages", "draft_pages"))
self.save_cache()
self.readers.save_cache()
signals.page_generator_finalized.send(self)
def generate_output(self, writer):
for page in chain(
self.translations,
self.pages,
self.hidden_translations,
self.hidden_pages,
self.draft_translations,
self.draft_pages,
):
signals.page_generator_write_page.send(self, content=page)
writer.write_file(
page.save_as,
self.get_template(page.template),
self.context,
page=page,
relative_urls=self.settings["RELATIVE_URLS"],
override_output=hasattr(page, "override_save_as"),
url=page.url,
)
signals.page_writer_finalized.send(self, writer=writer)
def refresh_metadata_intersite_links(self):
for e in chain(
self.pages,
self.hidden_pages,
self.hidden_translations,
self.draft_pages,
self.draft_translations,
):
if hasattr(e, "refresh_metadata_intersite_links"):
e.refresh_metadata_intersite_links()
class StaticGenerator(Generator):
"""copy static paths (what you want to copy, like images, medias etc.
to output"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fallback_to_symlinks = False
signals.static_generator_init.send(self)
def check_disabled_readers(self) -> None:
self._check_disabled_readers(
self.settings["STATIC_PATHS"], exclude=self.settings["STATIC_EXCLUDES"]
)
def generate_context(self):
self.staticfiles = []
linked_files = set(self.context["static_links"])
found_files = self.get_files(
self.settings["STATIC_PATHS"],
exclude=self.settings["STATIC_EXCLUDES"],
extensions=False,
)
for f in linked_files | found_files:
# skip content source files unless the user explicitly wants them
if self.settings["STATIC_EXCLUDE_SOURCES"]:
if self._is_potential_source_path(f):
continue
static = self.readers.read_file(
base_path=self.path,
path=f,
content_class=Static,
fmt="static",
context=self.context,
preread_signal=signals.static_generator_preread,
preread_sender=self,
context_signal=signals.static_generator_context,
context_sender=self,
)
self.staticfiles.append(static)
self.add_source_path(static, static=True)
self._update_context(("staticfiles",))
signals.static_generator_finalized.send(self)
def generate_output(self, writer):
self._copy_paths(
self.settings["THEME_STATIC_PATHS"],
self.theme,
self.settings["THEME_STATIC_DIR"],
self.output_path,
os.curdir,
)
for sc in self.context["staticfiles"]:
if self._file_update_required(sc):
self._link_or_copy_staticfile(sc)
else:
logger.debug("%s is up to date, not copying", sc.source_path)
def _copy_paths(self, paths, source, destination, output_path, final_path=None):
"""Copy all the paths from source to destination"""
for path in paths:
source_path = os.path.join(source, path)
if final_path:
if os.path.isfile(source_path):
destination_path = os.path.join(
output_path, destination, final_path, os.path.basename(path)
)
else:
destination_path = os.path.join(
output_path, destination, final_path
)
else:
destination_path = os.path.join(output_path, destination, path)
copy(source_path, destination_path, self.settings["IGNORE_FILES"])
def _file_update_required(self, staticfile):
source_path = os.path.join(self.path, staticfile.source_path)
save_as = os.path.join(self.output_path, staticfile.save_as)
if not os.path.exists(save_as):
return True
elif self.settings["STATIC_CREATE_LINKS"] and os.path.samefile(
source_path, save_as
):
return False
elif (
self.settings["STATIC_CREATE_LINKS"]
and os.path.realpath(save_as) == source_path
):
return False
elif not self.settings["STATIC_CHECK_IF_MODIFIED"]:
return True
else:
return self._source_is_newer(staticfile)
def _source_is_newer(self, staticfile):
source_path = os.path.join(self.path, staticfile.source_path)
save_as = os.path.join(self.output_path, staticfile.save_as)
s_mtime = os.path.getmtime(source_path)
d_mtime = os.path.getmtime(save_as)
return s_mtime - d_mtime > 0.000001 # noqa: PLR2004
def _link_or_copy_staticfile(self, sc):
if self.settings["STATIC_CREATE_LINKS"]:
self._link_staticfile(sc)
else:
self._copy_staticfile(sc)
def _copy_staticfile(self, sc):
source_path = os.path.join(self.path, sc.source_path)
save_as = os.path.join(self.output_path, sc.save_as)
self._mkdir(os.path.dirname(save_as))
copy(source_path, save_as)
logger.info("Copying %s to %s", sc.source_path, sc.save_as)
def _link_staticfile(self, sc):
source_path = os.path.join(self.path, sc.source_path)
save_as = os.path.join(self.output_path, sc.save_as)
self._mkdir(os.path.dirname(save_as))
try:
if os.path.lexists(save_as):
os.unlink(save_as)
logger.info("Linking %s and %s", sc.source_path, sc.save_as)
if self.fallback_to_symlinks:
os.symlink(source_path, save_as)
else:
os.link(source_path, save_as)
except OSError as err:
if err.errno == errno.EXDEV: # 18: Invalid cross-device link
logger.debug(
"Cross-device links not valid. Creating symbolic links instead."
)
self.fallback_to_symlinks = True
self._link_staticfile(sc)
else:
raise err
def _mkdir(self, path):
if os.path.lexists(path) and not os.path.isdir(path):
os.unlink(path)
mkdir_p(path)
class SourceFileGenerator(Generator):
def generate_context(self):
self.output_extension = self.settings["OUTPUT_SOURCES_EXTENSION"]
def _create_source(self, obj):
output_path, _ = os.path.splitext(obj.save_as)
dest = os.path.join(self.output_path, output_path + self.output_extension)
copy(obj.source_path, dest)
def generate_output(self, writer=None):
logger.info("Generating source files...")
for obj in chain(self.context["articles"], self.context["pages"]):
self._create_source(obj)
for obj_trans in obj.translations:
self._create_source(obj_trans)
| 42,508 | Python | .py | 986 | 29.511156 | 88 | 0.547184 | getpelican/pelican | 12,478 | 1,806 | 72 | AGPL-3.0 | 9/5/2024, 5:09:45 PM (Europe/Amsterdam) |
6,212 | pelican_import.py | getpelican_pelican/pelican/tools/pelican_import.py |
#!/usr/bin/env python
import argparse
import datetime
import logging
import os
import re
import subprocess
import sys
import tempfile
import time
from collections import defaultdict
from html import unescape
from urllib.error import URLError
from urllib.parse import quote, urlparse, urlsplit, urlunsplit
from urllib.request import urlretrieve
import dateutil.parser
# because logging.setLoggerClass has to be called before logging.getLogger
from pelican.log import init
from pelican.settings import DEFAULT_CONFIG
from pelican.utils import SafeDatetime, slugify
logger = logging.getLogger(__name__)
def decode_wp_content(content, br=True):
pre_tags = {}
if content.strip() == "":
return ""
content += "\n"
if "<pre" in content:
pre_parts = content.split("</pre>")
last_pre = pre_parts.pop()
content = ""
pre_index = 0
for pre_part in pre_parts:
start = pre_part.find("<pre")
if start == -1:
content = content + pre_part
continue
name = f"<pre wp-pre-tag-{pre_index}></pre>"
pre_tags[name] = pre_part[start:] + "</pre>"
content = content + pre_part[0:start] + name
pre_index += 1
content = content + last_pre
content = re.sub(r"<br />\s*<br />", "\n\n", content)
allblocks = (
"(?:table|thead|tfoot|caption|col|colgroup|tbody|tr|"
"td|th|div|dl|dd|dt|ul|ol|li|pre|select|option|form|"
"map|area|blockquote|address|math|style|p|h[1-6]|hr|"
"fieldset|noscript|samp|legend|section|article|aside|"
"hgroup|header|footer|nav|figure|figcaption|details|"
"menu|summary)"
)
content = re.sub(r"(<" + allblocks + r"[^>]*>)", "\n\\1", content)
content = re.sub(r"(</" + allblocks + r">)", "\\1\n\n", content)
# content = content.replace("\r\n", "\n")
if "<object" in content:
# no <p> inside object/embed
content = re.sub(r"\s*<param([^>]*)>\s*", "<param\\1>", content)
content = re.sub(r"\s*</embed>\s*", "</embed>", content)
# content = re.sub(r'/\n\n+/', '\n\n', content)
pgraphs = filter(lambda s: s != "", re.split(r"\n\s*\n", content))
content = ""
for p in pgraphs:
content = content + "<p>" + p.strip() + "</p>\n"
# under certain strange conditions it could create
# a P of entirely whitespace
content = re.sub(r"<p>\s*</p>", "", content)
content = re.sub(r"<p>([^<]+)</(div|address|form)>", "<p>\\1</p></\\2>", content)
# don't wrap tags
content = re.sub(r"<p>\s*(</?" + allblocks + r"[^>]*>)\s*</p>", "\\1", content)
# problem with nested lists
content = re.sub(r"<p>(<li.*)</p>", "\\1", content)
content = re.sub(r"<p><blockquote([^>]*)>", "<blockquote\\1><p>", content)
content = content.replace("</blockquote></p>", "</p></blockquote>")
content = re.sub(r"<p>\s*(</?" + allblocks + "[^>]*>)", "\\1", content)
content = re.sub(r"(</?" + allblocks + r"[^>]*>)\s*</p>", "\\1", content)
if br:
def _preserve_newline(match):
return match.group(0).replace("\n", "<WPPreserveNewline />")
content = re.sub(r"/<(script|style).*?<\/\\1>/s", _preserve_newline, content)
# optionally make line breaks
content = re.sub(r"(?<!<br />)\s*\n", "<br />\n", content)
content = content.replace("<WPPreserveNewline />", "\n")
content = re.sub(r"(</?" + allblocks + r"[^>]*>)\s*<br />", "\\1", content)
content = re.sub(
r"<br />(\s*</?(?:p|li|div|dl|dd|dt|th|pre|td|ul|ol)[^>]*>)", "\\1", content
)
content = re.sub(r"\n</p>", "</p>", content)
if pre_tags:
def _multi_replace(dic, string):
pattern = r"|".join(map(re.escape, dic.keys()))
return re.sub(pattern, lambda m: dic[m.group()], string)
content = _multi_replace(pre_tags, content)
# convert [caption] tags into <figure>
content = re.sub(
r"\[caption(?:.*?)(?:caption=\"(.*?)\")?\]"
r"((?:\<a(?:.*?)\>)?(?:\<img.*?\>)(?:\<\/a\>)?)\s?(.*?)\[\/caption\]",
r"<figure>\n\2\n<figcaption>\1\3</figcaption>\n</figure>",
content,
)
return content
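# Behaviour sketch, traceable through the substitutions above: a blank line
# becomes a paragraph break, mirroring WordPress's wpautop():
#
#     decode_wp_content("one\n\ntwo")
#     # -> "<p>one</p>\n<p>two</p>\n"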
def _import_bs4():
"""Import and return bs4, otherwise sys.exit."""
try:
import bs4
except ImportError:
error = (
'Missing dependency "BeautifulSoup4" and "lxml" required to '
"import XML files."
)
sys.exit(error)
return bs4
def file_to_soup(xml, features="xml"):
"""Reads a file, returns soup."""
bs4 = _import_bs4()
with open(xml, encoding="utf-8") as infile:
xmlfile = infile.read()
soup = bs4.BeautifulSoup(xmlfile, features)
return soup
def get_filename(post_name, post_id):
if post_name is None or post_name.isspace():
return post_id
else:
return post_name
def wp2fields(xml, wp_custpost=False):
"""Opens a wordpress XML file, and yield Pelican fields"""
soup = file_to_soup(xml)
items = soup.rss.channel.findAll("item")
for item in items:
if item.find("status").string in ["publish", "draft"]:
try:
# Use HTMLParser due to issues with BeautifulSoup 3
title = unescape(item.title.contents[0])
except IndexError:
title = "No title [{}]".format(item.find("post_name").string)
logger.warning('Post "%s" is lacking a proper title', title)
post_name = item.find("post_name").string
post_id = item.find("post_id").string
filename = get_filename(post_name, post_id)
content = item.find("encoded").string
raw_date = item.find("post_date").string
if raw_date == "0000-00-00 00:00:00":
date = None
else:
date_object = SafeDatetime.strptime(raw_date, "%Y-%m-%d %H:%M:%S")
date = date_object.strftime("%Y-%m-%d %H:%M")
author = item.find("creator").string
categories = [
cat.string for cat in item.findAll("category", {"domain": "category"})
]
tags = [
tag.string for tag in item.findAll("category", {"domain": "post_tag"})
]
# To publish a post the status should be 'published'
status = (
"published"
if item.find("status").string == "publish"
else item.find("status").string
)
kind = "article"
post_type = item.find("post_type").string
if post_type == "page":
kind = "page"
elif wp_custpost:
if post_type == "post":
pass
                # Old behaviour was to treat everything that is not a page
                # as an article. Theoretically all attachments have
                # status == inherit, so no attachments should be here; this
                # branch is kept in case that doesn't hold true.
elif post_type == "attachment":
pass
else:
kind = post_type
yield (
title,
content,
filename,
date,
author,
categories,
tags,
status,
kind,
"wp-html",
)
def blogger2fields(xml):
"""Opens a blogger XML file, and yield Pelican fields"""
soup = file_to_soup(xml)
entries = soup.feed.findAll("entry")
for entry in entries:
raw_kind = entry.find(
"category", {"scheme": "http://schemas.google.com/g/2005#kind"}
).get("term")
if raw_kind == "http://schemas.google.com/blogger/2008/kind#post":
kind = "article"
elif raw_kind == "http://schemas.google.com/blogger/2008/kind#comment":
kind = "comment"
elif raw_kind == "http://schemas.google.com/blogger/2008/kind#page":
kind = "page"
else:
continue
try:
assert kind != "comment"
filename = entry.find("link", {"rel": "alternate"})["href"]
filename = os.path.splitext(os.path.basename(filename))[0]
except (AssertionError, TypeError, KeyError):
filename = entry.find("id").string.split(".")[-1]
title = entry.find("title").string or ""
content = entry.find("content").string
raw_date = entry.find("published").string
if hasattr(SafeDatetime, "fromisoformat"):
date_object = SafeDatetime.fromisoformat(raw_date)
else:
date_object = SafeDatetime.strptime(raw_date[:23], "%Y-%m-%dT%H:%M:%S.%f")
date = date_object.strftime("%Y-%m-%d %H:%M")
author = entry.find("author").find("name").string
# blogger posts only have tags, no category
tags = [
tag.get("term")
for tag in entry.findAll(
"category", {"scheme": "http://www.blogger.com/atom/ns#"}
)
]
# Drafts have <app:control><app:draft>yes</app:draft></app:control>
status = "published"
try:
if entry.find("control").find("draft").string == "yes":
status = "draft"
except AttributeError:
pass
yield (title, content, filename, date, author, None, tags, status, kind, "html")
def dc2fields(file):
"""Opens a Dotclear export file, and yield pelican fields"""
try:
from bs4 import BeautifulSoup
except ImportError:
error = (
"Missing dependency "
'"BeautifulSoup4" and "lxml" required '
"to import Dotclear files."
)
sys.exit(error)
in_cat = False
in_post = False
category_list = {}
posts = []
with open(file, encoding="utf-8") as f:
for line in f:
# remove final \n
line = line[:-1]
if line.startswith("[category"):
in_cat = True
elif line.startswith("[post"):
in_post = True
elif in_cat:
fields = line.split('","')
if not line:
in_cat = False
else:
# remove 1st and last ""
fields[0] = fields[0][1:]
# fields[-1] = fields[-1][:-1]
category_list[fields[0]] = fields[2]
elif in_post:
if not line:
in_post = False
break
else:
posts.append(line)
print("%i posts read." % len(posts))
subs = DEFAULT_CONFIG["SLUG_REGEX_SUBSTITUTIONS"]
for post in posts:
fields = post.split('","')
# post_id = fields[0][1:]
# blog_id = fields[1]
# user_id = fields[2]
cat_id = fields[3]
# post_dt = fields[4]
# post_tz = fields[5]
post_creadt = fields[6]
# post_upddt = fields[7]
# post_password = fields[8]
# post_type = fields[9]
post_format = fields[10]
# post_url = fields[11]
# post_lang = fields[12]
post_title = fields[13]
post_excerpt = fields[14]
post_excerpt_xhtml = fields[15]
post_content = fields[16]
post_content_xhtml = fields[17]
# post_notes = fields[18]
# post_words = fields[19]
# post_status = fields[20]
# post_selected = fields[21]
# post_position = fields[22]
# post_open_comment = fields[23]
# post_open_tb = fields[24]
# nb_comment = fields[25]
# nb_trackback = fields[26]
post_meta = fields[27]
# redirect_url = fields[28][:-1]
# remove seconds
post_creadt = ":".join(post_creadt.split(":")[0:2])
author = ""
categories = []
tags = []
if cat_id:
categories = [category_list[id].strip() for id in cat_id.split(",")]
# Get tags related to a post
tag = (
post_meta.replace("{", "")
.replace("}", "")
.replace('a:1:s:3:\\"tag\\";a:', "")
.replace("a:0:", "")
)
if len(tag) > 1:
            # tag[:1] is the serialized tag count; compare its value,
            # not its length
            if int(tag[:1]) == 1:
newtag = tag.split('"')[1]
tags.append(
BeautifulSoup(newtag, "xml")
# bs4 always outputs UTF-8
.decode("utf-8")
)
else:
i = 1
j = 1
while i <= int(tag[:1]):
newtag = tag.split('"')[j].replace("\\", "")
tags.append(
BeautifulSoup(newtag, "xml")
# bs4 always outputs UTF-8
.decode("utf-8")
)
i = i + 1
if j < int(tag[:1]) * 2:
j = j + 2
"""
dotclear2 does not use markdown by default unless
you use the markdown plugin
Ref: http://plugins.dotaddict.org/dc2/details/formatting-markdown
"""
if post_format == "markdown":
content = post_excerpt + post_content
else:
content = post_excerpt_xhtml + post_content_xhtml
content = content.replace("\\n", "")
post_format = "html"
kind = "article" # TODO: Recognise pages
status = "published" # TODO: Find a way for draft posts
yield (
post_title,
content,
slugify(post_title, regex_subs=subs),
post_creadt,
author,
categories,
tags,
status,
kind,
post_format,
)
def _get_tumblr_posts(api_key, blogname, offset=0):
import json
import urllib.request as urllib_request
url = (
"https://api.tumblr.com/v2/blog/%s.tumblr.com/"
"posts?api_key=%s&offset=%d&filter=raw"
) % (blogname, api_key, offset)
request = urllib_request.Request(url)
handle = urllib_request.urlopen(request)
posts = json.loads(handle.read().decode("utf-8"))
return posts.get("response").get("posts")
def tumblr2fields(api_key, blogname):
"""Imports Tumblr posts (API v2)"""
offset = 0
posts = _get_tumblr_posts(api_key, blogname, offset)
subs = DEFAULT_CONFIG["SLUG_REGEX_SUBSTITUTIONS"]
while len(posts) > 0:
for post in posts:
title = (
post.get("title")
or post.get("source_title")
or post.get("type").capitalize()
)
slug = post.get("slug") or slugify(title, regex_subs=subs)
tags = post.get("tags")
timestamp = post.get("timestamp")
date = SafeDatetime.fromtimestamp(
int(timestamp), tz=datetime.timezone.utc
).strftime("%Y-%m-%d %H:%M:%S%z")
slug = (
SafeDatetime.fromtimestamp(
int(timestamp), tz=datetime.timezone.utc
).strftime("%Y-%m-%d-")
+ slug
)
format = post.get("format")
content = post.get("body")
type = post.get("type")
if type == "photo":
if format == "markdown":
fmtstr = ""
else:
fmtstr = '<img alt="%s" src="%s" />'
content = "\n".join(
fmtstr
% (photo.get("caption"), photo.get("original_size").get("url"))
for photo in post.get("photos")
)
elif type == "quote":
if format == "markdown":
fmtstr = "\n\n— %s"
else:
fmtstr = "<p>— %s</p>"
content = post.get("text") + fmtstr % post.get("source")
elif type == "link":
if format == "markdown":
fmtstr = "[via](%s)\n\n"
else:
fmtstr = '<p><a href="%s">via</a></p>\n'
content = fmtstr % post.get("url") + post.get("description")
elif type == "audio":
if format == "markdown":
fmtstr = "[via](%s)\n\n"
else:
fmtstr = '<p><a href="%s">via</a></p>\n'
content = (
fmtstr % post.get("source_url")
+ post.get("caption")
+ post.get("player")
)
elif type == "video":
if format == "markdown":
fmtstr = "[via](%s)\n\n"
else:
fmtstr = '<p><a href="%s">via</a></p>\n'
source = fmtstr % post.get("source_url")
caption = post.get("caption")
players = [
# If embed_code is False, couldn't get the video
player.get("embed_code") or None
for player in post.get("player")
]
# If there are no embeddable players, say so, once
if len(players) > 0 and all(player is None for player in players):
players = "<p>(This video isn't available anymore.)</p>\n"
else:
players = "\n".join(players)
content = source + caption + players
elif type == "answer":
title = post.get("question")
content = (
"<p>"
'<a href="{}" rel="external nofollow">{}</a>'
": {}"
"</p>\n"
" {}".format(
post.get("asking_name"),
post.get("asking_url"),
post.get("question"),
post.get("answer"),
)
)
content = content.rstrip() + "\n"
kind = "article"
status = "published" # TODO: Find a way for draft posts
yield (
title,
content,
slug,
date,
post.get("blog_name"),
[type],
tags,
status,
kind,
format,
)
offset += len(posts)
posts = _get_tumblr_posts(api_key, blogname, offset)
def strip_medium_post_content(soup) -> str:
"""Strip some tags and attributes from medium post content.
For example, the 'section' and 'div' tags cause trouble while rendering.
The problem with these tags is you can get a section divider (--------------)
that is not between two pieces of content. For example:
Some text.
.. container:: section-divider
--------------
.. container:: section-content
More content.
In this case, pandoc complains: "Unexpected section title or transition."
Also, the "id" and "name" attributes in tags cause similar problems. They show
up in .rst as extra junk that separates transitions.
"""
# Remove tags
# section and div cause problems
# footer also can cause problems, and has nothing we want to keep
# See https://stackoverflow.com/a/8439761
invalid_tags = ["section", "div", "footer"]
for tag in invalid_tags:
for match in soup.findAll(tag):
match.replaceWithChildren()
# Remove attributes
# See https://stackoverflow.com/a/9045719
invalid_attributes = ["name", "id", "class"]
bs4 = _import_bs4()
for tag in soup.descendants:
if isinstance(tag, bs4.element.Tag):
tag.attrs = {
key: value
for key, value in tag.attrs.items()
if key not in invalid_attributes
}
# Get the string of all content, keeping other tags
all_content = "".join(str(element) for element in soup.contents)
return all_content
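# Minimal sketch of the effect, with an assumed snippet (requires beautifulsoup4):
#     soup = bs4.BeautifulSoup(
#         '<section id="a"><div class="b"><p>Hi</p></div></section>', "html.parser")
#     strip_medium_post_content(soup)  # -> "<p>Hi</p>"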
def mediumpost2fields(filepath: str) -> tuple:
"""Take an HTML post from a medium export, return Pelican fields."""
soup = file_to_soup(filepath, "html.parser")
if not soup:
raise ValueError(f"{filepath} could not be parsed by beautifulsoup")
kind = "article"
content = soup.find("section", class_="e-content")
if not content:
raise ValueError(f"{filepath}: Post has no content")
title = soup.find("title").string or ""
raw_date = soup.find("time", class_="dt-published")
date = None
if raw_date:
# This datetime can include timezone, e.g., "2017-04-21T17:11:55.799Z"
# python before 3.11 can't parse the timezone using datetime.fromisoformat
# See also https://docs.python.org/3.10/library/datetime.html#datetime.datetime.fromisoformat
# "This does not support parsing arbitrary ISO 8601 strings"
# So, we use dateutil.parser, which can handle it.
date_object = dateutil.parser.parse(raw_date.attrs["datetime"])
date = date_object.strftime("%Y-%m-%d %H:%M")
status = "published"
else:
status = "draft"
author = soup.find("a", class_="p-author h-card")
if author:
author = author.string
# Now that we're done with classes, we can strip the content
content = strip_medium_post_content(content)
# medium HTML export doesn't have tag or category
# RSS feed has tags, but it doesn't have all the posts.
tags = ()
slug = medium_slug(filepath)
# TODO: make the fields a python dataclass
return (
title,
content,
slug,
date,
author,
None,
tags,
status,
kind,
"html",
)
def medium_slug(filepath: str) -> str:
"""Make the filepath of a medium exported file into a slug."""
# slug: filename without extension
slug = os.path.basename(filepath)
slug = os.path.splitext(slug)[0]
# A medium export filename looks like date_-title-...html
# But, RST doesn't like "_-" (see https://github.com/sphinx-doc/sphinx/issues/4350)
# so get rid of it
slug = slug.replace("_-", "-")
    # drop the hex string Medium appends to the filename; it adds nothing useful.
# e.g., "-a8a8a8a8" or "---a9a9a9a9"
# also: drafts don't need "--DRAFT"
slug = re.sub(r"((-)+([0-9a-f]+|DRAFT))+$", "", slug)
return slug
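# Illustrative example with a hypothetical export filename:
#     medium_slug("posts/2017-04-21_-my-post--DRAFT---a9a9a9a9.html")
#     # -> "2017-04-21-my-post"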
def mediumposts2fields(medium_export_dir: str):
"""Take HTML posts in a medium export directory, and yield Pelican fields."""
for file in os.listdir(medium_export_dir):
filename = os.fsdecode(file)
yield mediumpost2fields(os.path.join(medium_export_dir, filename))
def feed2fields(file):
"""Read a feed and yield pelican fields"""
import feedparser
d = feedparser.parse(file)
subs = DEFAULT_CONFIG["SLUG_REGEX_SUBSTITUTIONS"]
for entry in d.entries:
date = (
time.strftime("%Y-%m-%d %H:%M", entry.updated_parsed)
if hasattr(entry, "updated_parsed")
else None
)
author = entry.author if hasattr(entry, "author") else None
tags = [e["term"] for e in entry.tags] if hasattr(entry, "tags") else None
slug = slugify(entry.title, regex_subs=subs)
kind = "article"
yield (
entry.title,
entry.description,
slug,
date,
author,
[],
tags,
None,
kind,
"html",
)
def build_header(
title, date, author, categories, tags, slug, status=None, attachments=None
):
"""Build a header from a list of fields"""
from docutils.utils import column_width
header = "{}\n{}\n".format(title, "#" * column_width(title))
if date:
header += f":date: {date}\n"
if author:
header += f":author: {author}\n"
if categories:
header += ":category: {}\n".format(", ".join(categories))
if tags:
header += ":tags: {}\n".format(", ".join(tags))
if slug:
header += f":slug: {slug}\n"
if status:
header += f":status: {status}\n"
if attachments:
header += ":attachments: {}\n".format(", ".join(attachments))
header += "\n"
return header
def build_asciidoc_header(
title, date, author, categories, tags, slug, status=None, attachments=None
):
"""Build a header from a list of fields"""
header = f"= {title}\n"
if author:
header += f"{author}\n"
if date:
header += f"{date}\n"
if categories:
header += ":category: {}\n".format(", ".join(categories))
if tags:
header += ":tags: {}\n".format(", ".join(tags))
if slug:
header += f":slug: {slug}\n"
if status:
header += f":status: {status}\n"
if attachments:
header += ":attachments: {}\n".format(", ".join(attachments))
header += "\n"
return header
def build_markdown_header(
title, date, author, categories, tags, slug, status=None, attachments=None
):
"""Build a header from a list of fields"""
header = f"Title: {title}\n"
if date:
header += f"Date: {date}\n"
if author:
header += f"Author: {author}\n"
if categories:
header += "Category: {}\n".format(", ".join(categories))
if tags:
header += "Tags: {}\n".format(", ".join(tags))
if slug:
header += f"Slug: {slug}\n"
if status:
header += f"Status: {status}\n"
if attachments:
header += "Attachments: {}\n".format(", ".join(attachments))
header += "\n"
return header
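# Sketch of the output with assumed example values:
#     build_markdown_header("Hello", "2024-01-01 10:00", None, None, ["a", "b"], None)
#     # -> "Title: Hello\nDate: 2024-01-01 10:00\nTags: a, b\n\n"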
def get_ext(out_markup, in_markup="html"):
if out_markup == "asciidoc":
ext = ".adoc"
elif in_markup == "markdown" or out_markup == "markdown":
ext = ".md"
else:
ext = ".rst"
return ext
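# Quick reference (assumed calls): get_ext("asciidoc") -> ".adoc";
# get_ext("rst", in_markup="markdown") -> ".md"; get_ext("rst") -> ".rst".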
def get_out_filename(
output_path,
filename,
ext,
kind,
dirpage,
dircat,
categories,
wp_custpost,
slug_subs,
):
filename = os.path.basename(filename)
# Enforce filename restrictions for various filesystems at once; see
# https://en.wikipedia.org/wiki/Filename#Reserved_characters_and_words
# we do not need to filter words because an extension will be appended
filename = re.sub(r'[<>:"/\\|?*^% ]', "-", filename) # invalid chars
filename = filename.lstrip(".") # should not start with a dot
if not filename:
filename = "_"
    filename = filename[:249]  # leave room for a 5-character extension (255-char limit)
out_filename = os.path.join(output_path, filename + ext)
# option to put page posts in pages/ subdirectory
if dirpage and kind == "page":
pages_dir = os.path.join(output_path, "pages")
if not os.path.isdir(pages_dir):
os.mkdir(pages_dir)
out_filename = os.path.join(pages_dir, filename + ext)
elif not dirpage and kind == "page":
pass
# option to put wp custom post types in directories with post type
# names. Custom post types can also have categories so option to
# create subdirectories with category names
elif kind != "article":
if wp_custpost:
typename = slugify(kind, regex_subs=slug_subs)
else:
typename = ""
kind = "article"
if dircat and (len(categories) > 0):
catname = slugify(categories[0], regex_subs=slug_subs, preserve_case=True)
else:
catname = ""
out_filename = os.path.join(output_path, typename, catname, filename + ext)
if not os.path.isdir(os.path.join(output_path, typename, catname)):
os.makedirs(os.path.join(output_path, typename, catname))
# option to put files in directories with categories names
elif dircat and (len(categories) > 0):
catname = slugify(categories[0], regex_subs=slug_subs, preserve_case=True)
out_filename = os.path.join(output_path, catname, filename + ext)
if not os.path.isdir(os.path.join(output_path, catname)):
os.mkdir(os.path.join(output_path, catname))
return out_filename
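# For example (hypothetical values): a page named 'My: Page?' imported with
# dirpage=True and ext=".rst" is scrubbed to "My--Page-" and written to
# <output_path>/pages/My--Page-.rst.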
def get_attachments(xml):
"""returns a dictionary of posts that have attachments with a list
of the attachment_urls
"""
soup = file_to_soup(xml)
items = soup.rss.channel.findAll("item")
names = {}
attachments = []
for item in items:
kind = item.find("post_type").string
post_name = item.find("post_name").string
post_id = item.find("post_id").string
if kind == "attachment":
attachments.append(
(item.find("post_parent").string, item.find("attachment_url").string)
)
else:
filename = get_filename(post_name, post_id)
names[post_id] = filename
attachedposts = defaultdict(set)
for parent, url in attachments:
try:
parent_name = names[parent]
except KeyError:
# attachment's parent is not a valid post
parent_name = None
attachedposts[parent_name].add(url)
return attachedposts
def download_attachments(output_path, urls):
"""Downloads WordPress attachments and returns a list of paths to
attachments that can be associated with a post (relative path to output
    directory). Files that fail to download will not be added to posts."""
locations = {}
for url in urls:
path = urlparse(url).path
# teardown path and rebuild to negate any errors with
# os.path.join and leading /'s
path = path.split("/")
filename = path.pop(-1)
localpath = ""
for item in path:
if sys.platform != "win32" or ":" not in item:
localpath = os.path.join(localpath, item)
full_path = os.path.join(output_path, localpath)
# Generate percent-encoded URL
scheme, netloc, path, query, fragment = urlsplit(url)
if scheme != "file":
path = quote(path)
url = urlunsplit((scheme, netloc, path, query, fragment))
if not os.path.exists(full_path):
os.makedirs(full_path)
print(f"downloading {filename}")
try:
urlretrieve(url, os.path.join(full_path, filename))
locations[url] = os.path.join(localpath, filename)
except (URLError, OSError) as e:
            # Python 2.7 threw an IOError rather than a URLError here
logger.warning("No file could be downloaded from %s\n%s", url, e)
return locations
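# Sketch with an assumed URL: "http://ex.org/wp-content/uploads/a b.jpg" is
# fetched with its path percent-encoded ("a%20b.jpg") and saved under
# <output_path>/wp-content/uploads/, keyed in `locations` by the encoded URL.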
def is_pandoc_needed(in_markup):
return in_markup in ("html", "wp-html")
def get_pandoc_version():
cmd = ["pandoc", "--version"]
try:
output = subprocess.check_output(cmd, text=True)
except (subprocess.CalledProcessError, OSError) as e:
logger.warning("Pandoc version unknown: %s", e)
return ()
return tuple(int(i) for i in output.split()[1].split("."))
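# e.g. when `pandoc --version` starts with "pandoc 2.19.2" this returns
# (2, 19, 2); if pandoc is missing it returns (), a falsy sentinel that
# fields2pelican checks before shelling out.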
def update_links_to_attached_files(content, attachments):
for old_url, new_path in attachments.items():
# url may occur both with http:// and https://
http_url = old_url.replace("https://", "http://")
https_url = old_url.replace("http://", "https://")
for url in [http_url, https_url]:
content = content.replace(url, "{static}" + new_path)
return content
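# Sketch with an assumed mapping:
#     links = {"http://ex.org/up/img.jpg": "up/img.jpg"}
#     update_links_to_attached_files('<img src="https://ex.org/up/img.jpg">', links)
#     # -> '<img src="{static}up/img.jpg">'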
def fields2pelican(
fields,
out_markup,
output_path,
dircat=False,
strip_raw=False,
disable_slugs=False,
dirpage=False,
filename_template=None,
filter_author=None,
wp_custpost=False,
wp_attach=False,
attachments=None,
):
pandoc_version = get_pandoc_version()
posts_require_pandoc = []
slug_subs = DEFAULT_CONFIG["SLUG_REGEX_SUBSTITUTIONS"]
for (
title,
content,
filename,
date,
author,
categories,
tags,
status,
kind,
in_markup,
) in fields:
if filter_author and filter_author != author:
continue
if is_pandoc_needed(in_markup) and not pandoc_version:
posts_require_pandoc.append(filename)
        slug = (filename or None) if not disable_slugs else None
assert slug is None or filename == os.path.basename(
filename
), f"filename is not a basename: {filename}"
if wp_attach and attachments:
try:
urls = attachments[filename]
links = download_attachments(output_path, urls)
except KeyError:
links = None
else:
links = None
ext = get_ext(out_markup, in_markup)
if ext == ".adoc":
header = build_asciidoc_header(
title, date, author, categories, tags, slug, status, attachments
)
elif ext == ".md":
header = build_markdown_header(
title,
date,
author,
categories,
tags,
slug,
status,
links.values() if links else None,
)
else:
out_markup = "rst"
header = build_header(
title,
date,
author,
categories,
tags,
slug,
status,
links.values() if links else None,
)
out_filename = get_out_filename(
output_path,
filename,
ext,
kind,
dirpage,
dircat,
categories,
wp_custpost,
slug_subs,
)
print(out_filename)
if in_markup in ("html", "wp-html"):
with tempfile.TemporaryDirectory() as tmpdir:
html_filename = os.path.join(tmpdir, "pandoc-input.html")
# Replace newlines with paragraphs wrapped with <p> so
# HTML is valid before conversion
if in_markup == "wp-html":
new_content = decode_wp_content(content)
else:
paragraphs = content.splitlines()
paragraphs = [f"<p>{p}</p>" for p in paragraphs]
new_content = "".join(paragraphs)
with open(html_filename, "w", encoding="utf-8") as fp:
fp.write(new_content)
if pandoc_version < (2,):
parse_raw = "--parse-raw" if not strip_raw else ""
wrap_none = (
"--wrap=none" if pandoc_version >= (1, 16) else "--no-wrap"
)
cmd = (
"pandoc --normalize {0} --from=html"
' --to={1} {2} -o "{3}" "{4}"'
)
cmd = cmd.format(
parse_raw,
out_markup if out_markup != "markdown" else "gfm",
wrap_none,
out_filename,
html_filename,
)
else:
from_arg = "-f html+raw_html" if not strip_raw else "-f html"
cmd = 'pandoc {0} --to={1}-smart --wrap=none -o "{2}" "{3}"'
cmd = cmd.format(
from_arg,
out_markup if out_markup != "markdown" else "gfm",
out_filename,
html_filename,
)
try:
rc = subprocess.call(cmd, shell=True)
if rc < 0:
error = "Child was terminated by signal %d" % -rc
sys.exit(error)
elif rc > 0:
error = "Please, check your Pandoc installation."
sys.exit(error)
except OSError as e:
error = f"Pandoc execution failed: {e}"
sys.exit(error)
with open(out_filename, encoding="utf-8") as fs:
content = fs.read()
if out_markup == "markdown":
                    # In Markdown, to insert a <br />, end a line with two
                    # or more spaces followed by an end-of-line
content = content.replace("\\\n ", " \n")
content = content.replace("\\\n", " \n")
if wp_attach and links:
content = update_links_to_attached_files(content, links)
with open(out_filename, "w", encoding="utf-8") as fs:
fs.write(header + content)
if posts_require_pandoc:
logger.error(
"Pandoc must be installed to import the following posts:\n {}".format(
"\n ".join(posts_require_pandoc)
)
)
if wp_attach and attachments and None in attachments:
print("downloading attachments that don't have a parent post")
urls = attachments[None]
download_attachments(output_path, urls)
def main():
parser = argparse.ArgumentParser(
description="Transform feed, Blogger, Dotclear, Tumblr, or "
"WordPress files into reST (rst) or Markdown (md) files. "
"Be sure to have pandoc installed.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(dest="input", help="The input file to read")
parser.add_argument(
"--blogger", action="store_true", dest="blogger", help="Blogger XML export"
)
parser.add_argument(
"--dotclear", action="store_true", dest="dotclear", help="Dotclear export"
)
parser.add_argument(
"--medium", action="store_true", dest="medium", help="Medium export"
)
parser.add_argument(
"--tumblr", action="store_true", dest="tumblr", help="Tumblr export"
)
parser.add_argument(
"--wpfile", action="store_true", dest="wpfile", help="Wordpress XML export"
)
parser.add_argument(
"--feed", action="store_true", dest="feed", help="Feed to parse"
)
parser.add_argument(
"-o", "--output", dest="output", default="content", help="Output path"
)
parser.add_argument(
"-m",
"--markup",
dest="markup",
default="rst",
help="Output markup format (supports rst & markdown)",
)
parser.add_argument(
"--dir-cat",
action="store_true",
dest="dircat",
help="Put files in directories with categories name",
)
parser.add_argument(
"--dir-page",
action="store_true",
dest="dirpage",
help=(
'Put files recognised as pages in "pages/" sub-directory'
" (blogger and wordpress import only)"
),
)
parser.add_argument(
"--filter-author",
dest="author",
help="Import only post from the specified author",
)
parser.add_argument(
"--strip-raw",
action="store_true",
dest="strip_raw",
help="Strip raw HTML code that can't be converted to "
"markup such as flash embeds or iframes (wordpress import only)",
)
parser.add_argument(
"--wp-custpost",
action="store_true",
dest="wp_custpost",
help="Put wordpress custom post types in directories. If used with "
"--dir-cat option directories will be created as "
"/post_type/category/ (wordpress import only)",
)
parser.add_argument(
"--wp-attach",
action="store_true",
dest="wp_attach",
help="(wordpress import only) Download files uploaded to wordpress as "
"attachments. Files will be added to posts as a list in the post "
"header. All files will be downloaded, even if "
"they aren't associated with a post. Files will be downloaded "
"with their original path inside the output directory. "
"e.g. output/wp-uploads/date/postname/file.jpg "
"-- Requires an internet connection --",
)
parser.add_argument(
"--disable-slugs",
action="store_true",
dest="disable_slugs",
help="Disable storing slugs from imported posts within output. "
"With this disabled, your Pelican URLs may not be consistent "
"with your original posts.",
)
parser.add_argument(
"-b", "--blogname", dest="blogname", help="Blog name (Tumblr import only)"
)
args = parser.parse_args()
input_type = None
if args.blogger:
input_type = "blogger"
elif args.dotclear:
input_type = "dotclear"
elif args.medium:
input_type = "medium"
elif args.tumblr:
input_type = "tumblr"
elif args.wpfile:
input_type = "wordpress"
elif args.feed:
input_type = "feed"
else:
error = (
"You must provide one of --blogger, --dotclear, "
"--medium, --tumblr, --wpfile or --feed options"
)
sys.exit(error)
if not os.path.exists(args.output):
try:
os.mkdir(args.output)
except OSError:
error = "Unable to create the output folder: " + args.output
sys.exit(error)
if args.wp_attach and input_type != "wordpress":
error = "You must be importing a wordpress xml to use the --wp-attach option"
sys.exit(error)
if input_type == "blogger":
fields = blogger2fields(args.input)
elif input_type == "dotclear":
fields = dc2fields(args.input)
elif input_type == "medium":
fields = mediumposts2fields(args.input)
elif input_type == "tumblr":
fields = tumblr2fields(args.input, args.blogname)
elif input_type == "wordpress":
fields = wp2fields(args.input, args.wp_custpost or False)
elif input_type == "feed":
fields = feed2fields(args.input)
else:
raise ValueError(f"Unhandled input_type {input_type}")
if args.wp_attach:
attachments = get_attachments(args.input)
else:
attachments = None
# init logging
init()
fields2pelican(
fields,
args.markup,
args.output,
dircat=args.dircat or False,
dirpage=args.dirpage or False,
strip_raw=args.strip_raw or False,
disable_slugs=args.disable_slugs or False,
filter_author=args.author,
wp_custpost=args.wp_custpost or False,
wp_attach=args.wp_attach or False,
attachments=attachments or None,
)
| 42,802
|
Python
|
.py
| 1,119
| 27.92672
| 101
| 0.542318
|
getpelican/pelican
| 12,478
| 1,806
| 72
|
AGPL-3.0
|
9/5/2024, 5:09:45 PM (Europe/Amsterdam)
|
6,213
|
pelican_themes.py
|
getpelican_pelican/pelican/tools/pelican_themes.py
|
#!/usr/bin/env python
import argparse
import os
import shutil
import sys
def err(msg, die=None):
"""Print an error message and exits if an exit code is given"""
sys.stderr.write(msg + "\n")
if die:
sys.exit(die if isinstance(die, int) else 1)
try:
import pelican
except ImportError:
err(
"Cannot import pelican.\nYou must "
"install Pelican in order to run this script.",
-1,
)
global _THEMES_PATH
_THEMES_PATH = os.path.join(
os.path.dirname(os.path.abspath(pelican.__file__)), "themes"
)
__version__ = "0.2"
_BUILTIN_THEMES = ["simple", "notmyidea"]
def main():
"""Main function"""
parser = argparse.ArgumentParser(description="""Install themes for Pelican""")
excl = parser.add_mutually_exclusive_group()
excl.add_argument(
"-l",
"--list",
dest="action",
action="store_const",
const="list",
help="Show the themes already installed and exit",
)
excl.add_argument(
"-p",
"--path",
dest="action",
action="store_const",
const="path",
help="Show the themes path and exit",
)
excl.add_argument(
"-V",
"--version",
action="version",
version=f"pelican-themes v{__version__}",
help="Print the version of this script",
)
parser.add_argument(
"-i",
"--install",
dest="to_install",
nargs="+",
metavar="theme path",
help="The themes to install",
)
parser.add_argument(
"-r",
"--remove",
dest="to_remove",
nargs="+",
metavar="theme name",
help="The themes to remove",
)
parser.add_argument(
"-U",
"--upgrade",
dest="to_upgrade",
nargs="+",
metavar="theme path",
help="The themes to upgrade",
)
parser.add_argument(
"-s",
"--symlink",
dest="to_symlink",
nargs="+",
metavar="theme path",
help="Same as `--install', but create a symbolic link instead of "
"copying the theme. Useful for theme development",
)
parser.add_argument(
"-c",
"--clean",
dest="clean",
action="store_true",
help="Remove the broken symbolic links of the theme path",
)
parser.add_argument(
"-v", "--verbose", dest="verbose", action="store_true", help="Verbose output"
)
args = parser.parse_args()
to_install = args.to_install or args.to_upgrade
to_sym = args.to_symlink or args.clean
if args.action:
if args.action == "list":
list_themes(args.verbose)
elif args.action == "path":
print(_THEMES_PATH)
elif to_install or args.to_remove or to_sym:
if args.to_remove:
if args.verbose:
print("Removing themes...")
for i in args.to_remove:
remove(i, v=args.verbose)
if args.to_install:
if args.verbose:
print("Installing themes...")
for i in args.to_install:
install(i, v=args.verbose)
if args.to_upgrade:
if args.verbose:
print("Upgrading themes...")
for i in args.to_upgrade:
install(i, v=args.verbose, u=True)
if args.to_symlink:
if args.verbose:
print("Linking themes...")
for i in args.to_symlink:
symlink(i, v=args.verbose)
if args.clean:
if args.verbose:
print("Cleaning the themes directory...")
clean(v=args.verbose)
else:
print("No argument given... exiting.")
def themes():
"""Returns the list of the themes"""
for i in os.listdir(_THEMES_PATH):
e = os.path.join(_THEMES_PATH, i)
if os.path.isdir(e):
if os.path.islink(e):
yield (e, os.readlink(e))
else:
yield (e, None)
def list_themes(v=False):
"""Display the list of the themes"""
for theme_path, link_target in themes():
if not v:
theme_path = os.path.basename(theme_path)
if link_target:
if v:
print(theme_path + (" (symbolic link to `" + link_target + "')"))
else:
print(theme_path + "@")
else:
print(theme_path)
def remove(theme_name, v=False):
"""Removes a theme"""
theme_name = theme_name.replace("/", "")
target = os.path.join(_THEMES_PATH, theme_name)
if theme_name in _BUILTIN_THEMES:
err(
theme_name + " is a builtin theme.\n"
"You cannot remove a builtin theme with this script, "
"remove it by hand if you want."
)
elif os.path.islink(target):
if v:
print("Removing link `" + target + "'")
os.remove(target)
elif os.path.isdir(target):
if v:
print("Removing directory `" + target + "'")
shutil.rmtree(target)
elif os.path.exists(target):
err(target + " : not a valid theme")
else:
err(target + " : no such file or directory")
def install(path, v=False, u=False):
"""Installs a theme"""
if not os.path.exists(path):
err(path + " : no such file or directory")
elif not os.path.isdir(path):
err(path + " : not a directory")
else:
theme_name = os.path.basename(os.path.normpath(path))
theme_path = os.path.join(_THEMES_PATH, theme_name)
exists = os.path.exists(theme_path)
if exists and not u:
err(path + " : already exists")
elif exists:
remove(theme_name, v)
install(path, v)
else:
if v:
print(f"Copying '{path}' to '{theme_path}' ...")
try:
shutil.copytree(path, theme_path)
try:
if os.name == "posix":
for root, dirs, files in os.walk(theme_path):
for d in dirs:
dname = os.path.join(root, d)
                                os.chmod(dname, 0o755)
for f in files:
fname = os.path.join(root, f)
                                os.chmod(fname, 0o644)
except OSError as e:
err(
"Cannot change permissions of files "
f"or directory in `{theme_path}':\n{e!s}",
die=False,
)
except Exception as e:
err(f"Cannot copy `{path}' to `{theme_path}':\n{e!s}")
def symlink(path, v=False):
"""Symbolically link a theme"""
path = os.path.realpath(path)
if not os.path.exists(path):
err(path + " : no such file or directory")
elif not os.path.isdir(path):
err(path + " : not a directory")
else:
theme_name = os.path.basename(os.path.normpath(path))
theme_path = os.path.join(_THEMES_PATH, theme_name)
if os.path.exists(theme_path):
err(path + " : already exists")
else:
if v:
print(f"Linking `{path}' to `{theme_path}' ...")
try:
os.symlink(path, theme_path)
except Exception as e:
err(f"Cannot link `{path}' to `{theme_path}':\n{e!s}")
def is_broken_link(path):
"""Returns True if the path given as is a broken symlink"""
path = os.path.realpath(path)
return not os.path.exists(path)
def clean(v=False):
"""Removes the broken symbolic links"""
c = 0
for path in os.listdir(_THEMES_PATH):
path = os.path.join(_THEMES_PATH, path)
if os.path.islink(path) and is_broken_link(path):
if v:
print(f"Removing {path}")
try:
os.remove(path)
except OSError:
print(f"Error: cannot remove {path}")
else:
c += 1
print(f"\nRemoved {c} broken links")
| 8,188
|
Python
|
.py
| 247
| 23.287449
| 85
| 0.526722
|
getpelican/pelican
| 12,478
| 1,806
| 72
|
AGPL-3.0
|
9/5/2024, 5:09:45 PM (Europe/Amsterdam)
|
6,214
|
pelican_quickstart.py
|
getpelican_pelican/pelican/tools/pelican_quickstart.py
|
#!/usr/bin/env python
import argparse
import locale
import os
from typing import Mapping
from jinja2 import Environment, FileSystemLoader
try:
import zoneinfo
except ModuleNotFoundError:
from backports import zoneinfo
try:
import readline # NOQA
except ImportError:
pass
try:
import tzlocal
if hasattr(tzlocal.get_localzone(), "zone"):
_DEFAULT_TIMEZONE = tzlocal.get_localzone().zone
else:
_DEFAULT_TIMEZONE = tzlocal.get_localzone_name()
except ModuleNotFoundError:
_DEFAULT_TIMEZONE = "Europe/Rome"
from pelican import __version__
locale.setlocale(locale.LC_ALL, "")
try:
_DEFAULT_LANGUAGE = locale.getlocale()[0]
except ValueError:
# Don't fail on macosx: "unknown locale: UTF-8"
_DEFAULT_LANGUAGE = None
if _DEFAULT_LANGUAGE is None:
_DEFAULT_LANGUAGE = "en"
else:
_DEFAULT_LANGUAGE = _DEFAULT_LANGUAGE.split("_")[0]
_TEMPLATES_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "templates")
_jinja_env = Environment(
loader=FileSystemLoader(_TEMPLATES_DIR),
trim_blocks=True,
keep_trailing_newline=True,
)
_GITHUB_PAGES_BRANCHES = {"personal": "main", "project": "gh-pages"}
CONF = {
"pelican": "pelican",
"pelicanopts": "",
"basedir": os.curdir,
"ftp_host": "localhost",
"ftp_user": "anonymous",
"ftp_target_dir": "/",
"ssh_host": "localhost",
"ssh_port": 22,
"ssh_user": "root",
"ssh_target_dir": "/var/www",
"s3_bucket": "my_s3_bucket",
"cloudfiles_username": "my_rackspace_username",
"cloudfiles_api_key": "my_rackspace_api_key",
"cloudfiles_container": "my_cloudfiles_container",
"dropbox_dir": "~/Dropbox/Public/",
"github_pages_branch": _GITHUB_PAGES_BRANCHES["project"],
"default_pagination": 10,
"siteurl": "",
"lang": _DEFAULT_LANGUAGE,
"timezone": _DEFAULT_TIMEZONE,
}
# url for list of valid timezones
_TZ_URL = "https://en.wikipedia.org/wiki/List_of_tz_database_time_zones"
# Create a 'marked' default path, to determine if someone has supplied
# a path on the command-line.
class _DEFAULT_PATH_TYPE(str): # noqa: SLOT000
is_default_path = True
_DEFAULT_PATH = _DEFAULT_PATH_TYPE(os.curdir)
def ask(question, answer=str, default=None, length=None):
if answer == str:
r = ""
while True:
if default:
r = input(f"> {question} [{default}] ")
else:
r = input(f"> {question} ")
r = r.strip()
if len(r) <= 0:
if default:
r = default
break
else:
print("You must enter something")
elif length and len(r) != length:
print(f"Entry must be {length} characters long")
else:
break
return r
elif answer == bool:
r = None
while True:
if default is True:
r = input(f"> {question} (Y/n) ")
elif default is False:
r = input(f"> {question} (y/N) ")
else:
r = input(f"> {question} (y/n) ")
r = r.strip().lower()
if r in ("y", "yes"):
r = True
break
elif r in ("n", "no"):
r = False
break
elif not r:
r = default
break
else:
print("You must answer 'yes' or 'no'")
return r
elif answer == int:
r = None
while True:
if default:
r = input(f"> {question} [{default}] ")
else:
r = input(f"> {question} ")
r = r.strip()
if not r:
r = default
break
try:
r = int(r)
break
except ValueError:
print("You must enter an integer")
return r
else:
raise NotImplementedError("Argument `answer` must be str, bool, or integer")
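# Prompt shapes at a glance (assumed example defaults):
#     ask("Title?", str, "My Blog")  prompts "> Title? [My Blog] "
#     ask("Paginate?", bool, True)   prompts "> Paginate? (Y/n) "
#     ask("Per page?", int, 10)      prompts "> Per page? [10] "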
def ask_timezone(question, default, tzurl):
"""Prompt for time zone and validate input"""
tz_dict = {tz.lower(): tz for tz in zoneinfo.available_timezones()}
while True:
r = ask(question, str, default)
r = r.strip().replace(" ", "_").lower()
if r in tz_dict.keys():
r = tz_dict[r]
break
else:
print(f"Please enter a valid time zone:\n (check [{tzurl}])")
return r
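# Matching is case-insensitive and spaces become underscores, so an answer of
# "america/new york" (hypothetical input) resolves to "America/New_York"
# when that zone is available.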
def render_jinja_template(tmpl_name: str, tmpl_vars: Mapping, target_path: str):
try:
with open(
os.path.join(CONF["basedir"], target_path), "w", encoding="utf-8"
) as fd:
_template = _jinja_env.get_template(tmpl_name)
fd.write(_template.render(**tmpl_vars))
except OSError as e:
print(f"Error: {e}")
def main():
parser = argparse.ArgumentParser(
description="A kickstarter for Pelican",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"-p", "--path", default=_DEFAULT_PATH, help="The path to generate the blog into"
)
parser.add_argument(
"-t", "--title", metavar="title", help="Set the title of the website"
)
parser.add_argument(
"-a", "--author", metavar="author", help="Set the author name of the website"
)
parser.add_argument(
"-l", "--lang", metavar="lang", help="Set the default web site language"
)
args = parser.parse_args()
print(
f"""Welcome to pelican-quickstart v{__version__}.
This script will help you create a new Pelican-based website.
Please answer the following questions so this script can generate the files
needed by Pelican.
"""
)
project = os.path.join(os.environ.get("VIRTUAL_ENV", os.curdir), ".project")
no_path_was_specified = hasattr(args.path, "is_default_path")
if os.path.isfile(project) and no_path_was_specified:
CONF["basedir"] = open(project).read().rstrip("\n")
print(
"Using project associated with current virtual environment. "
"Will save to:\n{}\n".format(CONF["basedir"])
)
else:
CONF["basedir"] = os.path.abspath(
os.path.expanduser(
ask(
"Where do you want to create your new web site?",
answer=str,
default=args.path,
)
)
)
CONF["sitename"] = ask(
"What will be the title of this web site?", answer=str, default=args.title
)
CONF["author"] = ask(
"Who will be the author of this web site?", answer=str, default=args.author
)
CONF["lang"] = ask(
"What will be the default language of this web site?",
str,
args.lang or CONF["lang"],
2,
)
if ask(
"Do you want to specify a URL prefix? e.g., https://example.com ",
answer=bool,
default=True,
):
CONF["siteurl"] = ask(
"What is your URL prefix? (see above example; no trailing slash)",
str,
CONF["siteurl"],
)
CONF["with_pagination"] = ask(
"Do you want to enable article pagination?",
bool,
bool(CONF["default_pagination"]),
)
if CONF["with_pagination"]:
CONF["default_pagination"] = ask(
"How many articles per page do you want?",
int,
CONF["default_pagination"],
)
else:
CONF["default_pagination"] = False
CONF["timezone"] = ask_timezone(
"What is your time zone?", CONF["timezone"], _TZ_URL
)
automation = ask(
"Do you want to generate a tasks.py/Makefile "
"to automate generation and publishing?",
bool,
True,
)
if automation:
if ask(
"Do you want to upload your website using FTP?", answer=bool, default=False
):
CONF["ftp"] = (True,)
CONF["ftp_host"] = ask(
"What is the hostname of your FTP server?", str, CONF["ftp_host"]
)
CONF["ftp_user"] = ask(
"What is your username on that server?", str, CONF["ftp_user"]
)
CONF["ftp_target_dir"] = ask(
"Where do you want to put your web site on that server?",
str,
CONF["ftp_target_dir"],
)
if ask(
"Do you want to upload your website using SSH?", answer=bool, default=False
):
CONF["ssh"] = (True,)
CONF["ssh_host"] = ask(
"What is the hostname of your SSH server?", str, CONF["ssh_host"]
)
CONF["ssh_port"] = ask(
"What is the port of your SSH server?", int, CONF["ssh_port"]
)
CONF["ssh_user"] = ask(
"What is your username on that server?", str, CONF["ssh_user"]
)
CONF["ssh_target_dir"] = ask(
"Where do you want to put your web site on that server?",
str,
CONF["ssh_target_dir"],
)
if ask(
"Do you want to upload your website using Dropbox?",
answer=bool,
default=False,
):
CONF["dropbox"] = (True,)
CONF["dropbox_dir"] = ask(
"Where is your Dropbox directory?", str, CONF["dropbox_dir"]
)
if ask(
"Do you want to upload your website using S3?", answer=bool, default=False
):
CONF["s3"] = (True,)
CONF["s3_bucket"] = ask(
"What is the name of your S3 bucket?", str, CONF["s3_bucket"]
)
if ask(
"Do you want to upload your website using Rackspace Cloud Files?",
answer=bool,
default=False,
):
CONF["cloudfiles"] = (True,)
CONF["cloudfiles_username"] = ask(
"What is your Rackspace Cloud username?",
str,
CONF["cloudfiles_username"],
)
CONF["cloudfiles_api_key"] = ask(
"What is your Rackspace Cloud API key?",
str,
CONF["cloudfiles_api_key"],
)
CONF["cloudfiles_container"] = ask(
"What is the name of your Cloud Files container?",
str,
CONF["cloudfiles_container"],
)
if ask(
"Do you want to upload your website using GitHub Pages?",
answer=bool,
default=False,
):
CONF["github"] = (True,)
if ask(
"Is this your personal page (username.github.io)?",
answer=bool,
default=False,
):
CONF["github_pages_branch"] = _GITHUB_PAGES_BRANCHES["personal"]
else:
CONF["github_pages_branch"] = _GITHUB_PAGES_BRANCHES["project"]
try:
os.makedirs(os.path.join(CONF["basedir"], "content"))
except OSError as e:
print(f"Error: {e}")
try:
os.makedirs(os.path.join(CONF["basedir"], "output"))
except OSError as e:
print(f"Error: {e}")
conf_python = {}
for key, value in CONF.items():
conf_python[key] = repr(value)
render_jinja_template("pelicanconf.py.jinja2", conf_python, "pelicanconf.py")
render_jinja_template("publishconf.py.jinja2", CONF, "publishconf.py")
if automation:
render_jinja_template("tasks.py.jinja2", CONF, "tasks.py")
render_jinja_template("Makefile.jinja2", CONF, "Makefile")
print("Done. Your new project is available at {}".format(CONF["basedir"]))
if __name__ == "__main__":
main()
| 11,886
|
Python
|
.py
| 342
| 25.184211
| 88
| 0.545233
|
getpelican/pelican
| 12,478
| 1,806
| 72
|
AGPL-3.0
|
9/5/2024, 5:09:45 PM (Europe/Amsterdam)
|
6,215
|
publishconf.py.jinja2
|
getpelican_pelican/pelican/tools/templates/publishconf.py.jinja2
|
# This file is only used if you use `make publish` or
# explicitly specify it as your config file.
import os
import sys
sys.path.append(os.curdir)
from pelicanconf import *
# If your site is available via HTTPS, make sure SITEURL begins with https://
SITEURL = "{{siteurl}}"
RELATIVE_URLS = False
FEED_ALL_ATOM = "feeds/all.atom.xml"
CATEGORY_FEED_ATOM = "feeds/{slug}.atom.xml"
DELETE_OUTPUT_DIRECTORY = True
# Following items are often useful when publishing
# DISQUS_SITENAME = ""
# GOOGLE_ANALYTICS = ""
| 515
|
Python
|
.py
| 15
| 32.866667
| 77
| 0.764706
|
getpelican/pelican
| 12,478
| 1,806
| 72
|
AGPL-3.0
|
9/5/2024, 5:09:45 PM (Europe/Amsterdam)
|
6,216
|
tasks.py.jinja2
|
getpelican_pelican/pelican/tools/templates/tasks.py.jinja2
|
import os
import shlex
import shutil
import sys
{% if github %}
import datetime
{% endif %}
from invoke import task
from invoke.main import program
{% if cloudfiles %}
from invoke.util import cd
{% endif %}
from pelican import main as pelican_main
from pelican.server import ComplexHTTPRequestHandler, RootedHTTPServer
from pelican.settings import DEFAULT_CONFIG, get_settings_from_file
OPEN_BROWSER_ON_SERVE = True
SETTINGS_FILE_BASE = "pelicanconf.py"
SETTINGS = {}
SETTINGS.update(DEFAULT_CONFIG)
LOCAL_SETTINGS = get_settings_from_file(SETTINGS_FILE_BASE)
SETTINGS.update(LOCAL_SETTINGS)
CONFIG = {
"settings_base": SETTINGS_FILE_BASE,
"settings_publish": "publishconf.py",
# Output path. Can be absolute or relative to tasks.py. Default: 'output'
"deploy_path": SETTINGS["OUTPUT_PATH"],
{% if ssh %}
# Remote server configuration
"ssh_user": "{{ssh_user}}",
"ssh_host": "{{ssh_host}}",
"ssh_port": "{{ssh_port}}",
"ssh_path": "{{ssh_target_dir}}",
{% endif %}
{% if cloudfiles %}
# Rackspace Cloud Files configuration settings
"cloudfiles_username": "{{cloudfiles_username}}",
"cloudfiles_api_key": "{{cloudfiles_api_key}}",
"cloudfiles_container": "{{cloudfiles_container}}",
{% endif %}
{% if github %}
# Github Pages configuration
"github_pages_branch": "{{github_pages_branch}}",
"commit_message": f"'Publish site on {datetime.date.today().isoformat()}'",
{% endif %}
# Host and port for `serve`
"host": "localhost",
"port": 8000,
}
@task
def clean(c):
"""Remove generated files"""
if os.path.isdir(CONFIG["deploy_path"]):
shutil.rmtree(CONFIG["deploy_path"])
os.makedirs(CONFIG["deploy_path"])
@task
def build(c):
"""Build local version of site"""
pelican_run("-s {settings_base}".format(**CONFIG))
@task
def rebuild(c):
"""`build` with the delete switch"""
pelican_run("-d -s {settings_base}".format(**CONFIG))
@task
def regenerate(c):
"""Automatically regenerate site upon file modification"""
pelican_run("-r -s {settings_base}".format(**CONFIG))
@task
def serve(c):
"""Serve site at http://$HOST:$PORT/ (default is localhost:8000)"""
class AddressReuseTCPServer(RootedHTTPServer):
allow_reuse_address = True
server = AddressReuseTCPServer(
CONFIG["deploy_path"],
(CONFIG["host"], CONFIG["port"]),
ComplexHTTPRequestHandler,
)
if OPEN_BROWSER_ON_SERVE:
# Open site in default browser
import webbrowser
webbrowser.open("http://{host}:{port}".format(**CONFIG))
sys.stderr.write("Serving at {host}:{port} ...\n".format(**CONFIG))
server.serve_forever()
@task
def reserve(c):
"""`build`, then `serve`"""
build(c)
serve(c)
@task
def preview(c):
"""Build production version of site"""
pelican_run("-s {settings_publish}".format(**CONFIG))
@task
def livereload(c):
"""Automatically reload browser tab upon file modification."""
from livereload import Server
def cached_build():
cmd = "-s {settings_base} -e CACHE_CONTENT=true LOAD_CONTENT_CACHE=true"
pelican_run(cmd.format(**CONFIG))
cached_build()
server = Server()
theme_path = SETTINGS["THEME"]
watched_globs = [
CONFIG["settings_base"],
f"{theme_path}/templates/**/*.html",
]
content_file_extensions = [".md", ".rst"]
for extension in content_file_extensions:
content_glob = "{}/**/*{}".format(SETTINGS["PATH"], extension)
watched_globs.append(content_glob)
static_file_extensions = [".css", ".js"]
for extension in static_file_extensions:
static_file_glob = f"{theme_path}/static/**/*{extension}"
watched_globs.append(static_file_glob)
for glob in watched_globs:
server.watch(glob, cached_build)
if OPEN_BROWSER_ON_SERVE:
# Open site in default browser
import webbrowser
webbrowser.open("http://{host}:{port}".format(**CONFIG))
server.serve(host=CONFIG["host"], port=CONFIG["port"], root=CONFIG["deploy_path"])
{% if cloudfiles %}
@task
def cf_upload(c):
"""Publish to Rackspace Cloud Files"""
rebuild(c)
with cd(CONFIG["deploy_path"]):
c.run(
"swift -v -A https://auth.api.rackspacecloud.com/v1.0 "
"-U {cloudfiles_username} "
"-K {cloudfiles_api_key} "
"upload -c {cloudfiles_container} .".format(**CONFIG)
)
{% endif %}
@task
def publish(c):
"""Publish to production via rsync"""
pelican_run("-s {settings_publish}".format(**CONFIG))
c.run(
'rsync --delete --exclude ".DS_Store" -pthrvz -c '
'-e "ssh -p {ssh_port}" '
"{} {ssh_user}@{ssh_host}:{ssh_path}".format(
CONFIG["deploy_path"].rstrip("/") + "/", **CONFIG
)
)
{% if github %}
@task
def gh_pages(c):
"""Publish to GitHub Pages"""
preview(c)
c.run(
"ghp-import -b {github_pages_branch} "
"-m {commit_message} "
"{deploy_path} -p".format(**CONFIG)
)
{% endif %}
def pelican_run(cmd):
cmd += " " + program.core.remainder # allows to pass-through args to pelican
pelican_main(shlex.split(cmd))
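# Illustrative: `invoke build -- --fatal warnings` forwards the trailing
# arguments, effectively running pelican with "-s pelicanconf.py --fatal warnings".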
| 5,216
|
Python
|
.py
| 158
| 28.196203
| 86
| 0.646227
|
getpelican/pelican
| 12,478
| 1,806
| 72
|
AGPL-3.0
|
9/5/2024, 5:09:45 PM (Europe/Amsterdam)
|
6,217
|
pelicanconf.py.jinja2
|
getpelican_pelican/pelican/tools/templates/pelicanconf.py.jinja2
|
AUTHOR = {{author}}
SITENAME = {{sitename}}
SITEURL = ""
PATH = "content"
TIMEZONE = {{timezone}}
DEFAULT_LANG = {{lang}}
# Feed generation is usually not desired when developing
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
AUTHOR_FEED_ATOM = None
AUTHOR_FEED_RSS = None
# Blogroll
LINKS = (
("Pelican", "https://getpelican.com/"),
("Python.org", "https://www.python.org/"),
("Jinja2", "https://palletsprojects.com/p/jinja/"),
("You can modify those links in your config file", "#"),
)
# Social widget
SOCIAL = (
("You can add links in your config file", "#"),
("Another social link", "#"),
)
DEFAULT_PAGINATION = {{default_pagination}}
# Uncomment following line if you want document-relative URLs when developing
# RELATIVE_URLS = True
| 801
|
Python
|
.py
| 27
| 27.481481
| 77
| 0.697128
|
getpelican/pelican
| 12,478
| 1,806
| 72
|
AGPL-3.0
|
9/5/2024, 5:09:45 PM (Europe/Amsterdam)
|
6,218
|
test_contents.py
|
getpelican_pelican/pelican/tests/test_contents.py
|
import datetime
import locale
import logging
import os.path
from posixpath import join as posix_join
from sys import platform
from jinja2.utils import generate_lorem_ipsum
from pelican.contents import Article, Author, Category, Page, Static
from pelican.plugins.signals import content_object_init
from pelican.settings import DEFAULT_CONFIG
from pelican.tests.support import LoggedTestCase, get_context, get_settings, unittest
from pelican.utils import path_to_url, posixize_path, truncate_html_words
# generate one paragraph, enclosed with <p>
TEST_CONTENT = str(generate_lorem_ipsum(n=1))
TEST_SUMMARY = generate_lorem_ipsum(n=1, html=False)
class TestBase(LoggedTestCase):
def setUp(self):
super().setUp()
self.old_locale = locale.setlocale(locale.LC_ALL)
locale.setlocale(locale.LC_ALL, "C")
self.page_kwargs = {
"content": TEST_CONTENT,
"context": {
"localsiteurl": "",
"generated_content": {},
"static_content": {},
"static_links": set(),
},
"metadata": {
"summary": TEST_SUMMARY,
"title": "foo bar",
"author": Author("Blogger", DEFAULT_CONFIG),
},
"source_path": "/path/to/file/foo.ext",
}
self._disable_limit_filter()
def tearDown(self):
locale.setlocale(locale.LC_ALL, self.old_locale)
self._enable_limit_filter()
def _disable_limit_filter(self):
from pelican.contents import logger
logger.disable_filter()
def _enable_limit_filter(self):
from pelican.contents import logger
logger.enable_filter()
def _copy_page_kwargs(self):
# make a deep copy of page_kwargs
page_kwargs = {key: self.page_kwargs[key] for key in self.page_kwargs}
for key in page_kwargs:
if not isinstance(page_kwargs[key], dict):
break
page_kwargs[key] = {
subkey: page_kwargs[key][subkey] for subkey in page_kwargs[key]
}
return page_kwargs
class TestPage(TestBase):
def test_use_args(self):
# Creating a page with arguments passed to the constructor should use
        # them to initialise the object's attributes.
metadata = {
"foo": "bar",
"foobar": "baz",
"title": "foobar",
}
page = Page(TEST_CONTENT, metadata=metadata, context={"localsiteurl": ""})
for key, value in metadata.items():
self.assertTrue(hasattr(page, key))
self.assertEqual(value, getattr(page, key))
self.assertEqual(page.content, TEST_CONTENT)
def test_mandatory_properties(self):
# If the title is not set, must throw an exception.
page = Page("content")
self.assertFalse(page._has_valid_mandatory_properties())
self.assertLogCountEqual(
count=1,
msg="Skipping .*: could not find information about 'title'",
level=logging.ERROR,
)
page = Page("content", metadata={"title": "foobar"})
self.assertTrue(page._has_valid_mandatory_properties())
def test_summary_from_metadata(self):
# If a :summary: metadata is given, it should be used
page = Page(**self.page_kwargs)
self.assertEqual(page.summary, TEST_SUMMARY)
def test_summary_max_length(self):
# If a :SUMMARY_MAX_LENGTH: is set, and there is no other summary,
# generated summary should not exceed the given length.
page_kwargs = self._copy_page_kwargs()
settings = get_settings()
page_kwargs["settings"] = settings
del page_kwargs["metadata"]["summary"]
settings["SUMMARY_MAX_LENGTH"] = None
page = Page(**page_kwargs)
self.assertEqual(page.summary, TEST_CONTENT)
settings["SUMMARY_MAX_LENGTH"] = 10
page = Page(**page_kwargs)
self.assertEqual(page.summary, truncate_html_words(TEST_CONTENT, 10))
settings["SUMMARY_MAX_LENGTH"] = 0
page = Page(**page_kwargs)
self.assertEqual(page.summary, "")
def test_summary_paragraph(self):
# If SUMMARY_MAX_PARAGRAPHS is set, the generated summary should
# not exceed the given paragraph count.
page_kwargs = self._copy_page_kwargs()
settings = get_settings()
page_kwargs["settings"] = settings
del page_kwargs["metadata"]["summary"]
settings["SUMMARY_MAX_PARAGRAPHS"] = 1
settings["SUMMARY_MAX_LENGTH"] = None
page = Page(**page_kwargs)
self.assertEqual(page.summary, TEST_CONTENT)
def test_summary_paragraph_max_length(self):
# If both SUMMARY_MAX_PARAGRAPHS and SUMMARY_MAX_LENGTH are set,
# the generated summary should not exceed the given paragraph count and
# not exceed the given length.
page_kwargs = self._copy_page_kwargs()
settings = get_settings()
page_kwargs["settings"] = settings
del page_kwargs["metadata"]["summary"]
settings["SUMMARY_MAX_PARAGRAPHS"] = 1
settings["SUMMARY_MAX_LENGTH"] = 10
page = Page(**page_kwargs)
self.assertEqual(page.summary, truncate_html_words(TEST_CONTENT, 10))
def test_summary_end_suffix(self):
# If a :SUMMARY_END_SUFFIX: is set, and there is no other summary,
# generated summary should contain the specified marker at the end.
page_kwargs = self._copy_page_kwargs()
settings = get_settings()
page_kwargs["settings"] = settings
del page_kwargs["metadata"]["summary"]
settings["SUMMARY_END_SUFFIX"] = "test_marker"
settings["SUMMARY_MAX_LENGTH"] = 10
page = Page(**page_kwargs)
self.assertEqual(
page.summary, truncate_html_words(TEST_CONTENT, 10, "test_marker")
)
self.assertIn("test_marker", page.summary)
def test_summary_get_summary_warning(self):
"""calling ._get_summary() should issue a warning"""
page_kwargs = self._copy_page_kwargs()
page = Page(**page_kwargs)
self.assertEqual(page.summary, TEST_SUMMARY)
self.assertEqual(page._get_summary(), TEST_SUMMARY)
self.assertLogCountEqual(
count=1,
msg=r"_get_summary\(\) has been deprecated since 3\.6\.4\. "
"Use the summary decorator instead",
level=logging.WARNING,
)
def test_slug(self):
page_kwargs = self._copy_page_kwargs()
settings = get_settings()
page_kwargs["settings"] = settings
settings["SLUGIFY_SOURCE"] = "title"
page = Page(**page_kwargs)
self.assertEqual(page.slug, "foo-bar")
settings["SLUGIFY_SOURCE"] = "basename"
page = Page(**page_kwargs)
self.assertEqual(page.slug, "foo")
# test slug from title with unicode and case
inputs = (
# (title, expected, preserve_case, use_unicode)
("指導書", "zhi-dao-shu", False, False),
("指導書", "Zhi-Dao-Shu", True, False),
("指導書", "指導書", False, True),
("指導書", "指導書", True, True),
("Çığ", "cig", False, False),
("Çığ", "Cig", True, False),
("Çığ", "çığ", False, True),
("Çığ", "Çığ", True, True),
)
settings = get_settings()
page_kwargs = self._copy_page_kwargs()
page_kwargs["settings"] = settings
for title, expected, preserve_case, use_unicode in inputs:
settings["SLUGIFY_PRESERVE_CASE"] = preserve_case
settings["SLUGIFY_USE_UNICODE"] = use_unicode
page_kwargs["metadata"]["title"] = title
page = Page(**page_kwargs)
self.assertEqual(page.slug, expected, (title, preserve_case, use_unicode))
def test_defaultlang(self):
# If no lang is given, default to the default one.
page = Page(**self.page_kwargs)
self.assertEqual(page.lang, DEFAULT_CONFIG["DEFAULT_LANG"])
        # it is possible to specify the lang in the metadata info
self.page_kwargs["metadata"].update(
{
"lang": "fr",
}
)
page = Page(**self.page_kwargs)
self.assertEqual(page.lang, "fr")
def test_save_as(self):
# If a lang is not the default lang, save_as should be set
# accordingly.
# if a title is defined, save_as should be set
page = Page(**self.page_kwargs)
self.assertEqual(page.save_as, "pages/foo-bar.html")
# if a language is defined, save_as should include it accordingly
self.page_kwargs["metadata"].update(
{
"lang": "fr",
}
)
page = Page(**self.page_kwargs)
self.assertEqual(page.save_as, "pages/foo-bar-fr.html")
def test_relative_source_path(self):
# 'relative_source_path' should be the relative path
# from 'PATH' to 'source_path'
page_kwargs = self._copy_page_kwargs()
# If 'source_path' is None, 'relative_source_path' should
# also return None
page_kwargs["source_path"] = None
page = Page(**page_kwargs)
self.assertIsNone(page.relative_source_path)
page_kwargs = self._copy_page_kwargs()
settings = get_settings()
full_path = page_kwargs["source_path"]
settings["PATH"] = os.path.dirname(full_path)
page_kwargs["settings"] = settings
page = Page(**page_kwargs)
# if 'source_path' is set, 'relative_source_path' should
# return the relative path from 'PATH' to 'source_path'
self.assertEqual(
page.relative_source_path,
os.path.relpath(full_path, os.path.dirname(full_path)),
)
def test_metadata_url_format(self):
# Arbitrary metadata should be passed through url_format()
page = Page(**self.page_kwargs)
self.assertIn("summary", page.url_format.keys())
page.metadata["directory"] = "test-dir"
page.settings = get_settings(PAGE_SAVE_AS="{directory}/{slug}")
self.assertEqual(page.save_as, "test-dir/foo-bar")
def test_datetime(self):
# If DATETIME is set to a tuple, it should be used to override LOCALE
dt = datetime.datetime(2015, 9, 13)
page_kwargs = self._copy_page_kwargs()
# set its date to dt
page_kwargs["metadata"]["date"] = dt
page = Page(**page_kwargs)
        # page.locale_date is a plain str
dt_date = dt.strftime(DEFAULT_CONFIG["DEFAULT_DATE_FORMAT"])
self.assertEqual(page.locale_date, dt_date)
page_kwargs["settings"] = get_settings()
# I doubt this can work on all platforms ...
if platform == "win32":
locale = "jpn"
else:
locale = "ja_JP.utf8"
page_kwargs["settings"]["DATE_FORMATS"] = {"jp": (locale, "%Y-%m-%d(%a)")}
page_kwargs["metadata"]["lang"] = "jp"
import locale as locale_module
try:
page = Page(**page_kwargs)
self.assertEqual(page.locale_date, "2015-09-13(\u65e5)")
except locale_module.Error:
            # The constructor of ``Page`` will try to set the locale to
            # ``ja_JP.utf8``, but this attempt will fail when that locale is
            # not present on the system. You can list the locales available
            # on your system with the ``locale -a`` command.
            #
            # Until we find some other method to test this functionality, we
            # will simply skip this test.
            self.skipTest(f"There is no locale {locale} in this system.")
def test_template(self):
# Pages default to page, metadata overwrites
default_page = Page(**self.page_kwargs)
self.assertEqual("page", default_page.template)
page_kwargs = self._copy_page_kwargs()
page_kwargs["metadata"]["template"] = "custom"
custom_page = Page(**page_kwargs)
self.assertEqual("custom", custom_page.template)
def test_signal(self):
def receiver_test_function(sender):
receiver_test_function.has_been_called = True
receiver_test_function.has_been_called = False
content_object_init.connect(receiver_test_function)
self.assertIn(receiver_test_function, content_object_init.receivers_for(Page))
self.assertFalse(receiver_test_function.has_been_called)
Page(**self.page_kwargs)
self.assertTrue(receiver_test_function.has_been_called)
def test_get_content(self):
# Test that the content is updated with the relative links to
# filenames, tags and categories.
settings = get_settings()
args = self.page_kwargs.copy()
args["settings"] = settings
# Tag
args["content"] = 'A simple test, with a <a href="|tag|tagname">link</a>'
page = Page(**args)
content = page.get_content("http://notmyidea.org")
self.assertEqual(
content,
(
"A simple test, with a "
'<a href="http://notmyidea.org/tag/tagname.html">link</a>'
),
)
# Category
args["content"] = 'A simple test, with a <a href="|category|category">link</a>'
page = Page(**args)
content = page.get_content("http://notmyidea.org")
self.assertEqual(
content,
(
"A simple test, with a "
'<a href="http://notmyidea.org/category/category.html">link</a>'
),
)
def test_intrasite_link(self):
cls_name = "_DummyArticle"
article = type(cls_name, (object,), {"url": "article.html"})
args = self.page_kwargs.copy()
args["settings"] = get_settings()
args["source_path"] = "content"
args["context"]["generated_content"] = {"article.rst": article}
# Classic intrasite link via filename
args["content"] = (
'A simple test, with a <a href="|filename|article.rst">link</a>'
)
content = Page(**args).get_content("http://notmyidea.org")
self.assertEqual(
content,
"A simple test, with a "
'<a href="http://notmyidea.org/article.html">link</a>',
)
# fragment
args["content"] = (
"A simple test, with a "
'<a href="|filename|article.rst#section-2">link</a>'
)
content = Page(**args).get_content("http://notmyidea.org")
self.assertEqual(
content,
"A simple test, with a "
'<a href="http://notmyidea.org/article.html#section-2">link</a>',
)
# query
args["content"] = (
"A simple test, with a "
'<a href="|filename|article.rst'
'?utm_whatever=234&highlight=word">link</a>'
)
content = Page(**args).get_content("http://notmyidea.org")
self.assertEqual(
content,
"A simple test, with a "
'<a href="http://notmyidea.org/article.html'
'?utm_whatever=234&highlight=word">link</a>',
)
# combination
args["content"] = (
"A simple test, with a "
'<a href="|filename|article.rst'
'?utm_whatever=234&highlight=word#section-2">link</a>'
)
content = Page(**args).get_content("http://notmyidea.org")
self.assertEqual(
content,
"A simple test, with a "
'<a href="http://notmyidea.org/article.html'
'?utm_whatever=234&highlight=word#section-2">link</a>',
)
# also test for summary in metadata
parsed = (
'A simple summary test, with a <a href="|filename|article.rst">link</a>'
)
linked = (
"A simple summary test, with a "
'<a href="http://notmyidea.org/article.html">link</a>'
)
args["settings"]["FORMATTED_FIELDS"] = ["summary", "custom"]
args["metadata"]["summary"] = parsed
args["metadata"]["custom"] = parsed
args["context"]["localsiteurl"] = "http://notmyidea.org"
p = Page(**args)
# This is called implicitly from all generators and Pelican.run() once
# all files are processed. Here we process just one page so it needs
# to be called explicitly.
p.refresh_metadata_intersite_links()
self.assertEqual(p.summary, linked)
self.assertEqual(p.custom, linked)
def test_intrasite_link_more(self):
cls_name = "_DummyAsset"
args = self.page_kwargs.copy()
args["settings"] = get_settings()
args["source_path"] = "content"
args["context"]["static_content"] = {
"images/poster.jpg": type(
cls_name, (object,), {"url": "images/poster.jpg"}
),
"assets/video.mp4": type(cls_name, (object,), {"url": "assets/video.mp4"}),
"images/graph.svg": type(cls_name, (object,), {"url": "images/graph.svg"}),
}
args["context"]["generated_content"] = {
"reference.rst": type(cls_name, (object,), {"url": "reference.html"}),
}
# video.poster
args["content"] = (
"There is a video with poster "
'<video controls poster="{static}/images/poster.jpg">'
'<source src="|static|/assets/video.mp4" type="video/mp4">'
"</video>"
)
content = Page(**args).get_content("http://notmyidea.org")
self.assertEqual(
content,
"There is a video with poster "
'<video controls poster="http://notmyidea.org/images/poster.jpg">'
'<source src="http://notmyidea.org/assets/video.mp4"'
' type="video/mp4">'
"</video>",
)
# object.data
args["content"] = (
"There is a svg object "
'<object data="{static}/images/graph.svg"'
' type="image/svg+xml">'
"</object>"
)
content = Page(**args).get_content("http://notmyidea.org")
self.assertEqual(
content,
"There is a svg object "
'<object data="http://notmyidea.org/images/graph.svg"'
' type="image/svg+xml">'
"</object>",
)
# blockquote.cite
args["content"] = (
"There is a blockquote with cite attribute "
'<blockquote cite="{filename}reference.rst">blah blah</blockquote>'
)
content = Page(**args).get_content("http://notmyidea.org")
self.assertEqual(
content,
"There is a blockquote with cite attribute "
'<blockquote cite="http://notmyidea.org/reference.html">'
"blah blah"
"</blockquote>",
)
def test_intrasite_link_absolute(self):
"""Test that absolute URLs are merged properly."""
args = self.page_kwargs.copy()
args["settings"] = get_settings(
STATIC_URL="http://static.cool.site/{path}",
ARTICLE_URL="http://blog.cool.site/{slug}.html",
)
args["source_path"] = "content"
args["context"]["static_content"] = {
"images/poster.jpg": Static(
"", settings=args["settings"], source_path="images/poster.jpg"
),
}
args["context"]["generated_content"] = {
"article.rst": Article(
"",
settings=args["settings"],
metadata={"slug": "article", "title": "Article"},
)
}
# Article link will go to blog
args["content"] = '<a href="{filename}article.rst">Article</a>'
content = Page(**args).get_content("http://cool.site")
self.assertEqual(
content, '<a href="http://blog.cool.site/article.html">Article</a>'
)
# Page link will go to the main site
args["content"] = '<a href="{index}">Index</a>'
content = Page(**args).get_content("http://cool.site")
self.assertEqual(content, '<a href="http://cool.site/index.html">Index</a>')
# Image link will go to static
args["content"] = '<img src="{static}/images/poster.jpg"/>'
content = Page(**args).get_content("http://cool.site")
self.assertEqual(
content, '<img src="http://static.cool.site/images/poster.jpg"/>'
)
        # Meta content will go to static
args["content"] = '<meta content="{static}/images/poster.jpg"/>'
content = Page(**args).get_content("http://cool.site")
self.assertEqual(
content, '<meta content="http://static.cool.site/images/poster.jpg"/>'
)
def test_intrasite_link_escape(self):
article = type("_DummyArticle", (object,), {"url": "article-spaces.html"})
asset = type("_DummyAsset", (object,), {"url": "name@example.com"})
args = self.page_kwargs.copy()
args["settings"] = get_settings()
args["source_path"] = "content"
args["context"]["generated_content"] = {"article spaces.rst": article}
args["context"]["static_content"] = {"name@example.com": asset}
expected_output = (
"A simple test with a "
'<a href="http://notmyidea.org/article-spaces.html#anchor">link</a> '
'<a href="http://notmyidea.org/name@example.com#anchor">file</a>'
)
# not escaped
args["content"] = (
"A simple test with a "
'<a href="{filename}article spaces.rst#anchor">link</a> '
'<a href="{static}name@example.com#anchor">file</a>'
)
content = Page(**args).get_content("http://notmyidea.org")
self.assertEqual(content, expected_output)
# html escaped
args["content"] = (
"A simple test with a "
'<a href="{filename}article spaces.rst#anchor">link</a> '
'<a href="{static}name@example.com#anchor">file</a>'
)
content = Page(**args).get_content("http://notmyidea.org")
self.assertEqual(content, expected_output)
# url escaped
args["content"] = (
"A simple test with a "
'<a href="{filename}article%20spaces.rst#anchor">link</a> '
'<a href="{static}name%40example.com#anchor">file</a>'
)
content = Page(**args).get_content("http://notmyidea.org")
self.assertEqual(content, expected_output)
# html and url escaped
args["content"] = (
"A simple test with a "
'<a href="{filename}article%20spaces.rst#anchor">link</a> '
'<a href="{static}name@example.com#anchor">file</a>'
)
content = Page(**args).get_content("http://notmyidea.org")
self.assertEqual(content, expected_output)
def test_intrasite_link_markdown_spaces(self):
cls_name = "_DummyArticle"
article = type(cls_name, (object,), {"url": "article-spaces.html"})
args = self.page_kwargs.copy()
args["settings"] = get_settings()
args["source_path"] = "content"
args["context"]["generated_content"] = {"article spaces.rst": article}
# An intrasite link via filename with %20 as a space
args["content"] = (
'A simple test, with a <a href="|filename|article%20spaces.rst">link</a>'
)
content = Page(**args).get_content("http://notmyidea.org")
self.assertEqual(
content,
"A simple test, with a "
'<a href="http://notmyidea.org/article-spaces.html">link</a>',
)
def test_intrasite_link_source_and_generated(self):
"""Test linking both to the source and the generated article"""
cls_name = "_DummyAsset"
args = self.page_kwargs.copy()
args["settings"] = get_settings()
args["source_path"] = "content"
args["context"]["generated_content"] = {
"article.rst": type(cls_name, (object,), {"url": "article.html"})
}
args["context"]["static_content"] = {
"article.rst": type(cls_name, (object,), {"url": "article.rst"})
}
args["content"] = (
"A simple test, with a link to an"
'<a href="{filename}article.rst">article</a> and its'
'<a href="{static}article.rst">source</a>'
)
content = Page(**args).get_content("http://notmyidea.org")
self.assertEqual(
content,
"A simple test, with a link to an"
'<a href="http://notmyidea.org/article.html">article</a> and its'
'<a href="http://notmyidea.org/article.rst">source</a>',
)
def test_intrasite_link_to_static_content_with_filename(self):
"""Test linking to a static resource with deprecated {filename}"""
cls_name = "_DummyAsset"
args = self.page_kwargs.copy()
args["settings"] = get_settings()
args["source_path"] = "content"
args["context"]["static_content"] = {
"poster.jpg": type(cls_name, (object,), {"url": "images/poster.jpg"})
}
args["content"] = (
"A simple test, with a link to a"
'<a href="{filename}poster.jpg">poster</a>'
)
content = Page(**args).get_content("http://notmyidea.org")
self.assertEqual(
content,
"A simple test, with a link to a"
'<a href="http://notmyidea.org/images/poster.jpg">poster</a>',
)
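    # Summary note (not part of the original file): the tests above exercise
    # the intrasite link syntaxes {filename} (plus the legacy |filename| form)
    # for source documents and {static} (legacy |static|) for static files;
    # the former resolves via context["generated_content"], the latter via
    # context["static_content"].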
def test_multiple_authors(self):
"""Test article with multiple authors."""
args = self.page_kwargs.copy()
content = Page(**args)
assert content.authors == [content.author]
args["metadata"].pop("author")
args["metadata"]["authors"] = [
Author("First Author", DEFAULT_CONFIG),
Author("Second Author", DEFAULT_CONFIG),
]
content = Page(**args)
assert content.authors
assert content.author == content.authors[0]
class TestArticle(TestBase):
def test_template(self):
        # Articles default to the "article" template; metadata overrides it
default_article = Article(**self.page_kwargs)
self.assertEqual("article", default_article.template)
article_kwargs = self._copy_page_kwargs()
article_kwargs["metadata"]["template"] = "custom"
custom_article = Article(**article_kwargs)
self.assertEqual("custom", custom_article.template)
def test_slugify_category_author(self):
settings = get_settings()
settings["SLUG_REGEX_SUBSTITUTIONS"] = [
(r"C#", "csharp"),
(r"[^\w\s-]", ""),
(r"(?u)\A\s*", ""),
(r"(?u)\s*\Z", ""),
(r"[-\s]+", "-"),
]
settings["ARTICLE_URL"] = "{author}/{category}/{slug}/"
settings["ARTICLE_SAVE_AS"] = "{author}/{category}/{slug}/index.html"
article_kwargs = self._copy_page_kwargs()
article_kwargs["metadata"]["author"] = Author("O'Brien", settings)
article_kwargs["metadata"]["category"] = Category("C# & stuff", settings)
article_kwargs["metadata"]["title"] = "fnord"
article_kwargs["settings"] = settings
article = Article(**article_kwargs)
self.assertEqual(article.url, "obrien/csharp-stuff/fnord/")
self.assertEqual(article.save_as, "obrien/csharp-stuff/fnord/index.html")
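    # Illustrative walk-through (not part of the original file) of how the
    # substitution list above turns "C# & stuff" into "csharp-stuff":
    # "C#" -> "csharp" (custom rule, applied first), "&" is stripped as a
    # non-word character, surrounding whitespace is trimmed, the remaining run
    # of spaces collapses to "-", and the result is lowercased, matching the
    # URL asserted above.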
def test_slugify_with_author_substitutions(self):
settings = get_settings()
settings["AUTHOR_REGEX_SUBSTITUTIONS"] = [
("Alexander Todorov", "atodorov"),
("Krasimir Tsonev", "krasimir"),
(r"[^\w\s-]", ""),
(r"(?u)\A\s*", ""),
(r"(?u)\s*\Z", ""),
(r"[-\s]+", "-"),
]
settings["ARTICLE_URL"] = "blog/{author}/{slug}/"
settings["ARTICLE_SAVE_AS"] = "blog/{author}/{slug}/index.html"
article_kwargs = self._copy_page_kwargs()
article_kwargs["metadata"]["author"] = Author("Alexander Todorov", settings)
article_kwargs["metadata"]["title"] = "fnord"
article_kwargs["settings"] = settings
article = Article(**article_kwargs)
self.assertEqual(article.url, "blog/atodorov/fnord/")
self.assertEqual(article.save_as, "blog/atodorov/fnord/index.html")
def test_slugify_category_with_dots(self):
settings = get_settings()
settings["CATEGORY_REGEX_SUBSTITUTIONS"] = [
("Fedora QA", "fedora.qa"),
]
settings["ARTICLE_URL"] = "{category}/{slug}/"
article_kwargs = self._copy_page_kwargs()
article_kwargs["metadata"]["category"] = Category("Fedora QA", settings)
article_kwargs["metadata"]["title"] = "This Week in Fedora QA"
article_kwargs["settings"] = settings
article = Article(**article_kwargs)
self.assertEqual(article.url, "fedora.qa/this-week-in-fedora-qa/")
def test_valid_save_as_detects_breakout(self):
settings = get_settings()
article_kwargs = self._copy_page_kwargs()
article_kwargs["metadata"]["slug"] = "../foo"
article_kwargs["settings"] = settings
article = Article(**article_kwargs)
self.assertFalse(article._has_valid_save_as())
def test_valid_save_as_detects_breakout_to_root(self):
settings = get_settings()
article_kwargs = self._copy_page_kwargs()
article_kwargs["metadata"]["slug"] = "/foo"
article_kwargs["settings"] = settings
article = Article(**article_kwargs)
self.assertFalse(article._has_valid_save_as())
def test_valid_save_as_passes_valid(self):
settings = get_settings()
article_kwargs = self._copy_page_kwargs()
article_kwargs["metadata"]["slug"] = "foo"
article_kwargs["settings"] = settings
article = Article(**article_kwargs)
self.assertTrue(article._has_valid_save_as())
class TestStatic(LoggedTestCase):
def setUp(self):
super().setUp()
self.settings = get_settings(
STATIC_SAVE_AS="{path}",
STATIC_URL="{path}",
PAGE_SAVE_AS=os.path.join("outpages", "{slug}.html"),
PAGE_URL="outpages/{slug}.html",
)
self.context = get_context(self.settings)
self.static = Static(
content=None,
metadata={},
settings=self.settings,
source_path=posix_join("dir", "foo.jpg"),
context=self.context,
)
self.context["static_content"][self.static.source_path] = self.static
def tearDown(self):
pass
def test_attach_to_same_dir(self):
"""attach_to() overrides a static file's save_as and url."""
page = Page(
content="fake page",
metadata={"title": "fakepage"},
settings=self.settings,
source_path=os.path.join("dir", "fakepage.md"),
)
self.static.attach_to(page)
expected_save_as = os.path.join("outpages", "foo.jpg")
self.assertEqual(self.static.save_as, expected_save_as)
self.assertEqual(self.static.url, path_to_url(expected_save_as))
def test_attach_to_parent_dir(self):
"""attach_to() preserves dirs inside the linking document dir."""
page = Page(
content="fake page",
metadata={"title": "fakepage"},
settings=self.settings,
source_path="fakepage.md",
)
self.static.attach_to(page)
expected_save_as = os.path.join("outpages", "dir", "foo.jpg")
self.assertEqual(self.static.save_as, expected_save_as)
self.assertEqual(self.static.url, path_to_url(expected_save_as))
def test_attach_to_other_dir(self):
"""attach_to() ignores dirs outside the linking document dir."""
page = Page(
content="fake page",
metadata={"title": "fakepage"},
settings=self.settings,
source_path=os.path.join("dir", "otherdir", "fakepage.md"),
)
self.static.attach_to(page)
expected_save_as = os.path.join("outpages", "foo.jpg")
self.assertEqual(self.static.save_as, expected_save_as)
self.assertEqual(self.static.url, path_to_url(expected_save_as))
def test_attach_to_ignores_subsequent_calls(self):
"""attach_to() does nothing when called a second time."""
page = Page(
content="fake page",
metadata={"title": "fakepage"},
settings=self.settings,
source_path=os.path.join("dir", "fakepage.md"),
)
self.static.attach_to(page)
otherdir_settings = self.settings.copy()
otherdir_settings.update(
{
"PAGE_SAVE_AS": os.path.join("otherpages", "{slug}.html"),
"PAGE_URL": "otherpages/{slug}.html",
}
)
otherdir_page = Page(
content="other page",
metadata={"title": "otherpage"},
settings=otherdir_settings,
source_path=os.path.join("dir", "otherpage.md"),
)
self.static.attach_to(otherdir_page)
otherdir_save_as = os.path.join("otherpages", "foo.jpg")
self.assertNotEqual(self.static.save_as, otherdir_save_as)
self.assertNotEqual(self.static.url, path_to_url(otherdir_save_as))
def test_attach_to_does_nothing_after_save_as_referenced(self):
"""attach_to() does nothing if the save_as was already referenced.
        (For example, by a {static} link in a document processed earlier.)
"""
original_save_as = self.static.save_as
page = Page(
content="fake page",
metadata={"title": "fakepage"},
settings=self.settings,
source_path=os.path.join("dir", "fakepage.md"),
)
self.static.attach_to(page)
self.assertEqual(self.static.save_as, original_save_as)
self.assertEqual(self.static.url, path_to_url(original_save_as))
def test_attach_to_does_nothing_after_url_referenced(self):
"""attach_to() does nothing if the url was already referenced.
        (For example, by a {static} link in a document processed earlier.)
"""
original_url = self.static.url
page = Page(
content="fake page",
metadata={"title": "fakepage"},
settings=self.settings,
source_path=os.path.join("dir", "fakepage.md"),
)
self.static.attach_to(page)
self.assertEqual(self.static.save_as, self.static.source_path)
self.assertEqual(self.static.url, original_url)
def test_attach_to_does_not_override_an_override(self):
"""attach_to() does not override paths that were overridden elsewhere.
(For example, by the user with EXTRA_PATH_METADATA)
"""
customstatic = Static(
content=None,
metadata={"save_as": "customfoo.jpg", "url": "customfoo.jpg"},
settings=self.settings,
source_path=os.path.join("dir", "foo.jpg"),
context=self.settings.copy(),
)
page = Page(
content="fake page",
metadata={"title": "fakepage"},
settings=self.settings,
source_path=os.path.join("dir", "fakepage.md"),
)
customstatic.attach_to(page)
self.assertEqual(customstatic.save_as, "customfoo.jpg")
self.assertEqual(customstatic.url, "customfoo.jpg")
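    # Summary note (not part of the original file): taken together, the tests
    # above pin down attach_to()'s precedence -- explicit save_as/url metadata
    # overrides win, otherwise the first attach_to() call wins, and once
    # save_as or url has been read ("referenced") the paths are frozen so that
    # links emitted by earlier documents cannot be invalidated.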
def test_attach_link_syntax(self):
"""{attach} link syntax triggers output path override & url replacement."""
html = '<a href="{attach}../foo.jpg">link</a>'
page = Page(
content=html,
metadata={"title": "fakepage"},
settings=self.settings,
source_path=os.path.join("dir", "otherdir", "fakepage.md"),
context=self.context,
)
content = page.get_content("")
self.assertNotEqual(
content, html, "{attach} link syntax did not trigger URL replacement."
)
expected_save_as = os.path.join("outpages", "foo.jpg")
self.assertEqual(self.static.save_as, expected_save_as)
self.assertEqual(self.static.url, path_to_url(expected_save_as))
def test_tag_link_syntax(self):
"{tag} link syntax triggers url replacement."
html = '<a href="{tag}foo">link</a>'
page = Page(
content=html,
metadata={"title": "fakepage"},
settings=self.settings,
source_path=os.path.join("dir", "otherdir", "fakepage.md"),
context=self.context,
)
content = page.get_content("")
self.assertNotEqual(content, html)
def test_category_link_syntax(self):
"{category} link syntax triggers url replacement."
html = '<a href="{category}foo">link</a>'
page = Page(
content=html,
metadata={"title": "fakepage"},
settings=self.settings,
source_path=os.path.join("dir", "otherdir", "fakepage.md"),
context=self.context,
)
content = page.get_content("")
self.assertNotEqual(content, html)
def test_author_link_syntax(self):
"{author} link syntax triggers url replacement."
html = '<a href="{author}foo">link</a>'
page = Page(
content=html,
metadata={"title": "fakepage"},
settings=self.settings,
source_path=os.path.join("dir", "otherdir", "fakepage.md"),
context=self.context,
)
content = page.get_content("")
self.assertNotEqual(content, html)
def test_index_link_syntax(self):
"{index} link syntax triggers url replacement."
html = '<a href="{index}">link</a>'
page = Page(
content=html,
metadata={"title": "fakepage"},
settings=self.settings,
source_path=os.path.join("dir", "otherdir", "fakepage.md"),
context=self.context,
)
content = page.get_content("")
self.assertNotEqual(content, html)
expected_html = (
'<a href="'
+ "/".join((self.settings["SITEURL"], self.settings["INDEX_SAVE_AS"]))
+ '">link</a>'
)
self.assertEqual(content, expected_html)
def test_unknown_link_syntax(self):
"{unknown} link syntax should trigger warning."
html = '<a href="{unknown}foo">link</a>'
page = Page(
content=html,
metadata={"title": "fakepage"},
settings=self.settings,
source_path=os.path.join("dir", "otherdir", "fakepage.md"),
context=self.context,
)
content = page.get_content("")
self.assertEqual(content, html)
self.assertLogCountEqual(
count=1,
msg="Replacement Indicator 'unknown' not recognized, "
"skipping replacement",
level=logging.WARNING,
)
def test_link_to_unknown_file(self):
"{filename} link to unknown file should trigger warning."
html = '<a href="{filename}foo">link</a>'
page = Page(
content=html,
metadata={"title": "fakepage"},
settings=self.settings,
source_path=os.path.join("dir", "otherdir", "fakepage.md"),
context=self.context,
)
content = page.get_content("")
self.assertEqual(content, html)
self.assertLogCountEqual(
count=1,
msg="Unable to find 'foo', skipping url replacement.",
level=logging.WARNING,
)
def test_index_link_syntax_with_spaces(self):
"""{index} link syntax triggers url replacement
with spaces around the equal sign."""
html = '<a href = "{index}">link</a>'
page = Page(
content=html,
metadata={"title": "fakepage"},
settings=self.settings,
source_path=os.path.join("dir", "otherdir", "fakepage.md"),
context=self.context,
)
content = page.get_content("")
self.assertNotEqual(content, html)
expected_html = (
'<a href = "'
+ "/".join((self.settings["SITEURL"], self.settings["INDEX_SAVE_AS"]))
+ '">link</a>'
)
self.assertEqual(content, expected_html)
def test_not_save_as_draft(self):
"""Static.save_as is not affected by draft status."""
static = Static(
content=None,
metadata={
"status": "draft",
},
settings=self.settings,
source_path=os.path.join("dir", "foo.jpg"),
context=self.settings.copy(),
)
expected_save_as = posixize_path(os.path.join("dir", "foo.jpg"))
self.assertEqual(static.status, "draft")
self.assertEqual(static.save_as, expected_save_as)
self.assertEqual(static.url, path_to_url(expected_save_as))
| 41,368 | Python | .py | 952 | 33.246849 | 87 | 0.580811 | getpelican/pelican | 12,478 | 1,806 | 72 | AGPL-3.0 | 9/5/2024, 5:09:45 PM (Europe/Amsterdam) |
6,219 | test_rstdirectives.py | getpelican_pelican/pelican/tests/test_rstdirectives.py |
from unittest.mock import Mock
from pelican.tests.support import unittest
class Test_abbr_role(unittest.TestCase):
def call_it(self, text):
from pelican.rstdirectives import abbr_role
rawtext = text
lineno = 42
inliner = Mock(name="inliner")
nodes, system_messages = abbr_role("abbr", rawtext, text, lineno, inliner)
self.assertEqual(system_messages, [])
self.assertEqual(len(nodes), 1)
return nodes[0]
def test(self):
node = self.call_it("Abbr (Abbreviation)")
self.assertEqual(node.astext(), "Abbr")
self.assertEqual(node["explanation"], "Abbreviation")
def test_newlines_in_explanation(self):
node = self.call_it("CUL (See you\nlater)")
self.assertEqual(node.astext(), "CUL")
self.assertEqual(node["explanation"], "See you\nlater")
def test_newlines_in_abbr(self):
node = self.call_it("US of\nA \n (USA)")
self.assertEqual(node.astext(), "US of\nA")
self.assertEqual(node["explanation"], "USA")
| 1,057 | Python | .py | 24 | 36.416667 | 82 | 0.651072 | getpelican/pelican | 12,478 | 1,806 | 72 | AGPL-3.0 | 9/5/2024, 5:09:45 PM (Europe/Amsterdam) |
6,220 | test_testsuite.py | getpelican_pelican/pelican/tests/test_testsuite.py |
import warnings
from pelican.tests.support import unittest
class TestSuiteTest(unittest.TestCase):
def test_error_on_warning(self):
with self.assertRaises(UserWarning):
warnings.warn("test warning") # noqa: B028
| 240 | Python | .py | 6 | 34.5 | 55 | 0.744589 | getpelican/pelican | 12,478 | 1,806 | 72 | AGPL-3.0 | 9/5/2024, 5:09:45 PM (Europe/Amsterdam) |
6,221 | test_paginator.py | getpelican_pelican/pelican/tests/test_paginator.py |
import locale
from jinja2.utils import generate_lorem_ipsum
from pelican.contents import Article, Author
from pelican.paginator import Paginator
from pelican.settings import DEFAULT_CONFIG
from pelican.tests.support import get_settings, unittest
# generate one paragraph, enclosed in <p> tags
TEST_CONTENT = str(generate_lorem_ipsum(n=1))
TEST_SUMMARY = generate_lorem_ipsum(n=1, html=False)
class TestPage(unittest.TestCase):
def setUp(self):
super().setUp()
self.old_locale = locale.setlocale(locale.LC_ALL)
locale.setlocale(locale.LC_ALL, "C")
self.page_kwargs = {
"content": TEST_CONTENT,
"context": {
"localsiteurl": "",
},
"metadata": {
"summary": TEST_SUMMARY,
"title": "foo bar",
},
"source_path": "/path/to/file/foo.ext",
}
def tearDown(self):
locale.setlocale(locale.LC_ALL, self.old_locale)
def test_save_as_preservation(self):
settings = get_settings()
# fix up pagination rules
from pelican.paginator import PaginationRule
pagination_rules = [
PaginationRule(*r)
for r in settings.get(
"PAGINATION_PATTERNS",
DEFAULT_CONFIG["PAGINATION_PATTERNS"],
)
]
settings["PAGINATION_PATTERNS"] = sorted(
pagination_rules,
key=lambda r: r[0],
)
self.page_kwargs["metadata"]["author"] = Author("Blogger", settings)
object_list = [Article(**self.page_kwargs), Article(**self.page_kwargs)]
paginator = Paginator("foobar.foo", "foobar/foo", object_list, settings)
page = paginator.page(1)
self.assertEqual(page.save_as, "foobar.foo")
def test_custom_pagination_pattern(self):
from pelican.paginator import PaginationRule
settings = get_settings()
settings["PAGINATION_PATTERNS"] = [
PaginationRule(*r)
for r in [
(1, "/{url}", "{base_name}/index.html"),
(2, "/{url}{number}/", "{base_name}/{number}/index.html"),
]
]
self.page_kwargs["metadata"]["author"] = Author("Blogger", settings)
object_list = [Article(**self.page_kwargs), Article(**self.page_kwargs)]
paginator = Paginator(
"blog/index.html", "//blog.my.site/", object_list, settings, 1
)
# The URL *has to* stay absolute (with // in the front), so verify that
page1 = paginator.page(1)
self.assertEqual(page1.save_as, "blog/index.html")
self.assertEqual(page1.url, "//blog.my.site/")
page2 = paginator.page(2)
self.assertEqual(page2.save_as, "blog/2/index.html")
self.assertEqual(page2.url, "//blog.my.site/2/")
def test_custom_pagination_pattern_last_page(self):
from pelican.paginator import PaginationRule
settings = get_settings()
settings["PAGINATION_PATTERNS"] = [
PaginationRule(*r)
for r in [
(1, "/{url}1/", "{base_name}/1/index.html"),
(2, "/{url}{number}/", "{base_name}/{number}/index.html"),
(-1, "/{url}", "{base_name}/index.html"),
]
]
self.page_kwargs["metadata"]["author"] = Author("Blogger", settings)
object_list = [
Article(**self.page_kwargs),
Article(**self.page_kwargs),
Article(**self.page_kwargs),
]
paginator = Paginator(
"blog/index.html", "//blog.my.site/", object_list, settings, 1
)
# The URL *has to* stay absolute (with // in the front), so verify that
page1 = paginator.page(1)
self.assertEqual(page1.save_as, "blog/1/index.html")
self.assertEqual(page1.url, "//blog.my.site/1/")
page2 = paginator.page(2)
self.assertEqual(page2.save_as, "blog/2/index.html")
self.assertEqual(page2.url, "//blog.my.site/2/")
page3 = paginator.page(3)
self.assertEqual(page3.save_as, "blog/index.html")
self.assertEqual(page3.url, "//blog.my.site/")
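# Hedged sketch (not part of the original test file): an illustrative helper
# capturing the rule-selection semantics the tests above exercise. For page N,
# the applicable PaginationRule is the one with the largest min_page <= N,
# while a min_page of -1 targets the last page specifically. This is inferred
# from the assertions above, not copied from Pelican's implementation.
def _pick_rule_sketch(rules, number, num_pages):
    last_page_rule = next((r for r in rules if r.min_page == -1), None)
    if last_page_rule is not None and number == num_pages:
        return last_page_rule  # e.g. page 3 of 3 maps to "blog/index.html"
    eligible = [r for r in rules if 0 <= r.min_page <= number]
    return max(eligible, key=lambda r: r.min_page, default=None)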
| 4,194 | Python | .py | 99 | 32.525253 | 80 | 0.58652 | getpelican/pelican | 12,478 | 1,806 | 72 | AGPL-3.0 | 9/5/2024, 5:09:45 PM (Europe/Amsterdam) |
6,222 | test_cli.py | getpelican_pelican/pelican/tests/test_cli.py |
import unittest
from pelican import get_config, parse_arguments
class TestParseOverrides(unittest.TestCase):
def test_flags(self):
for flag in ["-e", "--extra-settings"]:
args = parse_arguments([flag, "k=1"])
self.assertDictEqual(args.overrides, {"k": 1})
def test_parse_multiple_items(self):
args = parse_arguments("-e k1=1 k2=2".split())
self.assertDictEqual(args.overrides, {"k1": 1, "k2": 2})
def test_parse_valid_json(self):
json_values_python_values_map = {
'""': "",
"null": None,
'"string"': "string",
'["foo", 12, "4", {}]': ["foo", 12, "4", {}],
}
for k, v in json_values_python_values_map.items():
args = parse_arguments(["-e", "k=" + k])
self.assertDictEqual(args.overrides, {"k": v})
def test_parse_invalid_syntax(self):
invalid_items = ["k= 1", "k =1", "k", "k v"]
for item in invalid_items:
with self.assertRaises(ValueError):
parse_arguments(f"-e {item}".split())
def test_parse_invalid_json(self):
invalid_json = {
"",
"False",
"True",
"None",
"some other string",
'{"foo": bar}',
"[foo]",
}
for v in invalid_json:
with self.assertRaises(ValueError):
parse_arguments(["-e ", "k=" + v])
class TestGetConfigFromArgs(unittest.TestCase):
def test_overrides_known_keys(self):
args = parse_arguments(
[
"-e",
"DELETE_OUTPUT_DIRECTORY=false",
'OUTPUT_RETENTION=["1.txt"]',
'SITENAME="Title"',
]
)
config = get_config(args)
config_must_contain = {
"DELETE_OUTPUT_DIRECTORY": False,
"OUTPUT_RETENTION": ["1.txt"],
"SITENAME": "Title",
}
self.assertDictEqual(config, {**config, **config_must_contain})
def test_overrides_non_default_type(self):
args = parse_arguments(
[
"-e",
"DISPLAY_PAGES_ON_MENU=123",
"PAGE_TRANSLATION_ID=null",
'TRANSLATION_FEED_RSS_URL="someurl"',
]
)
config = get_config(args)
config_must_contain = {
"DISPLAY_PAGES_ON_MENU": 123,
"PAGE_TRANSLATION_ID": None,
"TRANSLATION_FEED_RSS_URL": "someurl",
}
self.assertDictEqual(config, {**config, **config_must_contain})
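# Illustrative note (not part of the original file): from a shell, the
# -e/--extra-settings flag takes KEY=VALUE pairs whose VALUE must be valid
# JSON, so string values need an inner layer of quotes once the shell strips
# the outer ones. A hypothetical invocation:
#
#     pelican content -e DELETE_OUTPUT_DIRECTORY=false SITENAME='"My Site"'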
| 2,603 | Python | .py | 71 | 25.71831 | 71 | 0.511895 | getpelican/pelican | 12,478 | 1,806 | 72 | AGPL-3.0 | 9/5/2024, 5:09:45 PM (Europe/Amsterdam) |
6,223 | test_log.py | getpelican_pelican/pelican/tests/test_log.py |
import logging
import unittest
from collections import defaultdict
from contextlib import contextmanager
from pelican import log
from pelican.tests.support import LogCountHandler
class TestLog(unittest.TestCase):
def setUp(self):
super().setUp()
self.logger = logging.getLogger(__name__)
self.handler = LogCountHandler()
self.logger.addHandler(self.handler)
def tearDown(self):
self._reset_limit_filter()
super().tearDown()
def _reset_limit_filter(self):
log.LimitFilter._ignore = set()
log.LimitFilter._raised_messages = set()
log.LimitFilter._threshold = 5
log.LimitFilter._group_count = defaultdict(int)
@contextmanager
def reset_logger(self):
try:
yield None
finally:
self._reset_limit_filter()
self.handler.flush()
def test_log_filter(self):
def do_logging():
for i in range(5):
self.logger.warning("Log %s", i)
self.logger.warning("Another log %s", i)
# no filter
with self.reset_logger():
do_logging()
self.assertEqual(self.handler.count_logs("Log \\d", logging.WARNING), 5)
self.assertEqual(
self.handler.count_logs("Another log \\d", logging.WARNING), 5
)
# filter by template
with self.reset_logger():
log.LimitFilter._ignore.add((logging.WARNING, "Log %s"))
do_logging()
self.assertEqual(self.handler.count_logs("Log \\d", logging.WARNING), 0)
self.assertEqual(
self.handler.count_logs("Another log \\d", logging.WARNING), 5
)
# filter by exact message
with self.reset_logger():
log.LimitFilter._ignore.add((logging.WARNING, "Log 3"))
do_logging()
self.assertEqual(self.handler.count_logs("Log \\d", logging.WARNING), 4)
self.assertEqual(
self.handler.count_logs("Another log \\d", logging.WARNING), 5
)
# filter by both
with self.reset_logger():
log.LimitFilter._ignore.add((logging.WARNING, "Log 3"))
log.LimitFilter._ignore.add((logging.WARNING, "Another log %s"))
do_logging()
self.assertEqual(self.handler.count_logs("Log \\d", logging.WARNING), 4)
self.assertEqual(
self.handler.count_logs("Another log \\d", logging.WARNING), 0
)
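# Illustrative note (not part of the original file): as the cases above show,
# LimitFilter._ignore holds (level, message) tuples in which the message may be
# either the raw logging template ("Log %s") or a fully rendered message
# ("Log 3"); a record is suppressed when either form matches.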
| 2,535 | Python | .py | 64 | 29.4375 | 84 | 0.594309 | getpelican/pelican | 12,478 | 1,806 | 72 | AGPL-3.0 | 9/5/2024, 5:09:45 PM (Europe/Amsterdam) |
6,224 | test_utils.py | getpelican_pelican/pelican/tests/test_utils.py |
import locale
import logging
import os
import shutil
from datetime import timezone
from sys import platform
from tempfile import mkdtemp
try:
from zoneinfo import ZoneInfo
except ModuleNotFoundError:
from backports.zoneinfo import ZoneInfo
from pelican import utils
from pelican.generators import TemplatePagesGenerator
from pelican.settings import read_settings
from pelican.tests.support import (
LoggedTestCase,
get_article,
locale_available,
unittest,
)
from pelican.writers import Writer
class ClassDeprAttr:
_new_attribute = "new_value"
@utils.deprecated_attribute(
old="_old_attribute", new="_new_attribute", since=(3, 1, 0), remove=(4, 1, 3)
)
def _old_attribute():
return None
class TestUtils(LoggedTestCase):
def setUp(self):
super().setUp()
self.temp_output = mkdtemp(prefix="pelicantests.")
def tearDown(self):
super().tearDown()
shutil.rmtree(self.temp_output)
def test_deprecated_attribute(self):
test_class = ClassDeprAttr()
value = test_class._old_attribute
self.assertEqual(value, test_class._new_attribute)
self.assertLogCountEqual(
count=1,
msg=(
"_old_attribute has been deprecated since 3.1.0 and will be "
"removed by version 4.1.3. Use _new_attribute instead"
),
level=logging.WARNING,
)
def test_get_date(self):
# valid ones
date = utils.SafeDatetime(year=2012, month=11, day=22)
date_hour = utils.SafeDatetime(year=2012, month=11, day=22, hour=22, minute=11)
date_hour_z = utils.SafeDatetime(
year=2012, month=11, day=22, hour=22, minute=11, tzinfo=timezone.utc
)
date_hour_est = utils.SafeDatetime(
year=2012, month=11, day=22, hour=22, minute=11, tzinfo=ZoneInfo("EST")
)
date_hour_sec = utils.SafeDatetime(
year=2012, month=11, day=22, hour=22, minute=11, second=10
)
date_hour_sec_z = utils.SafeDatetime(
year=2012,
month=11,
day=22,
hour=22,
minute=11,
second=10,
tzinfo=timezone.utc,
)
date_hour_sec_est = utils.SafeDatetime(
year=2012,
month=11,
day=22,
hour=22,
minute=11,
second=10,
tzinfo=ZoneInfo("EST"),
)
date_hour_sec_frac_z = utils.SafeDatetime(
year=2012,
month=11,
day=22,
hour=22,
minute=11,
second=10,
microsecond=123000,
tzinfo=timezone.utc,
)
dates = {
"2012-11-22": date,
"2012/11/22": date,
"2012-11-22 22:11": date_hour,
"2012/11/22 22:11": date_hour,
"22-11-2012": date,
"22/11/2012": date,
"22.11.2012": date,
"22.11.2012 22:11": date_hour,
"2012-11-22T22:11Z": date_hour_z,
"2012-11-22T22:11-0500": date_hour_est,
"2012-11-22 22:11:10": date_hour_sec,
"2012-11-22T22:11:10Z": date_hour_sec_z,
"2012-11-22T22:11:10-0500": date_hour_sec_est,
"2012-11-22T22:11:10.123Z": date_hour_sec_frac_z,
}
# examples from http://www.w3.org/TR/NOTE-datetime
iso_8601_date = utils.SafeDatetime(year=1997, month=7, day=16)
iso_8601_date_hour_tz = utils.SafeDatetime(
year=1997,
month=7,
day=16,
hour=19,
minute=20,
tzinfo=ZoneInfo("Europe/London"),
)
iso_8601_date_hour_sec_tz = utils.SafeDatetime(
year=1997,
month=7,
day=16,
hour=19,
minute=20,
second=30,
tzinfo=ZoneInfo("Europe/London"),
)
iso_8601_date_hour_sec_ms_tz = utils.SafeDatetime(
year=1997,
month=7,
day=16,
hour=19,
minute=20,
second=30,
microsecond=450000,
tzinfo=ZoneInfo("Europe/London"),
)
iso_8601 = {
"1997-07-16": iso_8601_date,
"1997-07-16T19:20+01:00": iso_8601_date_hour_tz,
"1997-07-16T19:20:30+01:00": iso_8601_date_hour_sec_tz,
"1997-07-16T19:20:30.45+01:00": iso_8601_date_hour_sec_ms_tz,
}
# invalid ones
invalid_dates = ["2010-110-12", "yay"]
for value, expected in dates.items():
self.assertEqual(utils.get_date(value), expected, value)
for value, expected in iso_8601.items():
self.assertEqual(utils.get_date(value), expected, value)
for item in invalid_dates:
self.assertRaises(ValueError, utils.get_date, item)
def test_slugify(self):
samples = (
("this is a test", "this-is-a-test"),
("this is a test", "this-is-a-test"),
("this → is ← a ↑ test", "this-is-a-test"),
("this--is---a test", "this-is-a-test"),
(
"unicode測試許功蓋,你看到了嗎?",
"unicodece-shi-xu-gong-gai-ni-kan-dao-liao-ma",
),
(
"大飯原発4号機、18日夜起動へ",
"da-fan-yuan-fa-4hao-ji-18ri-ye-qi-dong-he",
),
)
settings = read_settings()
subs = settings["SLUG_REGEX_SUBSTITUTIONS"]
for value, expected in samples:
self.assertEqual(utils.slugify(value, regex_subs=subs), expected)
self.assertEqual(utils.slugify("Cat", regex_subs=subs), "cat")
self.assertEqual(
utils.slugify("Cat", regex_subs=subs, preserve_case=False), "cat"
)
self.assertEqual(
utils.slugify("Cat", regex_subs=subs, preserve_case=True), "Cat"
)
def test_slugify_use_unicode(self):
samples = (
("this is a test", "this-is-a-test"),
("this is a test", "this-is-a-test"),
("this → is ← a ↑ test", "this-is-a-test"),
("this--is---a test", "this-is-a-test"),
("unicode測試許功蓋,你看到了嗎?", "unicode測試許功蓋你看到了嗎"),
("Çığ", "çığ"),
)
settings = read_settings()
subs = settings["SLUG_REGEX_SUBSTITUTIONS"]
for value, expected in samples:
self.assertEqual(
utils.slugify(value, regex_subs=subs, use_unicode=True), expected
)
# check with preserve case
self.assertEqual(
utils.slugify("Çığ", regex_subs=subs, preserve_case=True, use_unicode=True),
"Çığ",
)
# check normalization
samples = (
("大飯原発4号機、18日夜起動へ", "大飯原発4号機18日夜起動へ"),
(
"\N{LATIN SMALL LETTER C}\N{COMBINING CEDILLA}",
"\N{LATIN SMALL LETTER C WITH CEDILLA}",
),
)
for value, expected in samples:
self.assertEqual(
utils.slugify(value, regex_subs=subs, use_unicode=True), expected
)
def test_slugify_substitute(self):
samples = (
("C++ is based on C", "cpp-is-based-on-c"),
("C+++ test C+ test", "cpp-test-c-test"),
("c++, c#, C#, C++", "cpp-c-sharp-c-sharp-cpp"),
("c++-streams", "cpp-streams"),
)
settings = read_settings()
subs = [
(r"C\+\+", "CPP"),
(r"C#", "C-SHARP"),
] + settings["SLUG_REGEX_SUBSTITUTIONS"]
for value, expected in samples:
self.assertEqual(utils.slugify(value, regex_subs=subs), expected)
def test_slugify_substitute_and_keeping_non_alphanum(self):
samples = (
("Fedora QA", "fedora.qa"),
("C++ is used by Fedora QA", "cpp is used by fedora.qa"),
("C++ is based on C", "cpp is based on c"),
("C+++ test C+ test", "cpp+ test c+ test"),
)
subs = [
(r"Fedora QA", "fedora.qa"),
(r"c\+\+", "cpp"),
]
for value, expected in samples:
self.assertEqual(utils.slugify(value, regex_subs=subs), expected)
def test_get_relative_path(self):
samples = (
(os.path.join("test", "test.html"), os.pardir),
(
os.path.join("test", "test", "test.html"),
os.path.join(os.pardir, os.pardir),
),
("test.html", os.curdir),
(os.path.join("/test", "test.html"), os.pardir),
(
os.path.join("/test", "test", "test.html"),
os.path.join(os.pardir, os.pardir),
),
("/test.html", os.curdir),
)
for value, expected in samples:
self.assertEqual(utils.get_relative_path(value), expected)
def test_truncate_html_words(self):
# Plain text.
self.assertEqual(utils.truncate_html_words("short string", 20), "short string")
self.assertEqual(
utils.truncate_html_words("word " * 100, 20), "word " * 20 + "…"
)
# Plain text with Unicode content.
self.assertEqual(
utils.truncate_html_words(
"我愿意这样,朋友——我独自远行,不但没有你,\
并且再没有别的影在黑暗里。",
12,
),
"我愿意这样,朋友——我独自远行" + " …",
)
self.assertEqual(
utils.truncate_html_words(
"Ты мелькнула, ты предстала, Снова сердце задрожало,", 3
),
"Ты мелькнула, ты" + " …",
)
self.assertEqual(
utils.truncate_html_words("Trong đầm gì đẹp bằng sen", 4),
"Trong đầm gì đẹp" + " …",
)
        # Words enclosed by or interspersed with HTML tags.
self.assertEqual(
utils.truncate_html_words("<p>" + "word " * 100 + "</p>", 20),
"<p>" + "word " * 20 + "…</p>",
)
self.assertEqual(
utils.truncate_html_words(
'<span\nstyle="\n…\n">' + "word " * 100 + "</span>", 20
),
'<span\nstyle="\n…\n">' + "word " * 20 + "…</span>",
)
self.assertEqual(
utils.truncate_html_words("<br>" + "word " * 100, 20),
"<br>" + "word " * 20 + "…",
)
self.assertEqual(
utils.truncate_html_words("<!-- comment -->" + "word " * 100, 20),
"<!-- comment -->" + "word " * 20 + "…",
)
        # Words enclosed by or interspersed with HTML tags, with a custom end
        # marker containing HTML tags.
self.assertEqual(
utils.truncate_html_words(
"<p>" + "word " * 100 + "</p>", 20, "<span>marker</span>"
),
"<p>" + "word " * 20 + "<span>marker</span></p>",
)
self.assertEqual(
utils.truncate_html_words(
'<span\nstyle="\n…\n">' + "word " * 100 + "</span>",
20,
"<span>marker</span>",
),
'<span\nstyle="\n…\n">' + "word " * 20 + "<span>marker</span></span>",
)
self.assertEqual(
utils.truncate_html_words(
"<br>" + "word " * 100, 20, "<span>marker</span>"
),
"<br>" + "word " * 20 + "<span>marker</span>",
)
self.assertEqual(
utils.truncate_html_words(
"<!-- comment -->" + "word " * 100, 20, "<span>marker</span>"
),
"<!-- comment -->" + "word " * 20 + "<span>marker</span>",
)
        # Words with hyphens and apostrophes.
self.assertEqual(utils.truncate_html_words("a-b " * 100, 20), "a-b " * 20 + "…")
self.assertEqual(
utils.truncate_html_words("it's " * 100, 20), "it's " * 20 + "…"
)
# Words with HTML entity references.
self.assertEqual(
utils.truncate_html_words("é " * 100, 20), "é " * 20 + "…"
)
self.assertEqual(
utils.truncate_html_words("café " * 100, 20),
"café " * 20 + "…",
)
self.assertEqual(
utils.truncate_html_words("èlite " * 100, 20),
"èlite " * 20 + "…",
)
self.assertEqual(
utils.truncate_html_words("cafetiére " * 100, 20),
"cafetiére " * 20 + "…",
)
self.assertEqual(
utils.truncate_html_words("∫dx " * 100, 20), "∫dx " * 20 + "…"
)
# Words with HTML character references inside and outside
# the ASCII range.
self.assertEqual(
utils.truncate_html_words("é " * 100, 20), "é " * 20 + "…"
)
self.assertEqual(
utils.truncate_html_words("∫dx " * 100, 20), "∫dx " * 20 + "…"
)
# Words with invalid or broken HTML references.
self.assertEqual(utils.truncate_html_words("&invalid;", 20), "&invalid;")
self.assertEqual(
utils.truncate_html_words("�", 20), "�"
)
self.assertEqual(
utils.truncate_html_words("�", 20), "�"
)
self.assertEqual(utils.truncate_html_words("&mdash text", 20), "&mdash text")
self.assertEqual(utils.truncate_html_words("Ӓ text", 20), "Ӓ text")
self.assertEqual(utils.truncate_html_words("઼ text", 20), "઼ text")
def test_truncate_html_paragraphs(self):
one = "<p>one</p>"
self.assertEqual(utils.truncate_html_paragraphs(one, 0), "")
self.assertEqual(utils.truncate_html_paragraphs(one, 1), one)
self.assertEqual(utils.truncate_html_paragraphs(one, 2), one)
two = one + "<p>two</p>"
self.assertEqual(utils.truncate_html_paragraphs(two, 1), one)
self.assertEqual(utils.truncate_html_paragraphs(two, 2), two)
three = two + "<p>three</p>"
self.assertEqual(utils.truncate_html_paragraphs(three, 1), one)
self.assertEqual(utils.truncate_html_paragraphs(three, 2), two)
self.assertEqual(utils.truncate_html_paragraphs(three, 3), three)
self.assertEqual(utils.truncate_html_paragraphs(three, 4), three)
def test_process_translations(self):
fr_articles = []
en_articles = []
# create a bunch of articles
# 0: no translation metadata
fr_articles.append(
get_article(lang="fr", slug="yay0", title="Titre", content="en français")
)
en_articles.append(
get_article(lang="en", slug="yay0", title="Title", content="in english")
)
# 1: translation metadata on default lang
fr_articles.append(
get_article(lang="fr", slug="yay1", title="Titre", content="en français")
)
en_articles.append(
get_article(
lang="en",
slug="yay1",
title="Title",
content="in english",
translation="true",
)
)
# 2: translation metadata not on default lang
fr_articles.append(
get_article(
lang="fr",
slug="yay2",
title="Titre",
content="en français",
translation="true",
)
)
en_articles.append(
get_article(lang="en", slug="yay2", title="Title", content="in english")
)
# 3: back to default language detection if all items have the
# translation metadata
fr_articles.append(
get_article(
lang="fr",
slug="yay3",
title="Titre",
content="en français",
translation="yep",
)
)
en_articles.append(
get_article(
lang="en",
slug="yay3",
title="Title",
content="in english",
translation="yes",
)
)
# 4-5: translation pairs with the same slug but different category
fr_articles.append(
get_article(
lang="fr",
slug="yay4",
title="Titre",
content="en français",
category="foo",
)
)
en_articles.append(
get_article(
lang="en",
slug="yay4",
title="Title",
content="in english",
category="foo",
)
)
fr_articles.append(
get_article(
lang="fr",
slug="yay4",
title="Titre",
content="en français",
category="bar",
)
)
en_articles.append(
get_article(
lang="en",
slug="yay4",
title="Title",
content="in english",
category="bar",
)
)
# try adding articles in both orders
for lang0_articles, lang1_articles in (
(fr_articles, en_articles),
(en_articles, fr_articles),
):
articles = lang0_articles + lang1_articles
# test process_translations with falsy translation_id
index, trans = utils.process_translations(articles, translation_id=None)
for i in range(6):
for lang_articles in [en_articles, fr_articles]:
self.assertIn(lang_articles[i], index)
self.assertNotIn(lang_articles[i], trans)
# test process_translations with simple and complex translation_id
for translation_id in ["slug", {"slug", "category"}]:
index, trans = utils.process_translations(
articles, translation_id=translation_id
)
for a in [
en_articles[0],
fr_articles[1],
en_articles[2],
en_articles[3],
en_articles[4],
en_articles[5],
]:
self.assertIn(a, index)
self.assertNotIn(a, trans)
for a in [
fr_articles[0],
en_articles[1],
fr_articles[2],
fr_articles[3],
fr_articles[4],
fr_articles[5],
]:
self.assertIn(a, trans)
self.assertNotIn(a, index)
for i in range(6):
self.assertIn(en_articles[i], fr_articles[i].translations)
self.assertIn(fr_articles[i], en_articles[i].translations)
for a_arts in [en_articles, fr_articles]:
for b_arts in [en_articles, fr_articles]:
if translation_id == "slug":
self.assertIn(a_arts[4], b_arts[5].translations)
self.assertIn(a_arts[5], b_arts[4].translations)
elif translation_id == {"slug", "category"}:
self.assertNotIn(a_arts[4], b_arts[5].translations)
self.assertNotIn(a_arts[5], b_arts[4].translations)
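    # Summary note (not part of the original file): the cases above establish
    # that process_translations() groups items sharing the same translation_id
    # value(s), places one item per group in the index -- normally the
    # default-language item, unless it alone carries a truthy "translation"
    # flag, in which case the other language is promoted -- and records the
    # remaining items as translations of one another.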
def test_clean_output_dir(self):
retention = ()
test_directory = os.path.join(self.temp_output, "clean_output")
content = os.path.join(os.path.dirname(__file__), "content")
shutil.copytree(content, test_directory)
utils.clean_output_dir(test_directory, retention)
self.assertTrue(os.path.isdir(test_directory))
self.assertListEqual([], os.listdir(test_directory))
shutil.rmtree(test_directory)
def test_clean_output_dir_not_there(self):
retention = ()
test_directory = os.path.join(self.temp_output, "does_not_exist")
utils.clean_output_dir(test_directory, retention)
self.assertFalse(os.path.exists(test_directory))
def test_clean_output_dir_is_file(self):
retention = ()
test_directory = os.path.join(self.temp_output, "this_is_a_file")
        with open(test_directory, "w") as f:
            f.write("")
utils.clean_output_dir(test_directory, retention)
self.assertFalse(os.path.exists(test_directory))
def test_strftime(self):
d = utils.SafeDatetime(2012, 8, 29)
# simple formatting
self.assertEqual(utils.strftime(d, "%d/%m/%y"), "29/08/12")
self.assertEqual(utils.strftime(d, "%d/%m/%Y"), "29/08/2012")
# RFC 3339
self.assertEqual(
utils.strftime(d, "%Y-%m-%dT%H:%M:%SZ"), "2012-08-29T00:00:00Z"
)
# % escaped
self.assertEqual(utils.strftime(d, "%d%%%m%%%y"), "29%08%12")
self.assertEqual(utils.strftime(d, "%d %% %m %% %y"), "29 % 08 % 12")
# not valid % formatter
self.assertEqual(
utils.strftime(d, "10% reduction in %Y"), "10% reduction in 2012"
)
self.assertEqual(
utils.strftime(d, "%10 reduction in %Y"), "%10 reduction in 2012"
)
# with text
self.assertEqual(
utils.strftime(d, "Published in %d-%m-%Y"), "Published in 29-08-2012"
)
# with non-ascii text
self.assertEqual(
utils.strftime(d, "%d/%m/%Y Øl trinken beim Besäufnis"),
"29/08/2012 Øl trinken beim Besäufnis",
)
# alternative formatting options
self.assertEqual(utils.strftime(d, "%-d/%-m/%y"), "29/8/12")
self.assertEqual(utils.strftime(d, "%-H:%-M:%-S"), "0:0:0")
d = utils.SafeDatetime(2012, 8, 9)
self.assertEqual(utils.strftime(d, "%-d/%-m/%y"), "9/8/12")
d = utils.SafeDatetime(2021, 1, 8)
self.assertEqual(utils.strftime(d, "%G - %-V - %u"), "2021 - 1 - 5")
# test the output of utils.strftime in a different locale
# Turkish locale
@unittest.skipUnless(
locale_available("tr_TR.UTF-8") or locale_available("Turkish"),
"Turkish locale needed",
)
def test_strftime_locale_dependent_turkish(self):
temp_locale = "Turkish" if platform == "win32" else "tr_TR.UTF-8"
with utils.temporary_locale(temp_locale):
d = utils.SafeDatetime(2012, 8, 29)
# simple
self.assertEqual(utils.strftime(d, "%d %B %Y"), "29 Ağustos 2012")
self.assertEqual(
utils.strftime(d, "%A, %d %B %Y"), "Çarşamba, 29 Ağustos 2012"
)
# with text
self.assertEqual(
utils.strftime(d, "Yayınlanma tarihi: %A, %d %B %Y"),
"Yayınlanma tarihi: Çarşamba, 29 Ağustos 2012",
)
# non-ascii format candidate (someone might pass it… for some reason)
self.assertEqual(
utils.strftime(d, "%Y yılında %üretim artışı"),
"2012 yılında %üretim artışı",
)
# test the output of utils.strftime in a different locale
# French locale
@unittest.skipUnless(
locale_available("fr_FR.UTF-8") or locale_available("French"),
"French locale needed",
)
def test_strftime_locale_dependent_french(self):
temp_locale = "French" if platform == "win32" else "fr_FR.UTF-8"
with utils.temporary_locale(temp_locale):
d = utils.SafeDatetime(2012, 8, 29)
# simple
self.assertEqual(utils.strftime(d, "%d %B %Y"), "29 août 2012")
# depending on OS, the first letter is m or M
self.assertTrue(utils.strftime(d, "%A") in ("mercredi", "Mercredi"))
# with text
self.assertEqual(
utils.strftime(d, "Écrit le %d %B %Y"), "Écrit le 29 août 2012"
)
# non-ascii format candidate (someone might pass it… for some reason)
self.assertEqual(utils.strftime(d, "%écrits en %Y"), "%écrits en 2012")
def test_maybe_pluralize(self):
self.assertEqual(utils.maybe_pluralize(0, "Article", "Articles"), "0 Articles")
self.assertEqual(utils.maybe_pluralize(1, "Article", "Articles"), "1 Article")
self.assertEqual(utils.maybe_pluralize(2, "Article", "Articles"), "2 Articles")
def test_temporary_locale(self):
# test with default LC category
orig_locale = locale.setlocale(locale.LC_ALL)
with utils.temporary_locale("C"):
self.assertEqual(locale.setlocale(locale.LC_ALL), "C")
self.assertEqual(locale.setlocale(locale.LC_ALL), orig_locale)
# test with custom LC category
orig_locale = locale.setlocale(locale.LC_TIME)
with utils.temporary_locale("C", locale.LC_TIME):
self.assertEqual(locale.setlocale(locale.LC_TIME), "C")
self.assertEqual(locale.setlocale(locale.LC_TIME), orig_locale)
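# Hedged sketch (not part of the original file): the context-manager contract
# exercised by test_temporary_locale above is roughly the following -- set the
# requested locale for the given category on entry and always restore the
# previous one on exit. Illustrative only; Pelican ships its own
# ``utils.temporary_locale``.
#
#     from contextlib import contextmanager
#
#     @contextmanager
#     def temporary_locale(temp_locale, lc_category=locale.LC_ALL):
#         orig = locale.setlocale(lc_category)
#         locale.setlocale(lc_category, temp_locale)
#         try:
#             yield
#         finally:
#             locale.setlocale(lc_category, orig)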
class TestCopy(unittest.TestCase):
"""Tests the copy utility"""
def setUp(self):
self.root_dir = mkdtemp(prefix="pelicantests.")
self.old_locale = locale.setlocale(locale.LC_ALL)
locale.setlocale(locale.LC_ALL, "C")
def tearDown(self):
shutil.rmtree(self.root_dir)
locale.setlocale(locale.LC_ALL, self.old_locale)
def _create_file(self, *path):
with open(os.path.join(self.root_dir, *path), "w") as f:
f.write("42\n")
def _create_dir(self, *path):
os.makedirs(os.path.join(self.root_dir, *path))
def _exist_file(self, *path):
path = os.path.join(self.root_dir, *path)
self.assertTrue(os.path.isfile(path), f"File does not exist: {path}")
def _exist_dir(self, *path):
path = os.path.join(self.root_dir, *path)
self.assertTrue(os.path.exists(path), f"Directory does not exist: {path}")
def test_copy_file_same_path(self):
self._create_file("a.txt")
utils.copy(
os.path.join(self.root_dir, "a.txt"), os.path.join(self.root_dir, "b.txt")
)
self._exist_file("b.txt")
def test_copy_file_different_path(self):
self._create_dir("a")
self._create_dir("b")
self._create_file("a", "a.txt")
utils.copy(
os.path.join(self.root_dir, "a", "a.txt"),
os.path.join(self.root_dir, "b", "b.txt"),
)
self._exist_dir("b")
self._exist_file("b", "b.txt")
def test_copy_file_create_dirs(self):
self._create_file("a.txt")
utils.copy(
os.path.join(self.root_dir, "a.txt"),
os.path.join(self.root_dir, "b0", "b1", "b2", "b3", "b.txt"),
)
self._exist_dir("b0")
self._exist_dir("b0", "b1")
self._exist_dir("b0", "b1", "b2")
self._exist_dir("b0", "b1", "b2", "b3")
self._exist_file("b0", "b1", "b2", "b3", "b.txt")
def test_copy_dir_same_path(self):
self._create_dir("a")
self._create_file("a", "a.txt")
utils.copy(os.path.join(self.root_dir, "a"), os.path.join(self.root_dir, "b"))
self._exist_dir("b")
self._exist_file("b", "a.txt")
def test_copy_dir_different_path(self):
self._create_dir("a0")
self._create_dir("a0", "a1")
self._create_file("a0", "a1", "a.txt")
self._create_dir("b0")
utils.copy(
os.path.join(self.root_dir, "a0", "a1"),
os.path.join(self.root_dir, "b0", "b1"),
)
self._exist_dir("b0", "b1")
self._exist_file("b0", "b1", "a.txt")
def test_copy_dir_create_dirs(self):
self._create_dir("a")
self._create_file("a", "a.txt")
utils.copy(
os.path.join(self.root_dir, "a"),
os.path.join(self.root_dir, "b0", "b1", "b2", "b3", "b"),
)
self._exist_dir("b0")
self._exist_dir("b0", "b1")
self._exist_dir("b0", "b1", "b2")
self._exist_dir("b0", "b1", "b2", "b3")
self._exist_dir("b0", "b1", "b2", "b3", "b")
self._exist_file("b0", "b1", "b2", "b3", "b", "a.txt")
class TestDateFormatter(unittest.TestCase):
"""Tests that the output of DateFormatter jinja filter is same as
utils.strftime"""
def setUp(self):
# prepare a temp content and output folder
self.temp_content = mkdtemp(prefix="pelicantests.")
self.temp_output = mkdtemp(prefix="pelicantests.")
# prepare a template file
template_dir = os.path.join(self.temp_content, "template")
template_path = os.path.join(template_dir, "source.html")
os.makedirs(template_dir)
with open(template_path, "w") as template_file:
template_file.write('date = {{ date|strftime("%A, %d %B %Y") }}')
self.date = utils.SafeDatetime(2012, 8, 29)
def tearDown(self):
shutil.rmtree(self.temp_content)
shutil.rmtree(self.temp_output)
# reset locale to default
locale.setlocale(locale.LC_ALL, "")
@unittest.skipUnless(
locale_available("fr_FR.UTF-8") or locale_available("French"),
"French locale needed",
)
def test_french_strftime(self):
# This test tries to reproduce an issue that
# occurred with python3.3 under macos10 only
temp_locale = "French" if platform == "win32" else "fr_FR.UTF-8"
with utils.temporary_locale(temp_locale):
date = utils.SafeDatetime(2014, 8, 14)
# we compare the lower() dates since macos10 returns
# "Jeudi" for %A whereas linux reports "jeudi"
self.assertEqual(
"jeudi, 14 août 2014",
utils.strftime(date, date_format="%A, %d %B %Y").lower(),
)
df = utils.DateFormatter()
self.assertEqual(
"jeudi, 14 août 2014", df(date, date_format="%A, %d %B %Y").lower()
)
# Let us now set the global locale to C:
with utils.temporary_locale("C"):
# DateFormatter should still work as expected
# since it is the whole point of DateFormatter
# (This is where pre-2014/4/15 code fails on macos10)
df_date = df(date, date_format="%A, %d %B %Y").lower()
self.assertEqual("jeudi, 14 août 2014", df_date)
@unittest.skipUnless(
locale_available("fr_FR.UTF-8") or locale_available("French"),
"French locale needed",
)
def test_french_locale(self):
if platform == "win32":
locale_string = "French"
else:
locale_string = "fr_FR.UTF-8"
settings = read_settings(
override={
"LOCALE": locale_string,
"TEMPLATE_PAGES": {"template/source.html": "generated/file.html"},
}
)
generator = TemplatePagesGenerator(
{"date": self.date}, settings, self.temp_content, "", self.temp_output
)
generator.env.filters.update({"strftime": utils.DateFormatter()})
writer = Writer(self.temp_output, settings=settings)
generator.generate_output(writer)
output_path = os.path.join(self.temp_output, "generated", "file.html")
# output file has been generated
self.assertTrue(os.path.exists(output_path))
# output content is correct
with utils.pelican_open(output_path) as output_file:
self.assertEqual(
output_file, utils.strftime(self.date, "date = %A, %d %B %Y")
)
@unittest.skipUnless(
locale_available("tr_TR.UTF-8") or locale_available("Turkish"),
"Turkish locale needed",
)
def test_turkish_locale(self):
if platform == "win32":
locale_string = "Turkish"
else:
locale_string = "tr_TR.UTF-8"
settings = read_settings(
override={
"LOCALE": locale_string,
"TEMPLATE_PAGES": {"template/source.html": "generated/file.html"},
}
)
generator = TemplatePagesGenerator(
{"date": self.date}, settings, self.temp_content, "", self.temp_output
)
generator.env.filters.update({"strftime": utils.DateFormatter()})
writer = Writer(self.temp_output, settings=settings)
generator.generate_output(writer)
output_path = os.path.join(self.temp_output, "generated", "file.html")
# output file has been generated
self.assertTrue(os.path.exists(output_path))
# output content is correct
with utils.pelican_open(output_path) as output_file:
self.assertEqual(
output_file, utils.strftime(self.date, "date = %A, %d %B %Y")
)
class TestSanitisedJoin(unittest.TestCase):
def test_detect_parent_breakout(self):
with self.assertRaisesRegex(
RuntimeError,
"Attempted to break out of output directory to (.*?:)?/foo/test",
): # (.*?:)? accounts for Windows root
utils.sanitised_join("/foo/bar", "../test")
def test_detect_root_breakout(self):
with self.assertRaisesRegex(
RuntimeError,
"Attempted to break out of output directory to (.*?:)?/test",
): # (.*?:)? accounts for Windows root
utils.sanitised_join("/foo/bar", "/test")
def test_pass_deep_subpaths(self):
self.assertEqual(
utils.sanitised_join("/foo/bar", "test"),
utils.posixize_path(os.path.abspath(os.path.join("/foo/bar", "test"))),
)
class TestMemoized(unittest.TestCase):
def test_memoized(self):
class Container:
def _get(self, key):
pass
@utils.memoized
def get(self, key):
return self._get(key)
container = Container()
with unittest.mock.patch.object(
container, "_get", side_effect=lambda x: x
) as get_mock:
self.assertEqual("foo", container.get("foo"))
get_mock.assert_called_once_with("foo")
get_mock.reset_mock()
self.assertEqual("foo", container.get("foo"))
get_mock.assert_not_called()
self.assertEqual("bar", container.get("bar"))
get_mock.assert_called_once_with("bar")
get_mock.reset_mock()
container.get.cache.clear()
self.assertEqual("bar", container.get("bar"))
get_mock.assert_called_once_with("bar")
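# Hedged sketch (not part of the original file): a decorator with the behaviour
# TestMemoized relies on -- results cached per positional-argument tuple, with
# the cache dict exposed as a ``cache`` attribute so callers can clear it, as
# ``container.get.cache.clear()`` does above. Assumes hashable arguments; this
# is an illustration, not Pelican's actual ``utils.memoized``.
import functools


def _memoized_sketch(func):
    cache = {}

    @functools.wraps(func)
    def wrapper(*args):
        if args not in cache:
            cache[args] = func(*args)  # compute once per distinct args tuple
        return cache[args]

    wrapper.cache = cache  # exposed so callers can invalidate it explicitly
    return wrapper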
class TestStringUtils(unittest.TestCase):
def test_file_suffix(self):
self.assertEqual("", utils.file_suffix(""))
self.assertEqual("", utils.file_suffix("foo"))
self.assertEqual("md", utils.file_suffix("foo.md"))
| 35,426 | Python | .py | 863 | 29.341831 | 88 | 0.539629 | getpelican/pelican | 12,478 | 1,806 | 72 | AGPL-3.0 | 9/5/2024, 5:09:45 PM (Europe/Amsterdam) |
6,225 | default_conf.py | getpelican_pelican/pelican/tests/default_conf.py |
AUTHOR = "Alexis Métaireau"
SITENAME = "Alexis' log"
SITEURL = "http://blog.notmyidea.org"
TIMEZONE = "UTC"
GITHUB_URL = "http://github.com/ametaireau/"
DISQUS_SITENAME = "blog-notmyidea"
PDF_GENERATOR = False
REVERSE_CATEGORY_ORDER = True
DEFAULT_PAGINATION = 2
FEED_RSS = "feeds/all.rss.xml"
CATEGORY_FEED_RSS = "feeds/{slug}.rss.xml"
LINKS = (
("Biologeek", "http://biologeek.org"),
("Filyb", "http://filyb.info/"),
("Libert-fr", "http://www.libert-fr.com"),
("N1k0", "http://prendreuncafe.com/blog/"),
("Tarek Ziadé", "http://ziade.org/blog"),
("Zubin Mithra", "http://zubin71.wordpress.com/"),
)
SOCIAL = (
("twitter", "http://twitter.com/ametaireau"),
("lastfm", "http://lastfm.com/user/akounet"),
("github", "http://github.com/ametaireau"),
)
# global metadata to all the contents
DEFAULT_METADATA = {"yeah": "it is"}
# path-specific metadata
EXTRA_PATH_METADATA = {
"extra/robots.txt": {"path": "robots.txt"},
}
# static paths will be copied without parsing their contents
STATIC_PATHS = [
"pictures",
"extra/robots.txt",
]
FORMATTED_FIELDS = ["summary", "custom_formatted_field"]
# foobar will not be used, because it's not in caps. All configuration keys
# have to be in caps
foobar = "barbaz"
| 1,263 | Python | .py | 39 | 29.871795 | 75 | 0.68343 | getpelican/pelican | 12,478 | 1,806 | 72 | AGPL-3.0 | 9/5/2024, 5:09:45 PM (Europe/Amsterdam) |
6,226 | test_cache.py | getpelican_pelican/pelican/tests/test_cache.py |
import os
from shutil import rmtree
from tempfile import mkdtemp
from unittest.mock import MagicMock
from pelican.generators import ArticlesGenerator, PagesGenerator
from pelican.tests.support import get_context, get_settings, unittest
CUR_DIR = os.path.dirname(__file__)
CONTENT_DIR = os.path.join(CUR_DIR, "content")
class TestCache(unittest.TestCase):
def setUp(self):
self.temp_cache = mkdtemp(prefix="pelican_cache.")
def tearDown(self):
rmtree(self.temp_cache)
def _get_cache_enabled_settings(self):
settings = get_settings()
settings["CACHE_CONTENT"] = True
settings["LOAD_CONTENT_CACHE"] = True
settings["CACHE_PATH"] = self.temp_cache
return settings
def test_generator_caching(self):
"""Test that cached and uncached content is same in generator level"""
settings = self._get_cache_enabled_settings()
settings["CONTENT_CACHING_LAYER"] = "generator"
settings["PAGE_PATHS"] = ["TestPages"]
settings["DEFAULT_DATE"] = (1970, 1, 1)
settings["READERS"] = {"asc": None}
context = get_context(settings)
def sorted_titles(items):
return sorted(item.title for item in items)
# Articles
generator = ArticlesGenerator(
context=context.copy(),
settings=settings,
path=CONTENT_DIR,
theme=settings["THEME"],
output_path=None,
)
generator.generate_context()
uncached_articles = sorted_titles(generator.articles)
uncached_drafts = sorted_titles(generator.drafts)
generator = ArticlesGenerator(
context=context.copy(),
settings=settings,
path=CONTENT_DIR,
theme=settings["THEME"],
output_path=None,
)
generator.generate_context()
cached_articles = sorted_titles(generator.articles)
cached_drafts = sorted_titles(generator.drafts)
self.assertEqual(uncached_articles, cached_articles)
self.assertEqual(uncached_drafts, cached_drafts)
# Pages
generator = PagesGenerator(
context=context.copy(),
settings=settings,
path=CUR_DIR,
theme=settings["THEME"],
output_path=None,
)
generator.generate_context()
uncached_pages = sorted_titles(generator.pages)
uncached_hidden_pages = sorted_titles(generator.hidden_pages)
uncached_draft_pages = sorted_titles(generator.draft_pages)
generator = PagesGenerator(
context=context.copy(),
settings=settings,
path=CUR_DIR,
theme=settings["THEME"],
output_path=None,
)
generator.generate_context()
cached_pages = sorted_titles(generator.pages)
cached_hidden_pages = sorted_titles(generator.hidden_pages)
cached_draft_pages = sorted_titles(generator.draft_pages)
self.assertEqual(uncached_pages, cached_pages)
self.assertEqual(uncached_hidden_pages, cached_hidden_pages)
self.assertEqual(uncached_draft_pages, cached_draft_pages)
def test_reader_caching(self):
"""Test that cached and uncached content is same in reader level"""
settings = self._get_cache_enabled_settings()
settings["CONTENT_CACHING_LAYER"] = "reader"
settings["PAGE_PATHS"] = ["TestPages"]
settings["DEFAULT_DATE"] = (1970, 1, 1)
settings["READERS"] = {"asc": None}
context = get_context(settings)
def sorted_titles(items):
return sorted(item.title for item in items)
# Articles
generator = ArticlesGenerator(
context=context.copy(),
settings=settings,
path=CONTENT_DIR,
theme=settings["THEME"],
output_path=None,
)
generator.generate_context()
uncached_articles = sorted_titles(generator.articles)
uncached_drafts = sorted_titles(generator.drafts)
generator = ArticlesGenerator(
context=context.copy(),
settings=settings,
path=CONTENT_DIR,
theme=settings["THEME"],
output_path=None,
)
generator.generate_context()
cached_articles = sorted_titles(generator.articles)
cached_drafts = sorted_titles(generator.drafts)
self.assertEqual(uncached_articles, cached_articles)
self.assertEqual(uncached_drafts, cached_drafts)
# Pages
generator = PagesGenerator(
context=context.copy(),
settings=settings,
path=CUR_DIR,
theme=settings["THEME"],
output_path=None,
)
generator.generate_context()
uncached_pages = sorted_titles(generator.pages)
uncached_hidden_pages = sorted_titles(generator.hidden_pages)
generator = PagesGenerator(
context=context.copy(),
settings=settings,
path=CUR_DIR,
theme=settings["THEME"],
output_path=None,
)
generator.generate_context()
cached_pages = sorted_titles(generator.pages)
cached_hidden_pages = sorted_titles(generator.hidden_pages)
self.assertEqual(uncached_pages, cached_pages)
self.assertEqual(uncached_hidden_pages, cached_hidden_pages)
def test_article_object_caching(self):
"""Test Article objects caching at the generator level"""
settings = self._get_cache_enabled_settings()
settings["CONTENT_CACHING_LAYER"] = "generator"
settings["DEFAULT_DATE"] = (1970, 1, 1)
settings["READERS"] = {"asc": None}
context = get_context(settings)
generator = ArticlesGenerator(
context=context.copy(),
settings=settings,
path=CONTENT_DIR,
theme=settings["THEME"],
output_path=None,
)
generator.generate_context()
self.assertTrue(hasattr(generator, "_cache"))
generator = ArticlesGenerator(
context=context.copy(),
settings=settings,
path=CONTENT_DIR,
theme=settings["THEME"],
output_path=None,
)
generator.readers.read_file = MagicMock()
generator.generate_context()
"""
7 files don't get cached because they were not valid
- article_with_attributes_containing_double_quotes.html
- article_with_comments.html
- article_with_null_attributes.html
- 2012-11-30_md_w_filename_meta#foo-bar.md
- empty.md
- empty_with_bom.md
- article_skip.md
"""
self.assertEqual(generator.readers.read_file.call_count, 7)
def test_article_reader_content_caching(self):
"""Test raw article content caching at the reader level"""
settings = self._get_cache_enabled_settings()
settings["READERS"] = {"asc": None}
context = get_context(settings)
generator = ArticlesGenerator(
context=context.copy(),
settings=settings,
path=CONTENT_DIR,
theme=settings["THEME"],
output_path=None,
)
generator.generate_context()
self.assertTrue(hasattr(generator.readers, "_cache"))
generator = ArticlesGenerator(
context=context.copy(),
settings=settings,
path=CONTENT_DIR,
theme=settings["THEME"],
output_path=None,
)
readers = generator.readers.readers
for reader in readers.values():
reader.read = MagicMock()
generator.generate_context()
for reader in readers.values():
self.assertEqual(reader.read.call_count, 0)
def test_article_ignore_cache(self):
"""Test that all the articles are read again when not loading cache
used in --ignore-cache or autoreload mode"""
settings = self._get_cache_enabled_settings()
settings["READERS"] = {"asc": None}
context = get_context(settings)
generator = ArticlesGenerator(
context=context.copy(),
settings=settings,
path=CONTENT_DIR,
theme=settings["THEME"],
output_path=None,
)
generator.readers.read_file = MagicMock()
generator.generate_context()
self.assertTrue(hasattr(generator, "_cache_open"))
orig_call_count = generator.readers.read_file.call_count
settings["LOAD_CONTENT_CACHE"] = False
generator = ArticlesGenerator(
context=context.copy(),
settings=settings,
path=CONTENT_DIR,
theme=settings["THEME"],
output_path=None,
)
generator.readers.read_file = MagicMock()
generator.generate_context()
self.assertEqual(generator.readers.read_file.call_count, orig_call_count)
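        # With LOAD_CONTENT_CACHE disabled, the second run re-reads every
        # file, so its read_file call count matches the cold first run.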
def test_page_object_caching(self):
"""Test Page objects caching at the generator level"""
settings = self._get_cache_enabled_settings()
settings["CONTENT_CACHING_LAYER"] = "generator"
settings["PAGE_PATHS"] = ["TestPages"]
settings["READERS"] = {"asc": None}
context = get_context(settings)
generator = PagesGenerator(
context=context.copy(),
settings=settings,
path=CUR_DIR,
theme=settings["THEME"],
output_path=None,
)
generator.generate_context()
self.assertTrue(hasattr(generator, "_cache"))
generator = PagesGenerator(
context=context.copy(),
settings=settings,
path=CUR_DIR,
theme=settings["THEME"],
output_path=None,
)
generator.readers.read_file = MagicMock()
generator.generate_context()
"""
1 File doesn't get cached because it was not valid
- bad_page.rst
"""
self.assertEqual(generator.readers.read_file.call_count, 1)
def test_page_reader_content_caching(self):
"""Test raw page content caching at the reader level"""
settings = self._get_cache_enabled_settings()
settings["PAGE_PATHS"] = ["TestPages"]
settings["READERS"] = {"asc": None}
context = get_context(settings)
generator = PagesGenerator(
context=context.copy(),
settings=settings,
path=CUR_DIR,
theme=settings["THEME"],
output_path=None,
)
generator.generate_context()
self.assertTrue(hasattr(generator.readers, "_cache"))
generator = PagesGenerator(
context=context.copy(),
settings=settings,
path=CUR_DIR,
theme=settings["THEME"],
output_path=None,
)
readers = generator.readers.readers
for reader in readers.values():
reader.read = MagicMock()
generator.generate_context()
for reader in readers.values():
self.assertEqual(reader.read.call_count, 0)
def test_page_ignore_cache(self):
"""Test that all the pages are read again when not loading cache
used in --ignore_cache or autoreload mode"""
settings = self._get_cache_enabled_settings()
settings["PAGE_PATHS"] = ["TestPages"]
settings["READERS"] = {"asc": None}
context = get_context(settings)
generator = PagesGenerator(
context=context.copy(),
settings=settings,
path=CUR_DIR,
theme=settings["THEME"],
output_path=None,
)
generator.readers.read_file = MagicMock()
generator.generate_context()
self.assertTrue(hasattr(generator, "_cache_open"))
orig_call_count = generator.readers.read_file.call_count
settings["LOAD_CONTENT_CACHE"] = False
generator = PagesGenerator(
context=context.copy(),
settings=settings,
path=CUR_DIR,
theme=settings["THEME"],
output_path=None,
)
generator.readers.read_file = MagicMock()
generator.generate_context()
self.assertEqual(generator.readers.read_file.call_count, orig_call_count)
| 12,381 | Python | .py | 312 | 29.560897 | 81 | 0.612954 | getpelican/pelican | 12,478 | 1,806 | 72 | AGPL-3.0 | 9/5/2024, 5:09:45 PM (Europe/Amsterdam) |
| 6,227 | test_importer.py | getpelican_pelican/pelican/tests/test_importer.py |
import os
import re
from posixpath import join as posix_join
from unittest.mock import patch

from pelican.settings import DEFAULT_CONFIG
from pelican.tests.support import (
    TestCaseWithCLocale,
    mute,
    skipIfNoExecutable,
    temporary_folder,
    unittest,
)
from pelican.tools.pelican_import import (
    blogger2fields,
    build_header,
    build_markdown_header,
    decode_wp_content,
    download_attachments,
    fields2pelican,
    get_attachments,
    medium_slug,
    mediumpost2fields,
    mediumposts2fields,
    strip_medium_post_content,
    tumblr2fields,
    wp2fields,
)
from pelican.utils import path_to_file_url, slugify

CUR_DIR = os.path.abspath(os.path.dirname(__file__))
BLOGGER_XML_SAMPLE = os.path.join(CUR_DIR, "content", "bloggerexport.xml")
WORDPRESS_XML_SAMPLE = os.path.join(CUR_DIR, "content", "wordpressexport.xml")
WORDPRESS_ENCODED_CONTENT_SAMPLE = os.path.join(
    CUR_DIR, "content", "wordpress_content_encoded"
)
WORDPRESS_DECODED_CONTENT_SAMPLE = os.path.join(
    CUR_DIR, "content", "wordpress_content_decoded"
)

try:
    from bs4 import BeautifulSoup
except ImportError:
    BeautifulSoup = False

try:
    import bs4.builder._lxml as LXML
except ImportError:
    LXML = False

@skipIfNoExecutable(["pandoc", "--version"])
@unittest.skipUnless(BeautifulSoup, "Needs BeautifulSoup module")
class TestBloggerXmlImporter(TestCaseWithCLocale):
def setUp(self):
super().setUp()
self.posts = blogger2fields(BLOGGER_XML_SAMPLE)
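        # Each post is a 10-tuple of (title, content, filename, date, author,
        # categories, tags, status, kind, format), hence the numeric indexing
        # (x[0], x[2], x[7], x[8]) in the tests below.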
def test_recognise_kind_and_title(self):
"""Check that importer only outputs pages, articles and comments,
that these are correctly identified and that titles are correct.
"""
test_posts = list(self.posts)
kinds = {x[8] for x in test_posts}
self.assertEqual({"page", "article", "comment"}, kinds)
page_titles = {x[0] for x in test_posts if x[8] == "page"}
self.assertEqual({"Test page", "Test page 2"}, page_titles)
article_titles = {x[0] for x in test_posts if x[8] == "article"}
self.assertEqual(
{"Black as Egypt's Night", "The Steel Windpipe"}, article_titles
)
comment_titles = {x[0] for x in test_posts if x[8] == "comment"}
self.assertEqual(
{"Mishka, always a pleasure to read your adventures!..."}, comment_titles
)
def test_recognise_status_with_correct_filename(self):
"""Check that importerer outputs only statuses 'published' and 'draft',
that these are correctly identified and that filenames are correct.
"""
test_posts = list(self.posts)
statuses = {x[7] for x in test_posts}
self.assertEqual({"published", "draft"}, statuses)
draft_filenames = {x[2] for x in test_posts if x[7] == "draft"}
# draft filenames are id-based
self.assertEqual(
{"page-4386962582497458967", "post-1276418104709695660"}, draft_filenames
)
published_filenames = {x[2] for x in test_posts if x[7] == "published"}
# published filenames are url-based, except comments
self.assertEqual(
{"the-steel-windpipe", "test-page", "post-5590533389087749201"},
published_filenames,
)
@skipIfNoExecutable(["pandoc", "--version"])
@unittest.skipUnless(BeautifulSoup, "Needs BeautifulSoup module")
class TestWordpressXmlImporter(TestCaseWithCLocale):
def setUp(self):
super().setUp()
self.posts = wp2fields(WORDPRESS_XML_SAMPLE)
self.custposts = wp2fields(WORDPRESS_XML_SAMPLE, True)
def test_ignore_empty_posts(self):
self.assertTrue(self.posts)
for (
title,
_content,
_fname,
_date,
_author,
_categ,
_tags,
_status,
_kind,
_format,
) in self.posts:
self.assertTrue(title.strip())
def test_recognise_page_kind(self):
"""Check that we recognise pages in wordpress, as opposed to posts"""
self.assertTrue(self.posts)
# Collect (title, filename, kind) of non-empty posts recognised as page
pages_data = []
for (
title,
_content,
fname,
_date,
_author,
_categ,
_tags,
_status,
kind,
_format,
) in self.posts:
if kind == "page":
pages_data.append((title, fname))
self.assertEqual(2, len(pages_data))
self.assertEqual(("Page", "contact"), pages_data[0])
self.assertEqual(("Empty Page", "empty"), pages_data[1])
def test_dirpage_directive_for_page_kind(self):
silent_f2p = mute(True)(fields2pelican)
test_post = filter(lambda p: p[0].startswith("Empty Page"), self.posts)
with temporary_folder() as temp:
fname = next(iter(silent_f2p(test_post, "markdown", temp, dirpage=True)))
self.assertTrue(fname.endswith(f"pages{os.path.sep}empty.md"))
def test_dircat(self):
silent_f2p = mute(True)(fields2pelican)
test_posts = [
post
for post in self.posts
# check post has a category
if len(post[5]) > 0
]
with temporary_folder() as temp:
fnames = list(silent_f2p(test_posts, "markdown", temp, dircat=True))
subs = DEFAULT_CONFIG["SLUG_REGEX_SUBSTITUTIONS"]
index = 0
for post in test_posts:
name = post[2]
category = slugify(post[5][0], regex_subs=subs, preserve_case=True)
name += ".md"
filename = os.path.join(category, name)
out_name = fnames[index]
self.assertTrue(out_name.endswith(filename))
index += 1
def test_unless_custom_post_all_items_should_be_pages_or_posts(self):
self.assertTrue(self.posts)
pages_data = []
for (
title,
_content,
fname,
_date,
_author,
_categ,
_tags,
_status,
kind,
_format,
) in self.posts:
if kind in {"page", "article"}:
pass
else:
pages_data.append((title, fname))
self.assertEqual(0, len(pages_data))
def test_recognise_custom_post_type(self):
self.assertTrue(self.custposts)
cust_data = []
for (
title,
_content,
_fname,
_date,
_author,
_categ,
_tags,
_status,
kind,
_format,
) in self.custposts:
if kind in {"page", "article"}:
pass
else:
cust_data.append((title, kind))
self.assertEqual(3, len(cust_data))
self.assertEqual(("A custom post in category 4", "custom1"), cust_data[0])
self.assertEqual(("A custom post in category 5", "custom1"), cust_data[1])
self.assertEqual(
("A 2nd custom post type also in category 5", "custom2"), cust_data[2]
)
def test_custom_posts_put_in_own_dir(self):
silent_f2p = mute(True)(fields2pelican)
test_posts = []
for post in self.custposts:
# check post kind
if post[8] == "article" or post[8] == "page":
pass
else:
test_posts.append(post)
with temporary_folder() as temp:
fnames = list(silent_f2p(test_posts, "markdown", temp, wp_custpost=True))
index = 0
for post in test_posts:
name = post[2]
kind = post[8]
name += ".md"
filename = os.path.join(kind, name)
out_name = fnames[index]
self.assertTrue(out_name.endswith(filename))
index += 1
    def test_custom_posts_put_in_own_dir_and_category_sub_dir(self):
silent_f2p = mute(True)(fields2pelican)
test_posts = []
for post in self.custposts:
# check post kind
if post[8] == "article" or post[8] == "page":
pass
else:
test_posts.append(post)
with temporary_folder() as temp:
fnames = list(
silent_f2p(test_posts, "markdown", temp, wp_custpost=True, dircat=True)
)
subs = DEFAULT_CONFIG["SLUG_REGEX_SUBSTITUTIONS"]
index = 0
for post in test_posts:
name = post[2]
kind = post[8]
category = slugify(post[5][0], regex_subs=subs, preserve_case=True)
name += ".md"
filename = os.path.join(kind, category, name)
out_name = fnames[index]
self.assertTrue(out_name.endswith(filename))
index += 1
def test_wp_custpost_true_dirpage_false(self):
# pages should only be put in their own directory when dirpage = True
silent_f2p = mute(True)(fields2pelican)
test_posts = [
post
for post in self.custposts
# check post kind
if post[8] == "page"
]
with temporary_folder() as temp:
fnames = list(
silent_f2p(
test_posts, "markdown", temp, wp_custpost=True, dirpage=False
)
)
index = 0
for post in test_posts:
name = post[2]
name += ".md"
filename = os.path.join("pages", name)
out_name = fnames[index]
self.assertFalse(out_name.endswith(filename))
def test_can_toggle_raw_html_code_parsing(self):
test_posts = list(self.posts)
def r(f):
with open(f, encoding="utf-8") as infile:
return infile.read()
silent_f2p = mute(True)(fields2pelican)
with temporary_folder() as temp:
rst_files = (r(f) for f in silent_f2p(test_posts, "markdown", temp))
self.assertTrue(any("<iframe" in rst for rst in rst_files))
rst_files = (
r(f) for f in silent_f2p(test_posts, "markdown", temp, strip_raw=True)
)
self.assertFalse(any("<iframe" in rst for rst in rst_files))
# no effect in rst
rst_files = (r(f) for f in silent_f2p(test_posts, "rst", temp))
self.assertFalse(any("<iframe" in rst for rst in rst_files))
rst_files = (
r(f) for f in silent_f2p(test_posts, "rst", temp, strip_raw=True)
)
self.assertFalse(any("<iframe" in rst for rst in rst_files))
def test_decode_html_entities_in_titles(self):
test_posts = [post for post in self.posts if post[2] == "html-entity-test"]
self.assertEqual(len(test_posts), 1)
post = test_posts[0]
title = post[0]
self.assertTrue(
title,
"A normal post with some <html> entities in "
"the title. You can't miss them.",
)
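        # NOTE: assertTrue() above only checks that the title is truthy; its
        # second argument is the assertion message, not an expected value.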
self.assertNotIn("&", title)
def test_decode_wp_content_returns_empty(self):
"""Check that given an empty string we return an empty string."""
self.assertEqual(decode_wp_content(""), "")
def test_decode_wp_content(self):
"""Check that we can decode a wordpress content string."""
with open(WORDPRESS_ENCODED_CONTENT_SAMPLE) as encoded_file:
encoded_content = encoded_file.read()
with open(WORDPRESS_DECODED_CONTENT_SAMPLE) as decoded_file:
decoded_content = decoded_file.read()
self.assertEqual(
decode_wp_content(encoded_content, br=False), decoded_content
)
def test_preserve_verbatim_formatting(self):
def r(f):
with open(f, encoding="utf-8") as infile:
return infile.read()
silent_f2p = mute(True)(fields2pelican)
test_post = filter(lambda p: p[0].startswith("Code in List"), self.posts)
with temporary_folder() as temp:
md = next(r(f) for f in silent_f2p(test_post, "markdown", temp))
self.assertTrue(re.search(r"\s+a = \[1, 2, 3\]", md))
self.assertTrue(re.search(r"\s+b = \[4, 5, 6\]", md))
for_line = re.search(r"\s+for i in zip\(a, b\):", md).group(0)
print_line = re.search(r"\s+print i", md).group(0)
self.assertTrue(for_line.rindex("for") < print_line.rindex("print"))
def test_code_in_list(self):
def r(f):
with open(f, encoding="utf-8") as infile:
return infile.read()
silent_f2p = mute(True)(fields2pelican)
test_post = filter(lambda p: p[0].startswith("Code in List"), self.posts)
with temporary_folder() as temp:
md = next(r(f) for f in silent_f2p(test_post, "markdown", temp))
sample_line = re.search(r"- This is a code sample", md).group(0)
code_line = re.search(r"\s+a = \[1, 2, 3\]", md).group(0)
self.assertTrue(sample_line.rindex("This") < code_line.rindex("a"))
def test_dont_use_smart_quotes(self):
def r(f):
with open(f, encoding="utf-8") as infile:
return infile.read()
silent_f2p = mute(True)(fields2pelican)
test_post = filter(lambda p: p[0].startswith("Post with raw data"), self.posts)
with temporary_folder() as temp:
md = next(r(f) for f in silent_f2p(test_post, "markdown", temp))
escaped_quotes = re.search(r'\\[\'"“”‘’]', md) # noqa: RUF001
self.assertFalse(escaped_quotes)
def test_convert_caption_to_figure(self):
def r(f):
with open(f, encoding="utf-8") as infile:
return infile.read()
silent_f2p = mute(True)(fields2pelican)
test_post = filter(lambda p: p[0].startswith("Caption on image"), self.posts)
with temporary_folder() as temp:
md = next(r(f) for f in silent_f2p(test_post, "markdown", temp))
caption = re.search(r"\[caption", md)
self.assertFalse(caption)
            for occurrence in [
"/theme/img/xpelican.png.pagespeed.ic.Rjep0025-y.png",
"/theme/img/xpelican-3.png.pagespeed.ic.m-NAIdRCOM.png",
"/theme/img/xpelican.png.pagespeed.ic.Rjep0025-y.png",
"This is a pelican",
"This also a pelican",
"Yet another pelican",
]:
# pandoc 2.x converts into 
# pandoc 3.x converts into <figure>src<figcaption>text</figcaption></figure>
                self.assertIn(occurrence, md)
class TestBuildHeader(unittest.TestCase):
def test_build_header(self):
header = build_header("test", None, None, None, None, None)
self.assertEqual(header, "test\n####\n\n")
def test_build_header_with_fields(self):
header_data = [
"Test Post",
"2014-11-04",
"Alexis Métaireau",
["Programming"],
["Pelican", "Python"],
"test-post",
]
expected_docutils = "\n".join(
[
"Test Post",
"#########",
":date: 2014-11-04",
":author: Alexis Métaireau",
":category: Programming",
":tags: Pelican, Python",
":slug: test-post",
"\n",
]
)
expected_md = "\n".join(
[
"Title: Test Post",
"Date: 2014-11-04",
"Author: Alexis Métaireau",
"Category: Programming",
"Tags: Pelican, Python",
"Slug: test-post",
"\n",
]
)
self.assertEqual(build_header(*header_data), expected_docutils)
self.assertEqual(build_markdown_header(*header_data), expected_md)
def test_build_header_with_east_asian_characters(self):
header = build_header(
"これは広い幅の文字だけで構成されたタイトルです",
None,
None,
None,
None,
None,
)
self.assertEqual(
header,
(
"これは広い幅の文字だけで構成されたタイトルです\n"
"##############################################"
"\n\n"
),
)
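        # 46 "#" characters underline a 23-character title: build_header
        # counts each East Asian full-width character as two columns so the
        # reST underline is wide enough.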
def test_galleries_added_to_header(self):
header = build_header(
"test",
None,
None,
None,
None,
None,
attachments=["output/test1", "output/test2"],
)
self.assertEqual(
header, ("test\n####\n:attachments: output/test1, output/test2\n\n")
)
def test_galleries_added_to_markdown_header(self):
header = build_markdown_header(
"test",
None,
None,
None,
None,
None,
attachments=["output/test1", "output/test2"],
)
self.assertEqual(
header, "Title: test\nAttachments: output/test1, output/test2\n\n"
)
@unittest.skipUnless(BeautifulSoup, "Needs BeautifulSoup module")
@unittest.skipUnless(LXML, "Needs lxml module")
class TestWordpressXMLAttachements(TestCaseWithCLocale):
def setUp(self):
super().setUp()
self.attachments = get_attachments(WORDPRESS_XML_SAMPLE)
def test_recognise_attachments(self):
self.assertTrue(self.attachments)
self.assertEqual(3, len(self.attachments.keys()))
def test_attachments_associated_with_correct_post(self):
self.assertTrue(self.attachments)
for post in self.attachments.keys():
if post is None:
expected = {
(
"https://upload.wikimedia.org/wikipedia/commons/"
"thumb/2/2c/Pelican_lakes_entrance02.jpg/"
"240px-Pelican_lakes_entrance02.jpg"
)
}
self.assertEqual(self.attachments[post], expected)
elif post == "with-excerpt":
expected_invalid = (
"http://thisurlisinvalid.notarealdomain/not_an_image.jpg"
)
expected_pelikan = (
"http://en.wikipedia.org/wiki/File:Pelikan_Walvis_Bay.jpg"
)
self.assertEqual(
self.attachments[post], {expected_invalid, expected_pelikan}
)
elif post == "with-tags":
expected_invalid = "http://thisurlisinvalid.notarealdomain"
self.assertEqual(self.attachments[post], {expected_invalid})
else:
self.fail(f"all attachments should match to a filename or None, {post}")
def test_download_attachments(self):
real_file = os.path.join(CUR_DIR, "content/article.rst")
good_url = path_to_file_url(real_file)
bad_url = "http://localhost:1/not_a_file.txt"
silent_da = mute()(download_attachments)
with temporary_folder() as temp:
locations = list(silent_da(temp, [good_url, bad_url]))
self.assertEqual(1, len(locations))
directory = locations[0]
self.assertTrue(
directory.endswith(posix_join("content", "article.rst")), directory
)
class TestTumblrImporter(TestCaseWithCLocale):
@patch("pelican.tools.pelican_import._get_tumblr_posts")
def test_posts(self, get):
def get_posts(api_key, blogname, offset=0):
if offset > 0:
return []
return [
{
"type": "photo",
"blog_name": "testy",
"date": "2019-11-07 21:26:40 UTC",
"timestamp": 1573162000,
"format": "html",
"slug": "a-slug",
"tags": ["economics"],
"state": "published",
"photos": [
{
"caption": "",
"original_size": {
"url": "https://..fccdc2360ba7182a.jpg",
"width": 634,
"height": 789,
},
}
],
}
]
get.side_effect = get_posts
posts = list(tumblr2fields("api_key", "blogname"))
self.assertEqual(
[
(
"Photo",
'<img alt="" src="https://..fccdc2360ba7182a.jpg" />\n',
"2019-11-07-a-slug",
"2019-11-07 21:26:40+0000",
"testy",
["photo"],
["economics"],
"published",
"article",
"html",
)
],
posts,
posts,
)
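        # (The trailing `posts` argument doubles as the assertion message,
        # so the raw tuples are printed if the comparison fails.)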
@patch("pelican.tools.pelican_import._get_tumblr_posts")
def test_video_embed(self, get):
def get_posts(api_key, blogname, offset=0):
if offset > 0:
return []
return [
{
"type": "video",
"blog_name": "testy",
"slug": "the-slug",
"date": "2017-07-07 20:31:41 UTC",
"timestamp": 1499459501,
"state": "published",
"format": "html",
"tags": [],
"source_url": "https://href.li/?https://www.youtube.com/a",
"source_title": "youtube.com",
"caption": "<p>Caption</p>",
"player": [
{"width": 250, "embed_code": "<iframe>1</iframe>"},
{"width": 400, "embed_code": "<iframe>2</iframe>"},
{"width": 500, "embed_code": "<iframe>3</iframe>"},
],
"video_type": "youtube",
}
]
get.side_effect = get_posts
posts = list(tumblr2fields("api_key", "blogname"))
self.assertEqual(
[
(
"youtube.com",
'<p><a href="https://href.li/?'
'https://www.youtube.com/a">via</a></p>\n<p>Caption</p>'
"<iframe>1</iframe>\n"
"<iframe>2</iframe>\n"
"<iframe>3</iframe>\n",
"2017-07-07-the-slug",
"2017-07-07 20:31:41+0000",
"testy",
["video"],
[],
"published",
"article",
"html",
)
],
posts,
posts,
)
@patch("pelican.tools.pelican_import._get_tumblr_posts")
def test_broken_video_embed(self, get):
def get_posts(api_key, blogname, offset=0):
if offset > 0:
return []
return [
{
"type": "video",
"blog_name": "testy",
"slug": "the-slug",
"date": "2016-08-14 16:37:35 UTC",
"timestamp": 1471192655,
"state": "published",
"format": "html",
"tags": ["interviews"],
"source_url": "https://href.li/?https://www.youtube.com/watch?v=b",
"source_title": "youtube.com",
"caption": "<p>Caption</p>",
"player": [
{
"width": 250,
# If video is gone, embed_code is False
"embed_code": False,
},
{"width": 400, "embed_code": False},
{"width": 500, "embed_code": False},
],
"video_type": "youtube",
}
]
get.side_effect = get_posts
posts = list(tumblr2fields("api_key", "blogname"))
self.assertEqual(
[
(
"youtube.com",
'<p><a href="https://href.li/?https://www.youtube.com/watch?'
'v=b">via</a></p>\n<p>Caption</p>'
"<p>(This video isn't available anymore.)</p>\n",
"2016-08-14-the-slug",
"2016-08-14 16:37:35+0000",
"testy",
["video"],
["interviews"],
"published",
"article",
"html",
)
],
posts,
posts,
)
class TestMediumImporter(TestCaseWithCLocale):
def setUp(self):
super().setUp()
self.test_content_root = "pelican/tests/content"
        # The content coming out of parsing is similar, but not identical:
        # BeautifulSoup rearranges the order of attributes, for example.
        # So we keep a copy of the expected content for the test.
content_filename = f"{self.test_content_root}/medium_post_content.txt"
with open(content_filename, encoding="utf-8") as the_content_file:
# Many editors and scripts add a final newline, so live with that
# in our test
the_content = the_content_file.read()
assert the_content[-1] == "\n"
the_content = the_content[:-1]
self.post_tuple = (
"A title",
the_content,
# slug:
"2017-04-21-medium-post",
"2017-04-21 17:11",
"User Name",
None,
(),
"published",
"article",
"html",
)
def test_mediumpost2field(self):
"""Parse one post"""
post_filename = f"{self.test_content_root}/medium_posts/2017-04-21_-medium-post--d1bf01d62ba3.html"
val = mediumpost2fields(post_filename)
self.assertEqual(self.post_tuple, val, val)
def test_mediumposts2field(self):
"""Parse all posts in an export directory"""
posts = list(mediumposts2fields(f"{self.test_content_root}/medium_posts"))
self.assertEqual(1, len(posts))
self.assertEqual(self.post_tuple, posts[0])
def test_strip_content(self):
"""Strip out unhelpful tags"""
html_doc = (
"<section>This keeps <i>lots</i> of <b>tags</b>, but not "
"the <section>section</section> tags</section>"
)
soup = BeautifulSoup(html_doc, "html.parser")
self.assertEqual(
"This keeps <i>lots</i> of <b>tags</b>, but not the section tags",
strip_medium_post_content(soup),
)
def test_medium_slug(self):
# Remove hex stuff at the end
self.assertEqual(
"2017-04-27_A-long-title",
medium_slug(
"medium-export/posts/2017-04-27_A-long-title--2971442227dd.html"
),
)
# Remove "--DRAFT" at the end
self.assertEqual(
"2017-04-27_A-long-title",
medium_slug("medium-export/posts/2017-04-27_A-long-title--DRAFT.html"),
)
# Remove both (which happens)
self.assertEqual(
"draft_How-to-do", medium_slug("draft_How-to-do--DRAFT--87225c81dddd.html")
)
# If no hex stuff, leave it alone
self.assertEqual(
"2017-04-27_A-long-title",
medium_slug("medium-export/posts/2017-04-27_A-long-title.html"),
)
| 28,066 | Python | .py | 714 | 26.788515 | 107 | 0.519706 | getpelican/pelican | 12,478 | 1,806 | 72 | AGPL-3.0 | 9/5/2024, 5:09:45 PM (Europe/Amsterdam) |
| 6,228 | __init__.py | getpelican_pelican/pelican/tests/__init__.py |
import logging
import warnings

from pelican.log import log_warnings

# redirect the warnings module to use logging instead
log_warnings()

# set up warnings to log DeprecationWarnings and to error on
# warnings raised from pelican's own codebase
warnings.simplefilter("default", DeprecationWarning)
warnings.filterwarnings("error", ".*", Warning, "pelican")

# Add a NullHandler to silence the warning about no available handlers
logging.getLogger().addHandler(logging.NullHandler())
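
# Net effect (a summary, not normative): DeprecationWarnings from anywhere
# are logged through the bridge above, while any Warning raised from within
# the "pelican" package is escalated to an error so tests fail fast.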
| 461 | Python | .py | 11 | 40.545455 | 66 | 0.825112 | getpelican/pelican | 12,478 | 1,806 | 72 | AGPL-3.0 | 9/5/2024, 5:09:45 PM (Europe/Amsterdam) |
| 6,229 | test_plugins.py | getpelican_pelican/pelican/tests/test_plugins.py |
import os
from contextlib import contextmanager

from pelican.plugins._utils import (
    get_namespace_plugins,
    get_plugin_name,
    load_plugins,
    plugin_enabled,
)
from pelican.plugins.signals import signal
from pelican.tests.dummy_plugins.normal_plugin import normal_plugin
from pelican.tests.support import unittest


@contextmanager
def tmp_namespace_path(path):
"""Context manager for temporarily appending namespace plugin packages
path: path containing the `pelican` folder
This modifies the `pelican.__path__` and lets the `pelican.plugins`
namespace package resolve it from that.
"""
# This avoids calls to internal `pelican.plugins.__path__._recalculate()`
# as it should not be necessary
import pelican
old_path = pelican.__path__[:]
try:
pelican.__path__.append(os.path.join(path, "pelican"))
yield
finally:
pelican.__path__ = old_path
class PluginTest(unittest.TestCase):
_PLUGIN_FOLDER = os.path.join(
os.path.abspath(os.path.dirname(__file__)), "dummy_plugins"
)
_NS_PLUGIN_FOLDER = os.path.join(_PLUGIN_FOLDER, "namespace_plugin")
_NORMAL_PLUGIN_FOLDER = os.path.join(_PLUGIN_FOLDER, "normal_plugin")
def test_namespace_path_modification(self):
import pelican
import pelican.plugins
old_path = pelican.__path__[:]
# not existing path
path = os.path.join(self._PLUGIN_FOLDER, "foo")
with tmp_namespace_path(path):
self.assertIn(os.path.join(path, "pelican"), pelican.__path__)
# foo/pelican does not exist, so it won't propagate
self.assertNotIn(
os.path.join(path, "pelican", "plugins"), pelican.plugins.__path__
)
# verify that we restored path back
self.assertEqual(pelican.__path__, old_path)
# existing path
with tmp_namespace_path(self._NS_PLUGIN_FOLDER):
self.assertIn(
os.path.join(self._NS_PLUGIN_FOLDER, "pelican"), pelican.__path__
)
# /namespace_plugin/pelican exists, so it should be in
self.assertIn(
os.path.join(self._NS_PLUGIN_FOLDER, "pelican", "plugins"),
pelican.plugins.__path__,
)
self.assertEqual(pelican.__path__, old_path)
def test_get_namespace_plugins(self):
# existing namespace plugins
existing_ns_plugins = get_namespace_plugins()
# with plugin
with tmp_namespace_path(self._NS_PLUGIN_FOLDER):
ns_plugins = get_namespace_plugins()
self.assertEqual(len(ns_plugins), len(existing_ns_plugins) + 1)
self.assertIn("pelican.plugins.ns_plugin", ns_plugins)
self.assertEqual(
ns_plugins["pelican.plugins.ns_plugin"].NAME, "namespace plugin"
)
# should be back to existing namespace plugins outside `with`
ns_plugins = get_namespace_plugins()
self.assertEqual(ns_plugins, existing_ns_plugins)
def test_load_plugins(self):
def get_plugin_names(plugins):
return {get_plugin_name(p) for p in plugins}
# existing namespace plugins
existing_ns_plugins = load_plugins({})
with tmp_namespace_path(self._NS_PLUGIN_FOLDER):
# with no `PLUGINS` setting, load namespace plugins
plugins = load_plugins({})
self.assertEqual(len(plugins), len(existing_ns_plugins) + 1, plugins)
self.assertEqual(
{"pelican.plugins.ns_plugin"} | get_plugin_names(existing_ns_plugins),
get_plugin_names(plugins),
)
# disable namespace plugins with `PLUGINS = []`
SETTINGS = {"PLUGINS": []}
plugins = load_plugins(SETTINGS)
self.assertEqual(len(plugins), 0, plugins)
# with `PLUGINS`, load only specified plugins
# normal plugin
SETTINGS = {
"PLUGINS": ["normal_plugin"],
"PLUGIN_PATHS": [self._NORMAL_PLUGIN_FOLDER],
}
plugins = load_plugins(SETTINGS)
self.assertEqual(len(plugins), 1, plugins)
self.assertEqual({"normal_plugin"}, get_plugin_names(plugins))
# normal submodule/subpackage plugins
SETTINGS = {
"PLUGINS": [
"normal_submodule_plugin.subplugin",
"normal_submodule_plugin.subpackage.subpackage",
],
"PLUGIN_PATHS": [self._NORMAL_PLUGIN_FOLDER],
}
plugins = load_plugins(SETTINGS)
self.assertEqual(len(plugins), 2, plugins)
self.assertEqual(
{
"normal_submodule_plugin.subplugin",
"normal_submodule_plugin.subpackage.subpackage",
},
get_plugin_names(plugins),
)
# ensure normal plugins are loaded only once
SETTINGS = {
"PLUGINS": ["normal_plugin"],
"PLUGIN_PATHS": [self._NORMAL_PLUGIN_FOLDER],
}
plugins = load_plugins(SETTINGS)
for plugin in load_plugins(SETTINGS):
# The second load_plugins() should return the same plugin
# objects as the first one
self.assertIn(plugin, plugins)
# namespace plugin short
SETTINGS = {"PLUGINS": ["ns_plugin"]}
plugins = load_plugins(SETTINGS)
self.assertEqual(len(plugins), 1, plugins)
self.assertEqual({"pelican.plugins.ns_plugin"}, get_plugin_names(plugins))
# namespace plugin long
SETTINGS = {"PLUGINS": ["pelican.plugins.ns_plugin"]}
plugins = load_plugins(SETTINGS)
self.assertEqual(len(plugins), 1, plugins)
self.assertEqual({"pelican.plugins.ns_plugin"}, get_plugin_names(plugins))
# normal and namespace plugin
SETTINGS = {
"PLUGINS": ["normal_plugin", "ns_plugin"],
"PLUGIN_PATHS": [self._NORMAL_PLUGIN_FOLDER],
}
plugins = load_plugins(SETTINGS)
self.assertEqual(len(plugins), 2, plugins)
self.assertEqual(
{"normal_plugin", "pelican.plugins.ns_plugin"},
get_plugin_names(plugins),
)
def test_get_plugin_name(self):
self.assertEqual(
get_plugin_name(normal_plugin),
"pelican.tests.dummy_plugins.normal_plugin.normal_plugin",
)
class NoopPlugin:
def register(self):
pass
self.assertEqual(
get_plugin_name(NoopPlugin),
"PluginTest.test_get_plugin_name.<locals>.NoopPlugin",
)
self.assertEqual(
get_plugin_name(NoopPlugin()),
"PluginTest.test_get_plugin_name.<locals>.NoopPlugin",
)
def test_plugin_enabled(self):
def get_plugin_names(plugins):
return [get_plugin_name(p) for p in plugins]
with tmp_namespace_path(self._NS_PLUGIN_FOLDER):
# with no `PLUGINS` setting, load namespace plugins
SETTINGS = {}
plugins = get_plugin_names(load_plugins(SETTINGS))
self.assertTrue(plugin_enabled("ns_plugin", plugins))
self.assertTrue(plugin_enabled("pelican.plugins.ns_plugin", plugins))
self.assertFalse(plugin_enabled("normal_plugin", plugins))
self.assertFalse(plugin_enabled("unknown", plugins))
# disable namespace plugins with `PLUGINS = []`
SETTINGS = {"PLUGINS": []}
plugins = get_plugin_names(load_plugins(SETTINGS))
self.assertFalse(plugin_enabled("ns_plugin", plugins))
self.assertFalse(plugin_enabled("pelican.plugins.ns_plugin", plugins))
self.assertFalse(plugin_enabled("normal_plugin", plugins))
self.assertFalse(plugin_enabled("unknown", plugins))
# with `PLUGINS`, load only specified plugins
# normal plugin
SETTINGS = {
"PLUGINS": ["normal_plugin"],
"PLUGIN_PATHS": [self._NORMAL_PLUGIN_FOLDER],
}
plugins = get_plugin_names(load_plugins(SETTINGS))
self.assertFalse(plugin_enabled("ns_plugin", plugins))
self.assertFalse(plugin_enabled("pelican.plugins.ns_plugin", plugins))
self.assertTrue(plugin_enabled("normal_plugin", plugins))
self.assertFalse(plugin_enabled("unknown", plugins))
# normal submodule/subpackage plugins
SETTINGS = {
"PLUGINS": [
"normal_submodule_plugin.subplugin",
"normal_submodule_plugin.subpackage.subpackage",
],
"PLUGIN_PATHS": [self._NORMAL_PLUGIN_FOLDER],
}
plugins = get_plugin_names(load_plugins(SETTINGS))
self.assertFalse(plugin_enabled("ns_plugin", plugins))
self.assertFalse(plugin_enabled("pelican.plugins.ns_plugin", plugins))
self.assertFalse(plugin_enabled("normal_plugin", plugins))
self.assertFalse(plugin_enabled("unknown", plugins))
# namespace plugin short
SETTINGS = {"PLUGINS": ["ns_plugin"]}
plugins = get_plugin_names(load_plugins(SETTINGS))
self.assertTrue(plugin_enabled("ns_plugin", plugins))
self.assertTrue(plugin_enabled("pelican.plugins.ns_plugin", plugins))
self.assertFalse(plugin_enabled("normal_plugin", plugins))
self.assertFalse(plugin_enabled("unknown", plugins))
# namespace plugin long
SETTINGS = {"PLUGINS": ["pelican.plugins.ns_plugin"]}
plugins = get_plugin_names(load_plugins(SETTINGS))
self.assertTrue(plugin_enabled("ns_plugin", plugins))
self.assertTrue(plugin_enabled("pelican.plugins.ns_plugin", plugins))
self.assertFalse(plugin_enabled("normal_plugin", plugins))
self.assertFalse(plugin_enabled("unknown", plugins))
# normal and namespace plugin
SETTINGS = {
"PLUGINS": ["normal_plugin", "ns_plugin"],
"PLUGIN_PATHS": [self._NORMAL_PLUGIN_FOLDER],
}
plugins = get_plugin_names(load_plugins(SETTINGS))
self.assertTrue(plugin_enabled("ns_plugin", plugins))
self.assertTrue(plugin_enabled("pelican.plugins.ns_plugin", plugins))
self.assertTrue(plugin_enabled("normal_plugin", plugins))
self.assertFalse(plugin_enabled("unknown", plugins))
def test_blinker_is_ordered(self):
"""ensure that call order is connetion order"""
dummy_signal = signal("dummpy_signal")
functions = []
expected = []
for i in range(50):
# function appends value of i to a list
def func(input, i=i):
input.append(i)
functions.append(func)
# we expect functions to be run in the connection order
dummy_signal.connect(func)
expected.append(i)
input = []
dummy_signal.send(input)
self.assertEqual(input, expected)
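        # blinker dispatches receivers in the order they were connected,
        # which is exactly what the list comparison above verifies.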
| 11,374 | Python | .py | 244 | 34.721311 | 86 | 0.59614 | getpelican/pelican | 12,478 | 1,806 | 72 | AGPL-3.0 | 9/5/2024, 5:09:45 PM (Europe/Amsterdam) |
| 6,230 | test_generators.py | getpelican_pelican/pelican/tests/test_generators.py |
import os
import sys
from shutil import copy, rmtree
from tempfile import mkdtemp
from unittest.mock import MagicMock

from pelican.generators import (
    ArticlesGenerator,
    Generator,
    PagesGenerator,
    PelicanTemplateNotFound,
    StaticGenerator,
    TemplatePagesGenerator,
)
from pelican.tests.support import (
    TestCaseWithCLocale,
    can_symlink,
    get_context,
    get_settings,
    unittest,
)
from pelican.writers import Writer

CUR_DIR = os.path.dirname(__file__)
CONTENT_DIR = os.path.join(CUR_DIR, "content")


class TestGenerator(TestCaseWithCLocale):
def setUp(self):
super().setUp()
self.settings = get_settings()
self.settings["READERS"] = {"asc": None}
self.generator = Generator(
self.settings.copy(), self.settings, CUR_DIR, self.settings["THEME"], None
)
def test_include_path(self):
self.settings["IGNORE_FILES"] = {"ignored1.rst", "ignored2.rst"}
filename = os.path.join(CUR_DIR, "content", "article.rst")
include_path = self.generator._include_path
self.assertTrue(include_path(filename))
self.assertTrue(include_path(filename, extensions=("rst",)))
self.assertFalse(include_path(filename, extensions=("md",)))
ignored_file = os.path.join(CUR_DIR, "content", "ignored1.rst")
self.assertFalse(include_path(ignored_file))
def test_get_files_exclude(self):
"""Test that Generator.get_files() properly excludes directories."""
# We use our own Generator so we can give it our own content path
generator = Generator(
context=self.settings.copy(),
settings=self.settings,
path=os.path.join(CUR_DIR, "nested_content"),
theme=self.settings["THEME"],
output_path=None,
)
filepaths = generator.get_files(paths=["maindir"])
found_files = {os.path.basename(f) for f in filepaths}
expected_files = {"maindir.md", "subdir.md"}
self.assertFalse(
expected_files - found_files, "get_files() failed to find one or more files"
)
# Test string as `paths` argument rather than list
filepaths = generator.get_files(paths="maindir")
found_files = {os.path.basename(f) for f in filepaths}
expected_files = {"maindir.md", "subdir.md"}
self.assertFalse(
expected_files - found_files, "get_files() failed to find one or more files"
)
filepaths = generator.get_files(paths=[""], exclude=["maindir"])
found_files = {os.path.basename(f) for f in filepaths}
self.assertNotIn(
"maindir.md",
found_files,
"get_files() failed to exclude a top-level directory",
)
self.assertNotIn(
"subdir.md",
found_files,
"get_files() failed to exclude a subdir of an excluded directory",
)
filepaths = generator.get_files(
paths=[""], exclude=[os.path.join("maindir", "subdir")]
)
found_files = {os.path.basename(f) for f in filepaths}
self.assertNotIn(
"subdir.md", found_files, "get_files() failed to exclude a subdirectory"
)
filepaths = generator.get_files(paths=[""], exclude=["subdir"])
found_files = {os.path.basename(f) for f in filepaths}
self.assertIn(
"subdir.md",
found_files,
"get_files() excluded a subdirectory by name, ignoring its path",
)
def test_custom_jinja_environment(self):
"""
Test that setting the JINJA_ENVIRONMENT
properly gets set from the settings config
"""
settings = get_settings()
comment_start_string = "abc"
comment_end_string = "/abc"
settings["JINJA_ENVIRONMENT"] = {
"comment_start_string": comment_start_string,
"comment_end_string": comment_end_string,
}
generator = Generator(
settings.copy(), settings, CUR_DIR, settings["THEME"], None
)
self.assertEqual(comment_start_string, generator.env.comment_start_string)
self.assertEqual(comment_end_string, generator.env.comment_end_string)
def test_theme_overrides(self):
"""
Test that the THEME_TEMPLATES_OVERRIDES configuration setting is
utilized correctly in the Generator.
"""
override_dirs = (
os.path.join(CUR_DIR, "theme_overrides", "level1"),
os.path.join(CUR_DIR, "theme_overrides", "level2"),
)
self.settings["THEME_TEMPLATES_OVERRIDES"] = override_dirs
generator = Generator(
context=self.settings.copy(),
settings=self.settings,
path=CUR_DIR,
theme=self.settings["THEME"],
output_path=None,
)
filename = generator.get_template("article").filename
self.assertEqual(override_dirs[0], os.path.dirname(filename))
self.assertEqual("article.html", os.path.basename(filename))
filename = generator.get_template("authors").filename
self.assertEqual(override_dirs[1], os.path.dirname(filename))
self.assertEqual("authors.html", os.path.basename(filename))
filename = generator.get_template("taglist").filename
self.assertEqual(
os.path.join(self.settings["THEME"], "templates"), os.path.dirname(filename)
)
self.assertNotIn(os.path.dirname(filename), override_dirs)
self.assertEqual("taglist.html", os.path.basename(filename))
def test_simple_prefix(self):
"""
Test `!simple` theme prefix.
"""
filename = self.generator.get_template("!simple/authors").filename
expected_path = os.path.join(
os.path.dirname(CUR_DIR), "themes", "simple", "templates"
)
self.assertEqual(expected_path, os.path.dirname(filename))
self.assertEqual("authors.html", os.path.basename(filename))
def test_theme_prefix(self):
"""
Test `!theme` theme prefix.
"""
filename = self.generator.get_template("!theme/authors").filename
expected_path = os.path.join(
os.path.dirname(CUR_DIR), "themes", "notmyidea", "templates"
)
self.assertEqual(expected_path, os.path.dirname(filename))
self.assertEqual("authors.html", os.path.basename(filename))
def test_bad_prefix(self):
"""
Test unknown/bad theme prefix throws exception.
"""
self.assertRaises(
PelicanTemplateNotFound, self.generator.get_template, "!UNKNOWN/authors"
)
class TestArticlesGenerator(unittest.TestCase):
@classmethod
def setUpClass(cls):
settings = get_settings()
settings["DEFAULT_CATEGORY"] = "Default"
settings["DEFAULT_DATE"] = (1970, 1, 1)
settings["READERS"] = {"asc": None}
settings["CACHE_CONTENT"] = False
context = get_context(settings)
cls.generator = ArticlesGenerator(
context=context,
settings=settings,
path=CONTENT_DIR,
theme=settings["THEME"],
output_path=None,
)
cls.generator.generate_context()
cls.articles = cls.distill_articles(cls.generator.articles)
cls.drafts = cls.distill_articles(cls.generator.drafts)
cls.hidden_articles = cls.distill_articles(cls.generator.hidden_articles)
def setUp(self):
self.temp_cache = mkdtemp(prefix="pelican_cache.")
def tearDown(self):
rmtree(self.temp_cache)
@staticmethod
def distill_articles(articles):
return [
[article.title, article.status, article.category.name, article.template]
for article in articles
]
def test_generate_feeds(self):
settings = get_settings()
settings["CACHE_PATH"] = self.temp_cache
generator = ArticlesGenerator(
context=settings,
settings=settings,
path=None,
theme=settings["THEME"],
output_path=None,
)
writer = MagicMock()
generator.generate_feeds(writer)
writer.write_feed.assert_called_with(
[], settings, "feeds/all.atom.xml", "feeds/all.atom.xml"
)
generator = ArticlesGenerator(
context=settings,
settings=get_settings(FEED_ALL_ATOM=None),
path=None,
theme=settings["THEME"],
output_path=None,
)
writer = MagicMock()
generator.generate_feeds(writer)
self.assertFalse(writer.write_feed.called)
def test_generate_feeds_override_url(self):
settings = get_settings()
settings["CACHE_PATH"] = self.temp_cache
settings["FEED_ALL_ATOM_URL"] = "feeds/atom/all/"
generator = ArticlesGenerator(
context=settings,
settings=settings,
path=None,
theme=settings["THEME"],
output_path=None,
)
writer = MagicMock()
generator.generate_feeds(writer)
writer.write_feed.assert_called_with(
[], settings, "feeds/all.atom.xml", "feeds/atom/all/"
)
def test_generate_context(self):
articles_expected = [
["A title", "published", "medium_posts", "article"],
["Article title", "published", "Default", "article"],
[
"Article with markdown and summary metadata multi",
"published",
"Default",
"article",
],
[
"Article with markdown and nested summary metadata",
"published",
"Default",
"article",
],
[
"Article with markdown and summary metadata single",
"published",
"Default",
"article",
],
[
"Article with markdown containing footnotes",
"published",
"Default",
"article",
],
["Article with template", "published", "Default", "custom"],
["Metadata tags as list!", "published", "Default", "article"],
["Rst with filename metadata", "published", "yeah", "article"],
["One -, two --, three --- dashes!", "published", "Default", "article"],
["One -, two --, three --- dashes!", "published", "Default", "article"],
["Test Markdown extensions", "published", "Default", "article"],
["Test markdown File", "published", "test", "article"],
["Test md File", "published", "test", "article"],
["Test mdown File", "published", "test", "article"],
["Test metadata duplicates", "published", "test", "article"],
["Test mkd File", "published", "test", "article"],
["This is a super article !", "published", "Yeah", "article"],
["This is a super article !", "published", "Yeah", "article"],
[
"Article with Nonconformant HTML meta tags",
"published",
"Default",
"article",
],
["This is a super article !", "published", "yeah", "article"],
["This is a super article !", "published", "yeah", "article"],
["This is a super article !", "published", "yeah", "article"],
["This is a super article !", "published", "yeah", "article"],
["This is a super article !", "published", "yeah", "article"],
["This is a super article !", "published", "yeah", "article"],
["This is a super article !", "published", "yeah", "article"],
["This is a super article !", "published", "yeah", "article"],
["This is a super article !", "published", "Default", "article"],
["Article with an inline SVG", "published", "Default", "article"],
["Article with markdown and empty tags", "published", "Default", "article"],
["This is an article with category !", "published", "yeah", "article"],
[
"This is an article with multiple authors!",
"published",
"Default",
"article",
],
[
"This is an article with multiple authors!",
"published",
"Default",
"article",
],
[
"This is an article with multiple authors in list format!",
"published",
"Default",
"article",
],
[
"This is an article with multiple authors in lastname, "
"firstname format!",
"published",
"Default",
"article",
],
[
"This is an article without category !",
"published",
"Default",
"article",
],
[
"This is an article without category !",
"published",
"TestCategory",
"article",
],
[
"An Article With Code Block To Test Typogrify Ignore",
"published",
"Default",
"article",
],
[
"マックOS X 10.8でパイソンとVirtualenvをインストールと設定",
"published",
"指導書",
"article",
],
]
self.assertEqual(sorted(articles_expected), sorted(self.articles))
def test_articles_draft(self):
draft_articles_expected = [
["Draft article", "draft", "Default", "article"],
]
self.assertEqual(sorted(draft_articles_expected), sorted(self.drafts))
def test_articles_hidden(self):
hidden_articles_expected = [
["Hidden article", "hidden", "Default", "article"],
]
self.assertEqual(sorted(hidden_articles_expected), sorted(self.hidden_articles))
def test_generate_categories(self):
# test for name
# categories are grouped by slug; if two categories have the same slug
# but different names they will be grouped together, the first one in
# terms of process order will define the name for that category
categories = [cat.name for cat, _ in self.generator.categories]
categories_alternatives = (
sorted(
["Default", "TestCategory", "medium_posts", "Yeah", "test", "指導書"]
),
sorted(
["Default", "TestCategory", "medium_posts", "yeah", "test", "指導書"]
),
)
self.assertIn(sorted(categories), categories_alternatives)
# test for slug
categories = [cat.slug for cat, _ in self.generator.categories]
categories_expected = [
"default",
"testcategory",
"medium_posts",
"yeah",
"test",
"zhi-dao-shu",
]
self.assertEqual(sorted(categories), sorted(categories_expected))
def test_do_not_use_folder_as_category(self):
settings = get_settings()
settings["DEFAULT_CATEGORY"] = "Default"
settings["DEFAULT_DATE"] = (1970, 1, 1)
settings["USE_FOLDER_AS_CATEGORY"] = False
settings["CACHE_PATH"] = self.temp_cache
settings["READERS"] = {"asc": None}
context = get_context(settings)
generator = ArticlesGenerator(
context=context,
settings=settings,
path=CONTENT_DIR,
theme=settings["THEME"],
output_path=None,
)
generator.generate_context()
# test for name
# categories are grouped by slug; if two categories have the same slug
# but different names they will be grouped together, the first one in
# terms of process order will define the name for that category
categories = [cat.name for cat, _ in generator.categories]
categories_alternatives = (
sorted(["Default", "Yeah", "test", "指導書"]),
sorted(["Default", "yeah", "test", "指導書"]),
)
self.assertIn(sorted(categories), categories_alternatives)
# test for slug
categories = [cat.slug for cat, _ in generator.categories]
categories_expected = ["default", "yeah", "test", "zhi-dao-shu"]
self.assertEqual(sorted(categories), sorted(categories_expected))
def test_direct_templates_save_as_url_default(self):
settings = get_settings()
settings["CACHE_PATH"] = self.temp_cache
context = get_context(settings)
generator = ArticlesGenerator(
context=context,
settings=settings,
path=None,
theme=settings["THEME"],
output_path=None,
)
write = MagicMock()
generator.generate_direct_templates(write)
write.assert_called_with(
"archives.html",
generator.get_template("archives"),
context,
articles=generator.articles,
dates=generator.dates,
blog=True,
template_name="archives",
page_name="archives",
url="archives.html",
)
def test_direct_templates_save_as_url_modified(self):
settings = get_settings()
settings["DIRECT_TEMPLATES"] = ["archives"]
settings["ARCHIVES_SAVE_AS"] = "archives/index.html"
settings["ARCHIVES_URL"] = "archives/"
settings["CACHE_PATH"] = self.temp_cache
generator = ArticlesGenerator(
context=settings,
settings=settings,
path=None,
theme=settings["THEME"],
output_path=None,
)
write = MagicMock()
generator.generate_direct_templates(write)
write.assert_called_with(
"archives/index.html",
generator.get_template("archives"),
settings,
articles=generator.articles,
dates=generator.dates,
blog=True,
template_name="archives",
page_name="archives/index",
url="archives/",
)
def test_direct_templates_save_as_false(self):
settings = get_settings()
settings["DIRECT_TEMPLATES"] = ["archives"]
settings["ARCHIVES_SAVE_AS"] = False
settings["CACHE_PATH"] = self.temp_cache
generator = ArticlesGenerator(
context=settings,
settings=settings,
path=None,
theme=settings["THEME"],
output_path=None,
)
write = MagicMock()
generator.generate_direct_templates(write)
self.assertEqual(write.call_count, 0)
def test_per_article_template(self):
"""
Custom template articles get the field but standard/unset are None
"""
custom_template = ["Article with template", "published", "Default", "custom"]
standard_template = [
"This is a super article !",
"published",
"Yeah",
"article",
]
self.assertIn(custom_template, self.articles)
self.assertIn(standard_template, self.articles)
def test_period_archives_context(self):
"""Test correctness of the period_archives context values."""
settings = get_settings()
settings["CACHE_PATH"] = self.temp_cache
# No period archives enabled:
context = get_context(settings)
generator = ArticlesGenerator(
context=context,
settings=settings,
path=CONTENT_DIR,
theme=settings["THEME"],
output_path=None,
)
generator.generate_context()
period_archives = generator.context["period_archives"]
self.assertEqual(len(period_archives.items()), 0)
# Year archives enabled:
settings["YEAR_ARCHIVE_SAVE_AS"] = "posts/{date:%Y}/index.html"
settings["YEAR_ARCHIVE_URL"] = "posts/{date:%Y}/"
context = get_context(settings)
generator = ArticlesGenerator(
context=context,
settings=settings,
path=CONTENT_DIR,
theme=settings["THEME"],
output_path=None,
)
generator.generate_context()
period_archives = generator.context["period_archives"]
abbreviated_archives = {
granularity: {period["period"] for period in periods}
for granularity, periods in period_archives.items()
}
self.maxDiff = None
expected = {"year": {(1970,), (2010,), (2012,), (2014,), (2017,)}}
self.assertEqual(expected, abbreviated_archives)
# Month archives enabled:
settings["MONTH_ARCHIVE_SAVE_AS"] = "posts/{date:%Y}/{date:%b}/index.html"
settings["MONTH_ARCHIVE_URL"] = "posts/{date:%Y}/{date:%b}/"
context = get_context(settings)
generator = ArticlesGenerator(
context=context,
settings=settings,
path=CONTENT_DIR,
theme=settings["THEME"],
output_path=None,
)
generator.generate_context()
period_archives = generator.context["period_archives"]
abbreviated_archives = {
granularity: {period["period"] for period in periods}
for granularity, periods in period_archives.items()
}
expected = {
"year": {(1970,), (2010,), (2012,), (2014,), (2017,)},
"month": {
(1970, "January"),
(2010, "December"),
(2012, "December"),
(2012, "November"),
(2012, "October"),
(2014, "February"),
(2017, "April"),
},
}
self.assertEqual(expected, abbreviated_archives)
# Day archives enabled:
settings["DAY_ARCHIVE_SAVE_AS"] = (
"posts/{date:%Y}/{date:%b}/{date:%d}/index.html"
)
settings["DAY_ARCHIVE_URL"] = "posts/{date:%Y}/{date:%b}/{date:%d}/"
context = get_context(settings)
generator = ArticlesGenerator(
context=context,
settings=settings,
path=CONTENT_DIR,
theme=settings["THEME"],
output_path=None,
)
generator.generate_context()
period_archives = generator.context["period_archives"]
abbreviated_archives = {
granularity: {period["period"] for period in periods}
for granularity, periods in period_archives.items()
}
expected = {
"year": {(1970,), (2010,), (2012,), (2014,), (2017,)},
"month": {
(1970, "January"),
(2010, "December"),
(2012, "December"),
(2012, "November"),
(2012, "October"),
(2014, "February"),
(2017, "April"),
},
"day": {
(1970, "January", 1),
(2010, "December", 2),
(2012, "December", 20),
(2012, "November", 29),
(2012, "October", 30),
(2012, "October", 31),
(2014, "February", 9),
(2017, "April", 21),
},
}
self.assertEqual(expected, abbreviated_archives)
# Further item values tests
filtered_archives = [
p for p in period_archives["day"] if p["period"] == (2014, "February", 9)
]
self.assertEqual(len(filtered_archives), 1)
sample_archive = filtered_archives[0]
self.assertEqual(sample_archive["period_num"], (2014, 2, 9))
self.assertEqual(sample_archive["save_as"], "posts/2014/Feb/09/index.html")
self.assertEqual(sample_archive["url"], "posts/2014/Feb/09/")
articles = [
d
for d in generator.articles
if d.date.year == 2014 and d.date.month == 2 and d.date.day == 9
]
self.assertEqual(len(sample_archive["articles"]), len(articles))
dates = [
d
for d in generator.dates
if d.date.year == 2014 and d.date.month == 2 and d.date.day == 9
]
self.assertEqual(len(sample_archive["dates"]), len(dates))
self.assertEqual(sample_archive["dates"][0].title, dates[0].title)
self.assertEqual(sample_archive["dates"][0].date, dates[0].date)
def test_period_in_timeperiod_archive(self):
"""
Test that the context of a generated period_archive is passed
'period' : a tuple of year, month, day according to the time period
"""
settings = get_settings()
settings["YEAR_ARCHIVE_SAVE_AS"] = "posts/{date:%Y}/index.html"
settings["YEAR_ARCHIVE_URL"] = "posts/{date:%Y}/"
settings["CACHE_PATH"] = self.temp_cache
context = get_context(settings)
generator = ArticlesGenerator(
context=context,
settings=settings,
path=CONTENT_DIR,
theme=settings["THEME"],
output_path=None,
)
generator.generate_context()
write = MagicMock()
generator.generate_period_archives(write)
dates = [d for d in generator.dates if d.date.year == 1970]
articles = [d for d in generator.articles if d.date.year == 1970]
self.assertEqual(len(dates), 1)
# among other things it must have at least been called with this
context["period"] = (1970,)
context["period_num"] = (1970,)
write.assert_called_with(
"posts/1970/index.html",
generator.get_template("period_archives"),
context,
blog=True,
articles=articles,
dates=dates,
template_name="period_archives",
url="posts/1970/",
all_articles=generator.articles,
)
settings["MONTH_ARCHIVE_SAVE_AS"] = "posts/{date:%Y}/{date:%b}/index.html"
settings["MONTH_ARCHIVE_URL"] = "posts/{date:%Y}/{date:%b}/"
context = get_context(settings)
generator = ArticlesGenerator(
context=context,
settings=settings,
path=CONTENT_DIR,
theme=settings["THEME"],
output_path=None,
)
generator.generate_context()
write = MagicMock()
generator.generate_period_archives(write)
dates = [
d for d in generator.dates if d.date.year == 1970 and d.date.month == 1
]
articles = [
d for d in generator.articles if d.date.year == 1970 and d.date.month == 1
]
self.assertEqual(len(dates), 1)
context["period"] = (1970, "January")
context["period_num"] = (1970, 1)
# among other things it must have at least been called with this
write.assert_called_with(
"posts/1970/Jan/index.html",
generator.get_template("period_archives"),
context,
blog=True,
articles=articles,
dates=dates,
template_name="period_archives",
url="posts/1970/Jan/",
all_articles=generator.articles,
)
settings["DAY_ARCHIVE_SAVE_AS"] = (
"posts/{date:%Y}/{date:%b}/{date:%d}/index.html"
)
settings["DAY_ARCHIVE_URL"] = "posts/{date:%Y}/{date:%b}/{date:%d}/"
context = get_context(settings)
generator = ArticlesGenerator(
context=context,
settings=settings,
path=CONTENT_DIR,
theme=settings["THEME"],
output_path=None,
)
generator.generate_context()
write = MagicMock()
generator.generate_period_archives(write)
dates = [
d
for d in generator.dates
if d.date.year == 1970 and d.date.month == 1 and d.date.day == 1
]
articles = [
d
for d in generator.articles
if d.date.year == 1970 and d.date.month == 1 and d.date.day == 1
]
self.assertEqual(len(dates), 1)
context["period"] = (1970, "January", 1)
context["period_num"] = (1970, 1, 1)
        # among other things, it must at least have been called with this
write.assert_called_with(
"posts/1970/Jan/01/index.html",
generator.get_template("period_archives"),
context,
blog=True,
articles=articles,
dates=dates,
template_name="period_archives",
url="posts/1970/Jan/01/",
all_articles=generator.articles,
)
def test_nonexistent_template(self):
"""Attempt to load a non-existent template"""
settings = get_settings()
context = get_context(settings)
generator = ArticlesGenerator(
context=context,
settings=settings,
path=None,
theme=settings["THEME"],
output_path=None,
)
self.assertRaises(Exception, generator.get_template, "not_a_template")
def test_generate_authors(self):
"""Check authors generation."""
authors = [author.name for author, _ in self.generator.authors]
authors_expected = sorted(
[
"Alexis Métaireau",
"Author, First",
"Author, Second",
"First Author",
"Second Author",
]
)
self.assertEqual(sorted(authors), authors_expected)
# test for slug
authors = [author.slug for author, _ in self.generator.authors]
authors_expected = [
"alexis-metaireau",
"author-first",
"author-second",
"first-author",
"second-author",
]
self.assertEqual(sorted(authors), sorted(authors_expected))
def test_standard_metadata_in_default_metadata(self):
settings = get_settings()
settings["CACHE_CONTENT"] = False
settings["DEFAULT_CATEGORY"] = "Default"
settings["DEFAULT_DATE"] = (1970, 1, 1)
settings["DEFAULT_METADATA"] = (
("author", "Blogger"),
# category will be ignored in favor of
# DEFAULT_CATEGORY
("category", "Random"),
("tags", "general, untagged"),
)
context = get_context(settings)
generator = ArticlesGenerator(
context=context,
settings=settings,
path=CONTENT_DIR,
theme=settings["THEME"],
output_path=None,
)
generator.generate_context()
authors = sorted([author.name for author, _ in generator.authors])
authors_expected = sorted(
[
"Alexis Métaireau",
"Blogger",
"Author, First",
"Author, Second",
"First Author",
"Second Author",
]
)
self.assertEqual(authors, authors_expected)
categories = sorted([category.name for category, _ in generator.categories])
categories_expected = [
sorted(
["Default", "TestCategory", "medium_posts", "yeah", "test", "指導書"]
),
sorted(
["Default", "TestCategory", "medium_posts", "Yeah", "test", "指導書"]
),
]
self.assertIn(categories, categories_expected)
tags = sorted([tag.name for tag in generator.tags])
tags_expected = sorted(
["bar", "foo", "foobar", "general", "untagged", "パイソン", "マック"]
)
self.assertEqual(tags, tags_expected)
def test_article_order_by(self):
settings = get_settings()
settings["DEFAULT_CATEGORY"] = "Default"
settings["DEFAULT_DATE"] = (1970, 1, 1)
settings["ARTICLE_ORDER_BY"] = "title"
context = get_context(settings)
generator = ArticlesGenerator(
context=context,
settings=settings,
path=CONTENT_DIR,
theme=settings["THEME"],
output_path=None,
)
generator.generate_context()
expected = [
"A title",
"An Article With Code Block To Test Typogrify Ignore",
"Article title",
"Article with Nonconformant HTML meta tags",
"Article with an inline SVG",
"Article with markdown and empty tags",
"Article with markdown and nested summary metadata",
"Article with markdown and summary metadata multi",
"Article with markdown and summary metadata single",
"Article with markdown containing footnotes",
"Article with template",
"Metadata tags as list!",
"One -, two --, three --- dashes!",
"One -, two --, three --- dashes!",
"Rst with filename metadata",
"Test Markdown extensions",
"Test markdown File",
"Test md File",
"Test mdown File",
"Test metadata duplicates",
"Test mkd File",
"This is a super article !",
"This is a super article !",
"This is a super article !",
"This is a super article !",
"This is a super article !",
"This is a super article !",
"This is a super article !",
"This is a super article !",
"This is a super article !",
"This is a super article !",
"This is a super article !",
"This is an article with category !",
(
"This is an article with multiple authors in lastname, "
"firstname format!"
),
"This is an article with multiple authors in list format!",
"This is an article with multiple authors!",
"This is an article with multiple authors!",
"This is an article without category !",
"This is an article without category !",
"マックOS X 10.8でパイソンとVirtualenvをインストールと設定",
]
articles = [article.title for article in generator.articles]
self.assertEqual(articles, expected)
# reversed title
settings = get_settings()
settings["DEFAULT_CATEGORY"] = "Default"
settings["DEFAULT_DATE"] = (1970, 1, 1)
settings["ARTICLE_ORDER_BY"] = "reversed-title"
context = get_context(settings)
generator = ArticlesGenerator(
context=context,
settings=settings,
path=CONTENT_DIR,
theme=settings["THEME"],
output_path=None,
)
generator.generate_context()
articles = [article.title for article in generator.articles]
self.assertEqual(articles, list(reversed(expected)))
class TestPageGenerator(unittest.TestCase):
    # Note: every time you want to test a new field, make sure the test
    # pages in "TestPages" all have that field, add it to the distilled
    # output in distill_pages, and update the assertEqual calls in
    # test_generate_context to match.
def setUp(self):
self.temp_cache = mkdtemp(prefix="pelican_cache.")
def tearDown(self):
rmtree(self.temp_cache)
def distill_pages(self, pages):
return [[page.title, page.status, page.template] for page in pages]
def test_generate_context(self):
settings = get_settings()
settings["CACHE_PATH"] = self.temp_cache
settings["PAGE_PATHS"] = ["TestPages"] # relative to CUR_DIR
settings["DEFAULT_DATE"] = (1970, 1, 1)
context = get_context(settings)
generator = PagesGenerator(
context=context,
settings=settings,
path=CUR_DIR,
theme=settings["THEME"],
output_path=None,
)
generator.generate_context()
pages = self.distill_pages(generator.pages)
hidden_pages = self.distill_pages(generator.hidden_pages)
draft_pages = self.distill_pages(generator.draft_pages)
pages_expected = [
["This is a test page", "published", "page"],
["This is a markdown test page", "published", "page"],
["This is a test page with a preset template", "published", "custom"],
["Page with a bunch of links", "published", "page"],
["Page with static links", "published", "page"],
["A Page (Test) for sorting", "published", "page"],
]
hidden_pages_expected = [
["This is a test hidden page", "hidden", "page"],
["This is a markdown test hidden page", "hidden", "page"],
["This is a test hidden page with a custom template", "hidden", "custom"],
]
draft_pages_expected = [
["This is a test draft page", "draft", "page"],
["This is a markdown test draft page", "draft", "page"],
["This is a test draft page with a custom template", "draft", "custom"],
]
self.assertEqual(sorted(pages_expected), sorted(pages))
self.assertEqual(
sorted(pages_expected),
sorted(self.distill_pages(generator.context["pages"])),
)
self.assertEqual(sorted(hidden_pages_expected), sorted(hidden_pages))
self.assertEqual(sorted(draft_pages_expected), sorted(draft_pages))
self.assertEqual(
sorted(hidden_pages_expected),
sorted(self.distill_pages(generator.context["hidden_pages"])),
)
self.assertEqual(
sorted(draft_pages_expected),
sorted(self.distill_pages(generator.context["draft_pages"])),
)
def test_generate_sorted(self):
settings = get_settings()
settings["PAGE_PATHS"] = ["TestPages"] # relative to CUR_DIR
settings["CACHE_PATH"] = self.temp_cache
settings["DEFAULT_DATE"] = (1970, 1, 1)
context = get_context(settings)
# default sort (filename)
pages_expected_sorted_by_filename = [
["This is a test page", "published", "page"],
["This is a markdown test page", "published", "page"],
["A Page (Test) for sorting", "published", "page"],
["Page with a bunch of links", "published", "page"],
["Page with static links", "published", "page"],
["This is a test page with a preset template", "published", "custom"],
]
generator = PagesGenerator(
context=context,
settings=settings,
path=CUR_DIR,
theme=settings["THEME"],
output_path=None,
)
generator.generate_context()
pages = self.distill_pages(generator.pages)
self.assertEqual(pages_expected_sorted_by_filename, pages)
# sort by title
pages_expected_sorted_by_title = [
["A Page (Test) for sorting", "published", "page"],
["Page with a bunch of links", "published", "page"],
["Page with static links", "published", "page"],
["This is a markdown test page", "published", "page"],
["This is a test page", "published", "page"],
["This is a test page with a preset template", "published", "custom"],
]
settings["PAGE_ORDER_BY"] = "title"
context = get_context(settings)
generator = PagesGenerator(
context=context.copy(),
settings=settings,
path=CUR_DIR,
theme=settings["THEME"],
output_path=None,
)
generator.generate_context()
pages = self.distill_pages(generator.pages)
self.assertEqual(pages_expected_sorted_by_title, pages)
# sort by title reversed
pages_expected_sorted_by_title = [
["This is a test page with a preset template", "published", "custom"],
["This is a test page", "published", "page"],
["This is a markdown test page", "published", "page"],
["Page with static links", "published", "page"],
["Page with a bunch of links", "published", "page"],
["A Page (Test) for sorting", "published", "page"],
]
settings["PAGE_ORDER_BY"] = "reversed-title"
context = get_context(settings)
generator = PagesGenerator(
context=context,
settings=settings,
path=CUR_DIR,
theme=settings["THEME"],
output_path=None,
)
generator.generate_context()
pages = self.distill_pages(generator.pages)
self.assertEqual(pages_expected_sorted_by_title, pages)
def test_tag_and_category_links_on_generated_pages(self):
"""
Test to ensure links of the form {tag}tagname and {category}catname
are generated correctly on pages
"""
settings = get_settings()
settings["PAGE_PATHS"] = ["TestPages"] # relative to CUR_DIR
settings["CACHE_PATH"] = self.temp_cache
settings["DEFAULT_DATE"] = (1970, 1, 1)
context = get_context(settings)
generator = PagesGenerator(
context=context,
settings=settings,
path=CUR_DIR,
theme=settings["THEME"],
output_path=None,
)
generator.generate_context()
pages_by_title = {p.title: p for p in generator.pages}
test_content = pages_by_title["Page with a bunch of links"].content
self.assertIn('<a href="/category/yeah.html">', test_content)
self.assertIn('<a href="/tag/matsuku.html">', test_content)
def test_static_and_attach_links_on_generated_pages(self):
"""
Test to ensure links of the form {static}filename and {attach}filename
are included in context['static_links']
"""
settings = get_settings()
settings["PAGE_PATHS"] = ["TestPages/page_with_static_links.md"]
settings["CACHE_PATH"] = self.temp_cache
settings["DEFAULT_DATE"] = (1970, 1, 1)
context = get_context(settings)
generator = PagesGenerator(
context=context,
settings=settings,
path=CUR_DIR,
theme=settings["THEME"],
output_path=None,
)
generator.generate_context()
self.assertIn("pelican/tests/TestPages/image0.jpg", context["static_links"])
self.assertIn("pelican/tests/TestPages/image1.jpg", context["static_links"])
class TestTemplatePagesGenerator(TestCaseWithCLocale):
TEMPLATE_CONTENT = "foo: {{ foo }}"
def setUp(self):
super().setUp()
self.temp_content = mkdtemp(prefix="pelicantests.")
self.temp_output = mkdtemp(prefix="pelicantests.")
def tearDown(self):
rmtree(self.temp_content)
rmtree(self.temp_output)
def test_generate_output(self):
settings = get_settings()
settings["STATIC_PATHS"] = ["static"]
settings["TEMPLATE_PAGES"] = {"template/source.html": "generated/file.html"}
generator = TemplatePagesGenerator(
context={"foo": "bar"},
settings=settings,
path=self.temp_content,
theme="",
output_path=self.temp_output,
)
# create a dummy template file
template_dir = os.path.join(self.temp_content, "template")
template_path = os.path.join(template_dir, "source.html")
os.makedirs(template_dir)
with open(template_path, "w") as template_file:
template_file.write(self.TEMPLATE_CONTENT)
writer = Writer(self.temp_output, settings=settings)
generator.generate_output(writer)
output_path = os.path.join(self.temp_output, "generated", "file.html")
# output file has been generated
self.assertTrue(os.path.exists(output_path))
# output content is correct
with open(output_path) as output_file:
self.assertEqual(output_file.read(), "foo: bar")
class TestStaticGenerator(unittest.TestCase):
def setUp(self):
self.content_path = os.path.join(CUR_DIR, "mixed_content")
self.temp_content = mkdtemp(prefix="testcontent.")
self.temp_output = mkdtemp(prefix="testoutput.")
self.settings = get_settings()
self.settings["PATH"] = self.temp_content
self.settings["STATIC_PATHS"] = ["static"]
self.settings["OUTPUT_PATH"] = self.temp_output
os.mkdir(os.path.join(self.temp_content, "static"))
self.startfile = os.path.join(self.temp_content, "static", "staticfile")
self.endfile = os.path.join(self.temp_output, "static", "staticfile")
self.generator = StaticGenerator(
context=get_context(),
settings=self.settings,
path=self.temp_content,
theme="",
output_path=self.temp_output,
)
def tearDown(self):
rmtree(self.temp_content)
rmtree(self.temp_output)
def set_ancient_mtime(self, path, timestamp=1):
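        # Rewind both atime and mtime (to one second past the epoch by
        # default) so that modification-time comparisons treat the file
        # as ancient.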
os.utime(path, (timestamp, timestamp))
def test_theme_static_paths_dirs(self):
"""Test that StaticGenerator properly copies also files mentioned in
TEMPLATE_STATIC_PATHS, not just directories."""
settings = get_settings(PATH=self.content_path)
context = get_context(settings, staticfiles=[])
StaticGenerator(
context=context,
settings=settings,
path=settings["PATH"],
output_path=self.temp_output,
theme=settings["THEME"],
).generate_output(None)
# The content of dirs listed in THEME_STATIC_PATHS (defaulting to
# "static") is put into the output
self.assertTrue(os.path.isdir(os.path.join(self.temp_output, "theme/css/")))
self.assertTrue(os.path.isdir(os.path.join(self.temp_output, "theme/fonts/")))
def test_theme_static_paths_files(self):
"""Test that StaticGenerator properly copies also files mentioned in
TEMPLATE_STATIC_PATHS, not just directories."""
settings = get_settings(
PATH=self.content_path,
THEME_STATIC_PATHS=["static/css/fonts.css", "static/fonts/"],
)
context = get_context(settings, staticfiles=[])
StaticGenerator(
context=context,
settings=settings,
path=settings["PATH"],
output_path=self.temp_output,
theme=settings["THEME"],
).generate_output(None)
        # Only the content of the dirs and files listed in THEME_STATIC_PATHS
        # is put into the output, not everything from static/
self.assertFalse(os.path.isdir(os.path.join(self.temp_output, "theme/css/")))
self.assertFalse(os.path.isdir(os.path.join(self.temp_output, "theme/fonts/")))
self.assertTrue(
os.path.isfile(
os.path.join(self.temp_output, "theme/Yanone_Kaffeesatz_400.eot")
)
)
self.assertTrue(
os.path.isfile(
os.path.join(self.temp_output, "theme/Yanone_Kaffeesatz_400.svg")
)
)
self.assertTrue(
os.path.isfile(
os.path.join(self.temp_output, "theme/Yanone_Kaffeesatz_400.ttf")
)
)
self.assertTrue(
os.path.isfile(
os.path.join(self.temp_output, "theme/Yanone_Kaffeesatz_400.woff")
)
)
self.assertTrue(
os.path.isfile(
os.path.join(self.temp_output, "theme/Yanone_Kaffeesatz_400.woff2")
)
)
self.assertTrue(
os.path.isfile(os.path.join(self.temp_output, "theme/font.css"))
)
self.assertTrue(
os.path.isfile(os.path.join(self.temp_output, "theme/fonts.css"))
)
def test_static_excludes(self):
"""Test that StaticGenerator respects STATIC_EXCLUDES."""
settings = get_settings(
STATIC_EXCLUDES=["subdir"],
PATH=self.content_path,
STATIC_PATHS=[""],
)
context = get_context(settings)
StaticGenerator(
context=context,
settings=settings,
path=settings["PATH"],
output_path=self.temp_output,
theme=settings["THEME"],
).generate_context()
staticnames = [os.path.basename(c.source_path) for c in context["staticfiles"]]
self.assertNotIn(
"subdir_fake_image.jpg",
staticnames,
"StaticGenerator processed a file in a STATIC_EXCLUDES directory",
)
self.assertIn(
"fake_image.jpg",
staticnames,
"StaticGenerator skipped a file that it should have included",
)
def test_static_exclude_sources(self):
"""Test that StaticGenerator respects STATIC_EXCLUDE_SOURCES."""
settings = get_settings(
STATIC_EXCLUDE_SOURCES=True,
PATH=self.content_path,
PAGE_PATHS=[""],
STATIC_PATHS=[""],
CACHE_CONTENT=False,
)
context = get_context(settings)
for generator_class in (PagesGenerator, StaticGenerator):
generator_class(
context=context,
settings=settings,
path=settings["PATH"],
output_path=self.temp_output,
theme=settings["THEME"],
).generate_context()
staticnames = [os.path.basename(c.source_path) for c in context["staticfiles"]]
self.assertFalse(
any(name.endswith(".md") for name in staticnames),
"STATIC_EXCLUDE_SOURCES=True failed to exclude a markdown file",
)
settings.update(STATIC_EXCLUDE_SOURCES=False)
context = get_context(settings)
for generator_class in (PagesGenerator, StaticGenerator):
generator_class(
context=context,
settings=settings,
path=settings["PATH"],
output_path=self.temp_output,
theme=settings["THEME"],
).generate_context()
staticnames = [os.path.basename(c.source_path) for c in context["staticfiles"]]
self.assertTrue(
any(name.endswith(".md") for name in staticnames),
"STATIC_EXCLUDE_SOURCES=False failed to include a markdown file",
)
def test_static_links(self):
"""Test that StaticGenerator uses files in static_links"""
settings = get_settings(
STATIC_EXCLUDES=["subdir"],
PATH=self.content_path,
STATIC_PATHS=[],
)
context = get_context(settings)
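        # Simulate links that would normally be collected from {static} and
        # {attach} references while parsing content.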
context["static_links"] |= {"short_page.md", "subdir_fake_image.jpg"}
StaticGenerator(
context=context,
settings=settings,
path=settings["PATH"],
output_path=self.temp_output,
theme=settings["THEME"],
).generate_context()
staticfiles_names = [
os.path.basename(c.source_path) for c in context["staticfiles"]
]
static_content_names = [os.path.basename(c) for c in context["static_content"]]
self.assertIn(
"short_page.md",
staticfiles_names,
"StaticGenerator skipped a file that it should have included",
)
self.assertIn(
"short_page.md",
static_content_names,
"StaticGenerator skipped a file that it should have included",
)
self.assertIn(
"subdir_fake_image.jpg",
staticfiles_names,
"StaticGenerator skipped a file that it should have included",
)
self.assertIn(
"subdir_fake_image.jpg",
static_content_names,
"StaticGenerator skipped a file that it should have included",
)
def test_copy_one_file(self):
with open(self.startfile, "w") as f:
f.write("staticcontent")
self.generator.generate_context()
self.generator.generate_output(None)
with open(self.endfile) as f:
self.assertEqual(f.read(), "staticcontent")
def test_file_update_required_when_dest_does_not_exist(self):
staticfile = MagicMock()
staticfile.source_path = self.startfile
staticfile.save_as = self.endfile
with open(staticfile.source_path, "w") as f:
f.write("a")
update_required = self.generator._file_update_required(staticfile)
self.assertTrue(update_required)
def test_dest_and_source_mtimes_are_equal(self):
staticfile = MagicMock()
staticfile.source_path = self.startfile
staticfile.save_as = self.endfile
self.settings["STATIC_CHECK_IF_MODIFIED"] = True
with open(staticfile.source_path, "w") as f:
f.write("a")
os.mkdir(os.path.join(self.temp_output, "static"))
copy(staticfile.source_path, staticfile.save_as)
isnewer = self.generator._source_is_newer(staticfile)
self.assertFalse(isnewer)
def test_source_is_newer(self):
staticfile = MagicMock()
staticfile.source_path = self.startfile
staticfile.save_as = self.endfile
with open(staticfile.source_path, "w") as f:
f.write("a")
os.mkdir(os.path.join(self.temp_output, "static"))
copy(staticfile.source_path, staticfile.save_as)
self.set_ancient_mtime(staticfile.save_as)
isnewer = self.generator._source_is_newer(staticfile)
self.assertTrue(isnewer)
def test_skip_file_when_source_is_not_newer(self):
self.settings["STATIC_CHECK_IF_MODIFIED"] = True
with open(self.startfile, "w") as f:
f.write("staticcontent")
os.mkdir(os.path.join(self.temp_output, "static"))
with open(self.endfile, "w") as f:
f.write("staticcontent")
expected = os.path.getmtime(self.endfile)
self.set_ancient_mtime(self.startfile)
self.generator.generate_context()
self.generator.generate_output(None)
self.assertEqual(os.path.getmtime(self.endfile), expected)
def test_dont_link_by_default(self):
with open(self.startfile, "w") as f:
f.write("staticcontent")
self.generator.generate_context()
self.generator.generate_output(None)
self.assertFalse(os.path.samefile(self.startfile, self.endfile))
def test_output_file_is_linked_to_source(self):
self.settings["STATIC_CREATE_LINKS"] = True
with open(self.startfile, "w") as f:
f.write("staticcontent")
self.generator.generate_context()
self.generator.generate_output(None)
self.assertTrue(os.path.samefile(self.startfile, self.endfile))
def test_output_file_exists_and_is_newer(self):
self.settings["STATIC_CREATE_LINKS"] = True
with open(self.startfile, "w") as f:
f.write("staticcontent")
os.mkdir(os.path.join(self.temp_output, "static"))
with open(self.endfile, "w") as f:
f.write("othercontent")
self.generator.generate_context()
self.generator.generate_output(None)
self.assertTrue(os.path.samefile(self.startfile, self.endfile))
@unittest.skipUnless(can_symlink(), "No symlink privilege")
def test_can_symlink_when_hardlink_not_possible(self):
self.settings["STATIC_CREATE_LINKS"] = True
with open(self.startfile, "w") as f:
f.write("staticcontent")
os.mkdir(os.path.join(self.temp_output, "static"))
self.generator.fallback_to_symlinks = True
self.generator.generate_context()
self.generator.generate_output(None)
self.assertTrue(os.path.islink(self.endfile))
@unittest.skipUnless(can_symlink(), "No symlink privilege")
def test_existing_symlink_is_considered_up_to_date(self):
self.settings["STATIC_CREATE_LINKS"] = True
with open(self.startfile, "w") as f:
f.write("staticcontent")
os.mkdir(os.path.join(self.temp_output, "static"))
os.symlink(self.startfile, self.endfile)
staticfile = MagicMock()
staticfile.source_path = self.startfile
staticfile.save_as = self.endfile
requires_update = self.generator._file_update_required(staticfile)
self.assertFalse(requires_update)
@unittest.skipUnless(can_symlink(), "No symlink privilege")
def test_invalid_symlink_is_overwritten(self):
self.settings["STATIC_CREATE_LINKS"] = True
with open(self.startfile, "w") as f:
f.write("staticcontent")
os.mkdir(os.path.join(self.temp_output, "static"))
os.symlink("invalid", self.endfile)
staticfile = MagicMock()
staticfile.source_path = self.startfile
staticfile.save_as = self.endfile
requires_update = self.generator._file_update_required(staticfile)
self.assertTrue(requires_update)
self.generator.fallback_to_symlinks = True
self.generator.generate_context()
self.generator.generate_output(None)
self.assertTrue(os.path.islink(self.endfile))
# os.path.realpath is broken on Windows before python3.8 for symlinks.
        # This is an (ugly) workaround.
# see: https://bugs.python.org/issue9949
if os.name == "nt" and sys.version_info < (3, 8):
def get_real_path(path):
return os.readlink(path) if os.path.islink(path) else path
else:
get_real_path = os.path.realpath
self.assertEqual(get_real_path(self.endfile), get_real_path(self.startfile))
def test_delete_existing_file_before_mkdir(self):
with open(self.startfile, "w") as f:
f.write("staticcontent")
with open(os.path.join(self.temp_output, "static"), "w") as f:
f.write("This file should be a directory")
self.generator.generate_context()
self.generator.generate_output(None)
self.assertTrue(os.path.isdir(os.path.join(self.temp_output, "static")))
self.assertTrue(os.path.isfile(self.endfile))
class TestJinja2Environment(TestCaseWithCLocale):
def setUp(self):
self.temp_content = mkdtemp(prefix="pelicantests.")
self.temp_output = mkdtemp(prefix="pelicantests.")
def tearDown(self):
rmtree(self.temp_content)
rmtree(self.temp_output)
def _test_jinja2_helper(self, additional_settings, content, expected):
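        # Shared helper: render a single TEMPLATE_PAGES source with the given
        # extra settings applied, then compare the generated file's contents
        # against `expected`.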
settings = get_settings()
settings["STATIC_PATHS"] = ["static"]
settings["TEMPLATE_PAGES"] = {"template/source.html": "generated/file.html"}
settings.update(additional_settings)
generator = TemplatePagesGenerator(
context={"foo": "foo", "bar": "bar"},
settings=settings,
path=self.temp_content,
theme="",
output_path=self.temp_output,
)
# create a dummy template file
template_dir = os.path.join(self.temp_content, "template")
template_path = os.path.join(template_dir, "source.html")
os.makedirs(template_dir)
with open(template_path, "w") as template_file:
template_file.write(content)
writer = Writer(self.temp_output, settings=settings)
generator.generate_output(writer)
output_path = os.path.join(self.temp_output, "generated", "file.html")
# output file has been generated
self.assertTrue(os.path.exists(output_path))
# output content is correct
with open(output_path) as output_file:
self.assertEqual(output_file.read(), expected)
def test_jinja2_filter(self):
"""JINJA_FILTERS adds custom filters to Jinja2 environment"""
content = "foo: {{ foo|custom_filter }}, bar: {{ bar|custom_filter }}"
settings = {"JINJA_FILTERS": {"custom_filter": lambda x: x.upper()}}
expected = "foo: FOO, bar: BAR"
self._test_jinja2_helper(settings, content, expected)
def test_jinja2_filter_plugin_enabled(self):
"""JINJA_FILTERS adds custom filters to Jinja2 environment"""
settings = {"PLUGINS": ["legacy_plugin", "pelican.plugins.ns_plugin"]}
jinja_template = (
"{plugin}: "
"{{% if '{plugin}' is plugin_enabled %}}yes"
"{{% else %}}no{{% endif %}}"
)
content = " / ".join(
(
jinja_template.format(plugin="ns_plugin"),
jinja_template.format(plugin="pelican.plugins.ns_plugin"),
jinja_template.format(plugin="legacy_plugin"),
jinja_template.format(plugin="unknown"),
)
)
expected = (
"ns_plugin: yes / "
"pelican.plugins.ns_plugin: yes / "
"legacy_plugin: yes / "
"unknown: no"
)
self._test_jinja2_helper(settings, content, expected)
def test_jinja2_test(self):
"""JINJA_TESTS adds custom tests to Jinja2 environment"""
content = "foo {{ foo is custom_test }}, bar {{ bar is custom_test }}"
settings = {"JINJA_TESTS": {"custom_test": lambda x: x == "bar"}}
expected = "foo False, bar True"
self._test_jinja2_helper(settings, content, expected)
def test_jinja2_global(self):
"""JINJA_GLOBALS adds custom globals to Jinja2 environment"""
content = "{{ custom_global }}"
settings = {"JINJA_GLOBALS": {"custom_global": "foobar"}}
expected = "foobar"
self._test_jinja2_helper(settings, content, expected)
def test_jinja2_extension(self):
"""JINJA_ENVIRONMENT adds extensions to Jinja2 environment"""
content = "{% set stuff = [] %}{% do stuff.append(1) %}{{ stuff }}"
settings = {"JINJA_ENVIRONMENT": {"extensions": ["jinja2.ext.do"]}}
expected = "[1]"
self._test_jinja2_helper(settings, content, expected)
| 62,599 | Python | .py | 1,495 | 31.010702 | 88 | 0.58134 | getpelican/pelican | 12,478 | 1,806 | 72 | AGPL-3.0 | 9/5/2024, 5:09:45 PM (Europe/Amsterdam) |
6,231 | test_server.py | getpelican_pelican/pelican/tests/test_server.py |
import os
from io import BytesIO
from shutil import rmtree
from tempfile import mkdtemp
from pelican.server import ComplexHTTPRequestHandler
from pelican.tests.support import unittest
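# Minimal stand-ins for the socket request and the HTTP server: makefile()
# hands the handler an empty request stream, which is enough to construct
# ComplexHTTPRequestHandler without opening a real socket.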
class MockRequest:
def makefile(self, *args, **kwargs):
return BytesIO(b"")
class MockServer:
pass
class TestServer(unittest.TestCase):
def setUp(self):
self.server = MockServer()
self.temp_output = mkdtemp(prefix="pelicantests.")
self.old_cwd = os.getcwd()
os.chdir(self.temp_output)
def tearDown(self):
os.chdir(self.old_cwd)
rmtree(self.temp_output)
def test_get_path_that_exists(self):
handler = ComplexHTTPRequestHandler(
MockRequest(), ("0.0.0.0", 8888), self.server
)
handler.base_path = self.temp_output
open(os.path.join(self.temp_output, "foo.html"), "a").close()
os.mkdir(os.path.join(self.temp_output, "foo"))
open(os.path.join(self.temp_output, "foo", "index.html"), "a").close()
os.mkdir(os.path.join(self.temp_output, "bar"))
open(os.path.join(self.temp_output, "bar", "index.html"), "a").close()
os.mkdir(os.path.join(self.temp_output, "baz"))
for suffix in ["", "/"]:
# foo.html has precedence over foo/index.html
path = handler.get_path_that_exists("foo" + suffix)
self.assertEqual(path, "foo.html")
# folder with index.html should return folder/index.html
path = handler.get_path_that_exists("bar" + suffix)
self.assertEqual(path, "bar/index.html")
            # folder without index.html should return the same as the input
path = handler.get_path_that_exists("baz" + suffix)
self.assertEqual(path, "baz" + suffix)
            # a non-existent path should return None
path = handler.get_path_that_exists("quux" + suffix)
self.assertIsNone(path)
| 1,943 | Python | .py | 44 | 35.704545 | 78 | 0.638343 | getpelican/pelican | 12,478 | 1,806 | 72 | AGPL-3.0 | 9/5/2024, 5:09:45 PM (Europe/Amsterdam) |
6,232 | test_settings.py | getpelican_pelican/pelican/tests/test_settings.py |
import copy
import locale
import os
from os.path import abspath, dirname, join
from pelican.settings import (
DEFAULT_CONFIG,
DEFAULT_THEME,
_printf_s_to_format_field,
configure_settings,
handle_deprecated_settings,
read_settings,
)
from pelican.tests.support import unittest
class TestSettingsConfiguration(unittest.TestCase):
"""Provided a file, it should read it, replace the default values,
append new values to the settings (if any), and apply basic settings
optimizations.
"""
def setUp(self):
self.old_locale = locale.setlocale(locale.LC_ALL)
locale.setlocale(locale.LC_ALL, "C")
self.PATH = abspath(dirname(__file__))
default_conf = join(self.PATH, "default_conf.py")
self.settings = read_settings(default_conf)
def tearDown(self):
locale.setlocale(locale.LC_ALL, self.old_locale)
def test_overwrite_existing_settings(self):
self.assertEqual(self.settings.get("SITENAME"), "Alexis' log")
self.assertEqual(self.settings.get("SITEURL"), "http://blog.notmyidea.org")
def test_keep_default_settings(self):
# Keep default settings if not defined.
self.assertEqual(
self.settings.get("DEFAULT_CATEGORY"), DEFAULT_CONFIG["DEFAULT_CATEGORY"]
)
def test_dont_copy_small_keys(self):
# Do not copy keys not in caps.
self.assertNotIn("foobar", self.settings)
def test_read_empty_settings(self):
        # Ensure that reading no settings file results in the default settings.
settings = read_settings(None)
expected = copy.deepcopy(DEFAULT_CONFIG)
        # Added by configure_settings
expected["FEED_DOMAIN"] = ""
expected["ARTICLE_EXCLUDES"] = ["pages"]
expected["PAGE_EXCLUDES"] = [""]
self.maxDiff = None
self.assertDictEqual(settings, expected)
def test_settings_return_independent(self):
        # Make sure that the results from one settings call don't
        # affect past or future instances.
self.PATH = abspath(dirname(__file__))
default_conf = join(self.PATH, "default_conf.py")
settings = read_settings(default_conf)
settings["SITEURL"] = "new-value"
new_settings = read_settings(default_conf)
self.assertNotEqual(new_settings["SITEURL"], settings["SITEURL"])
def test_defaults_not_overwritten(self):
# This assumes 'SITENAME': 'A Pelican Blog'
settings = read_settings(None)
settings["SITENAME"] = "Not a Pelican Blog"
self.assertNotEqual(settings["SITENAME"], DEFAULT_CONFIG["SITENAME"])
def test_static_path_settings_safety(self):
# Disallow static paths from being strings
settings = {
"STATIC_PATHS": "foo/bar",
"THEME_STATIC_PATHS": "bar/baz",
# These 4 settings are required to run configure_settings
"PATH": ".",
"THEME": DEFAULT_THEME,
"SITEURL": "http://blog.notmyidea.org/",
"LOCALE": "",
}
configure_settings(settings)
self.assertEqual(settings["STATIC_PATHS"], DEFAULT_CONFIG["STATIC_PATHS"])
self.assertEqual(
settings["THEME_STATIC_PATHS"], DEFAULT_CONFIG["THEME_STATIC_PATHS"]
)
def test_configure_settings(self):
# Manipulations to settings should be applied correctly.
settings = {
"SITEURL": "http://blog.notmyidea.org/",
"LOCALE": "",
"PATH": os.curdir,
"THEME": DEFAULT_THEME,
}
configure_settings(settings)
# SITEURL should not have a trailing slash
self.assertEqual(settings["SITEURL"], "http://blog.notmyidea.org")
# FEED_DOMAIN, if undefined, should default to SITEURL
self.assertEqual(settings["FEED_DOMAIN"], "http://blog.notmyidea.org")
settings["FEED_DOMAIN"] = "http://feeds.example.com"
configure_settings(settings)
self.assertEqual(settings["FEED_DOMAIN"], "http://feeds.example.com")
def test_theme_settings_exceptions(self):
settings = self.settings
# Check that theme lookup in "pelican/themes" functions as expected
settings["THEME"] = os.path.split(settings["THEME"])[1]
configure_settings(settings)
self.assertEqual(settings["THEME"], DEFAULT_THEME)
# Check that non-existent theme raises exception
settings["THEME"] = "foo"
self.assertRaises(Exception, configure_settings, settings)
def test_deprecated_dir_setting(self):
settings = self.settings
settings["ARTICLE_DIR"] = "foo"
settings["PAGE_DIR"] = "bar"
settings = handle_deprecated_settings(settings)
self.assertEqual(settings["ARTICLE_PATHS"], ["foo"])
self.assertEqual(settings["PAGE_PATHS"], ["bar"])
with self.assertRaises(KeyError):
settings["ARTICLE_DIR"]
settings["PAGE_DIR"]
def test_default_encoding(self):
# Test that the user locale is set if not specified in settings
locale.setlocale(locale.LC_ALL, "C")
# empty string = user system locale
self.assertEqual(self.settings["LOCALE"], [""])
configure_settings(self.settings)
lc_time = locale.getlocale(locale.LC_TIME) # should be set to user locale
# explicitly set locale to user pref and test
locale.setlocale(locale.LC_TIME, "")
self.assertEqual(lc_time, locale.getlocale(locale.LC_TIME))
def test_invalid_settings_throw_exception(self):
        # Test that 'PATH' is set
settings = {}
self.assertRaises(Exception, configure_settings, settings)
# Test that 'PATH' is valid
settings["PATH"] = ""
self.assertRaises(Exception, configure_settings, settings)
# Test nonexistent THEME
settings["PATH"] = os.curdir
settings["THEME"] = "foo"
self.assertRaises(Exception, configure_settings, settings)
def test__printf_s_to_format_field(self):
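        # _printf_s_to_format_field should rewrite each printf-style "%s"
        # into a "{slug}" format field; substituting the same value through
        # either form must therefore produce identical strings.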
for s in ("%s", "{%s}", "{%s"):
option = f"foo/{s}/bar.baz"
result = _printf_s_to_format_field(option, "slug")
expected = option % "qux"
found = result.format(slug="qux")
self.assertEqual(expected, found)
def test_deprecated_extra_templates_paths(self):
settings = self.settings
settings["EXTRA_TEMPLATES_PATHS"] = ["/foo/bar", "/ha"]
settings = handle_deprecated_settings(settings)
self.assertEqual(settings["THEME_TEMPLATES_OVERRIDES"], ["/foo/bar", "/ha"])
self.assertNotIn("EXTRA_TEMPLATES_PATHS", settings)
def test_deprecated_paginated_direct_templates(self):
settings = self.settings
settings["PAGINATED_DIRECT_TEMPLATES"] = ["index", "archives"]
settings["PAGINATED_TEMPLATES"] = {"index": 10, "category": None}
settings = handle_deprecated_settings(settings)
self.assertEqual(
settings["PAGINATED_TEMPLATES"],
{"index": 10, "category": None, "archives": None},
)
self.assertNotIn("PAGINATED_DIRECT_TEMPLATES", settings)
def test_deprecated_paginated_direct_templates_from_file(self):
# This is equivalent to reading a settings file that has
# PAGINATED_DIRECT_TEMPLATES defined but no PAGINATED_TEMPLATES.
settings = read_settings(
None, override={"PAGINATED_DIRECT_TEMPLATES": ["index", "archives"]}
)
self.assertEqual(
settings["PAGINATED_TEMPLATES"],
{
"archives": None,
"author": None,
"index": None,
"category": None,
"tag": None,
},
)
self.assertNotIn("PAGINATED_DIRECT_TEMPLATES", settings)
def test_theme_and_extra_templates_exception(self):
settings = self.settings
settings["EXTRA_TEMPLATES_PATHS"] = ["/ha"]
settings["THEME_TEMPLATES_OVERRIDES"] = ["/foo/bar"]
self.assertRaises(Exception, handle_deprecated_settings, settings)
def test_slug_and_slug_regex_substitutions_exception(self):
settings = {}
settings["SLUG_REGEX_SUBSTITUTIONS"] = [("C++", "cpp")]
settings["TAG_SUBSTITUTIONS"] = [("C#", "csharp")]
self.assertRaises(Exception, handle_deprecated_settings, settings)
def test_deprecated_slug_substitutions(self):
default_slug_regex_subs = self.settings["SLUG_REGEX_SUBSTITUTIONS"]
# If no deprecated setting is set, don't set new ones
settings = {}
settings = handle_deprecated_settings(settings)
self.assertNotIn("SLUG_REGEX_SUBSTITUTIONS", settings)
self.assertNotIn("TAG_REGEX_SUBSTITUTIONS", settings)
self.assertNotIn("CATEGORY_REGEX_SUBSTITUTIONS", settings)
self.assertNotIn("AUTHOR_REGEX_SUBSTITUTIONS", settings)
# If SLUG_SUBSTITUTIONS is set, set {SLUG, AUTHOR}_REGEX_SUBSTITUTIONS
# correctly, don't set {CATEGORY, TAG}_REGEX_SUBSTITUTIONS
settings = {}
settings["SLUG_SUBSTITUTIONS"] = [("C++", "cpp")]
settings = handle_deprecated_settings(settings)
self.assertEqual(
settings.get("SLUG_REGEX_SUBSTITUTIONS"),
[(r"C\+\+", "cpp")] + default_slug_regex_subs,
)
self.assertNotIn("TAG_REGEX_SUBSTITUTIONS", settings)
self.assertNotIn("CATEGORY_REGEX_SUBSTITUTIONS", settings)
self.assertEqual(
settings.get("AUTHOR_REGEX_SUBSTITUTIONS"), default_slug_regex_subs
)
# If {CATEGORY, TAG, AUTHOR}_SUBSTITUTIONS are set, set
# {CATEGORY, TAG, AUTHOR}_REGEX_SUBSTITUTIONS correctly, don't set
# SLUG_REGEX_SUBSTITUTIONS
settings = {}
settings["TAG_SUBSTITUTIONS"] = [("C#", "csharp")]
settings["CATEGORY_SUBSTITUTIONS"] = [("C#", "csharp")]
settings["AUTHOR_SUBSTITUTIONS"] = [("Alexander Todorov", "atodorov")]
settings = handle_deprecated_settings(settings)
self.assertNotIn("SLUG_REGEX_SUBSTITUTIONS", settings)
self.assertEqual(
settings["TAG_REGEX_SUBSTITUTIONS"],
[(r"C\#", "csharp")] + default_slug_regex_subs,
)
self.assertEqual(
settings["CATEGORY_REGEX_SUBSTITUTIONS"],
[(r"C\#", "csharp")] + default_slug_regex_subs,
)
self.assertEqual(
settings["AUTHOR_REGEX_SUBSTITUTIONS"],
[(r"Alexander\ Todorov", "atodorov")] + default_slug_regex_subs,
)
# If {SLUG, CATEGORY, TAG, AUTHOR}_SUBSTITUTIONS are set, set
# {SLUG, CATEGORY, TAG, AUTHOR}_REGEX_SUBSTITUTIONS correctly
settings = {}
settings["SLUG_SUBSTITUTIONS"] = [("C++", "cpp")]
settings["TAG_SUBSTITUTIONS"] = [("C#", "csharp")]
settings["CATEGORY_SUBSTITUTIONS"] = [("C#", "csharp")]
settings["AUTHOR_SUBSTITUTIONS"] = [("Alexander Todorov", "atodorov")]
settings = handle_deprecated_settings(settings)
self.assertEqual(
settings["TAG_REGEX_SUBSTITUTIONS"],
[(r"C\+\+", "cpp")] + [(r"C\#", "csharp")] + default_slug_regex_subs,
)
self.assertEqual(
settings["CATEGORY_REGEX_SUBSTITUTIONS"],
[(r"C\+\+", "cpp")] + [(r"C\#", "csharp")] + default_slug_regex_subs,
)
self.assertEqual(
settings["AUTHOR_REGEX_SUBSTITUTIONS"],
[(r"Alexander\ Todorov", "atodorov")] + default_slug_regex_subs,
)
# Handle old 'skip' flags correctly
settings = {}
settings["SLUG_SUBSTITUTIONS"] = [("C++", "cpp", True)]
settings["AUTHOR_SUBSTITUTIONS"] = [("Alexander Todorov", "atodorov", False)]
settings = handle_deprecated_settings(settings)
self.assertEqual(
settings.get("SLUG_REGEX_SUBSTITUTIONS"),
[(r"C\+\+", "cpp")] + [(r"(?u)\A\s*", ""), (r"(?u)\s*\Z", "")],
)
self.assertEqual(
settings["AUTHOR_REGEX_SUBSTITUTIONS"],
[(r"Alexander\ Todorov", "atodorov")] + default_slug_regex_subs,
)
def test_deprecated_slug_substitutions_from_file(self):
# This is equivalent to reading a settings file that has
# SLUG_SUBSTITUTIONS defined but no SLUG_REGEX_SUBSTITUTIONS.
settings = read_settings(
None, override={"SLUG_SUBSTITUTIONS": [("C++", "cpp")]}
)
self.assertEqual(
settings["SLUG_REGEX_SUBSTITUTIONS"],
[(r"C\+\+", "cpp")] + self.settings["SLUG_REGEX_SUBSTITUTIONS"],
)
self.assertNotIn("SLUG_SUBSTITUTIONS", settings)
| 12,755 | Python | .py | 272 | 37.558824 | 85 | 0.625965 | getpelican/pelican | 12,478 | 1,806 | 72 | AGPL-3.0 | 9/5/2024, 5:09:45 PM (Europe/Amsterdam) |
6,233 | test_urlwrappers.py | getpelican_pelican/pelican/tests/test_urlwrappers.py |
from pelican.tests.support import unittest
from pelican.urlwrappers import Author, Category, Tag, URLWrapper
class TestURLWrapper(unittest.TestCase):
def test_ordering(self):
# URLWrappers are sorted by name
wrapper_a = URLWrapper(name="first", settings={})
wrapper_b = URLWrapper(name="last", settings={})
self.assertFalse(wrapper_a > wrapper_b)
self.assertFalse(wrapper_a >= wrapper_b)
self.assertFalse(wrapper_a == wrapper_b)
self.assertTrue(wrapper_a != wrapper_b)
self.assertTrue(wrapper_a <= wrapper_b)
self.assertTrue(wrapper_a < wrapper_b)
wrapper_b.name = "first"
self.assertFalse(wrapper_a > wrapper_b)
self.assertTrue(wrapper_a >= wrapper_b)
self.assertTrue(wrapper_a == wrapper_b)
self.assertFalse(wrapper_a != wrapper_b)
self.assertTrue(wrapper_a <= wrapper_b)
self.assertFalse(wrapper_a < wrapper_b)
wrapper_a.name = "last"
self.assertTrue(wrapper_a > wrapper_b)
self.assertTrue(wrapper_a >= wrapper_b)
self.assertFalse(wrapper_a == wrapper_b)
self.assertTrue(wrapper_a != wrapper_b)
self.assertFalse(wrapper_a <= wrapper_b)
self.assertFalse(wrapper_a < wrapper_b)
def test_equality(self):
tag = Tag("test", settings={})
cat = Category("test", settings={})
author = Author("test", settings={})
# same name, but different class
self.assertNotEqual(tag, cat)
self.assertNotEqual(tag, author)
        # should compare equal to a string with the same name
self.assertEqual(tag, "test")
        # should not compare equal to the equivalent bytes
self.assertNotEqual(tag, b"test")
        # Tags describing the same thing should be equal
tag_equal = Tag("Test", settings={})
self.assertEqual(tag, tag_equal)
        # Authors describing the same person should be equal
author_equal = Author("Test", settings={})
self.assertEqual(author, author_equal)
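        # a non-ASCII category compares equal to its transliterated slug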
cat_ascii = Category("指導書", settings={})
self.assertEqual(cat_ascii, "zhi dao shu")
def test_slugify_with_substitutions_and_dots(self):
tag = Tag(
"Tag Dot",
settings={
"TAG_REGEX_SUBSTITUTIONS": [
("Tag Dot", "tag.dot"),
]
},
)
cat = Category(
"Category Dot",
settings={
"CATEGORY_REGEX_SUBSTITUTIONS": [
("Category Dot", "cat.dot"),
]
},
)
self.assertEqual(tag.slug, "tag.dot")
self.assertEqual(cat.slug, "cat.dot")
def test_author_slug_substitutions(self):
settings = {
"AUTHOR_REGEX_SUBSTITUTIONS": [
("Alexander Todorov", "atodorov"),
("Krasimir Tsonev", "krasimir"),
(r"[^\w\s-]", ""),
(r"(?u)\A\s*", ""),
(r"(?u)\s*\Z", ""),
(r"[-\s]+", "-"),
]
}
author1 = Author("Mr. Senko", settings=settings)
author2 = Author("Alexander Todorov", settings=settings)
author3 = Author("Krasimir Tsonev", settings=settings)
self.assertEqual(author1.slug, "mr-senko")
self.assertEqual(author2.slug, "atodorov")
self.assertEqual(author3.slug, "krasimir")
| 3,409 | Python | .py | 82 | 31.158537 | 65 | 0.577563 | getpelican/pelican | 12,478 | 1,806 | 72 | AGPL-3.0 | 9/5/2024, 5:09:45 PM (Europe/Amsterdam) |
6,234 | test_pelican.py | getpelican_pelican/pelican/tests/test_pelican.py |
import contextlib
import io
import locale
import logging
import os
import subprocess
import sys
import unittest
from collections.abc import Sequence
from shutil import rmtree
from tempfile import TemporaryDirectory, mkdtemp
from unittest.mock import PropertyMock, patch
from rich.console import Console
import pelican.readers
from pelican import Pelican, __version__, main
from pelican.generators import StaticGenerator
from pelican.settings import read_settings
from pelican.tests.support import (
LoggedTestCase,
diff_subproc,
locale_available,
mute,
skipIfNoExecutable,
)
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
SAMPLES_PATH = os.path.abspath(
os.path.join(CURRENT_DIR, os.pardir, os.pardir, "samples")
)
OUTPUT_PATH = os.path.abspath(os.path.join(CURRENT_DIR, "output"))
INPUT_PATH = os.path.join(SAMPLES_PATH, "content")
SAMPLE_CONFIG = os.path.join(SAMPLES_PATH, "pelican.conf.py")
SAMPLE_FR_CONFIG = os.path.join(SAMPLES_PATH, "pelican.conf_FR.py")
def recursiveDiff(dcmp):
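    # Flatten a filecmp.dircmp result into lists of differing and one-sided
    # files, recursing into subdirectories; paths are reported relative to
    # the right-hand tree.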
diff = {
"diff_files": [os.path.join(dcmp.right, f) for f in dcmp.diff_files],
"left_only": [os.path.join(dcmp.right, f) for f in dcmp.left_only],
"right_only": [os.path.join(dcmp.right, f) for f in dcmp.right_only],
}
for sub_dcmp in dcmp.subdirs.values():
for k, v in recursiveDiff(sub_dcmp).items():
diff[k] += v
return diff
class TestPelican(LoggedTestCase):
    # General functional testing for Pelican. Basically, this test case runs
    # Pelican in different situations and checks how it behaves.
def setUp(self):
super().setUp()
self.temp_path = mkdtemp(prefix="pelicantests.")
self.temp_cache = mkdtemp(prefix="pelican_cache.")
self.maxDiff = None
self.old_locale = locale.setlocale(locale.LC_ALL)
locale.setlocale(locale.LC_ALL, "C")
def tearDown(self):
read_settings() # cleanup PYGMENTS_RST_OPTIONS
rmtree(self.temp_path)
rmtree(self.temp_cache)
locale.setlocale(locale.LC_ALL, self.old_locale)
super().tearDown()
def assertDirsEqual(self, left_path, right_path, msg=None):
"""
Check if the files are the same (ignoring whitespace) below both paths.
"""
proc = diff_subproc(left_path, right_path)
out, err = proc.communicate()
if proc.returncode != 0:
msg = self._formatMessage(
msg,
f"{left_path} and {right_path} differ:\nstdout:\n{out}\nstderr\n{err}",
)
raise self.failureException(msg)
def test_order_of_generators(self):
# StaticGenerator must run last, so it can identify files that
# were skipped by the other generators, and so static files can
# have their output paths overridden by the {attach} link syntax.
pelican = Pelican(settings=read_settings(path=None))
generator_classes = pelican._get_generator_classes()
self.assertTrue(
generator_classes[-1] is StaticGenerator,
"StaticGenerator must be the last generator, but it isn't!",
)
self.assertIsInstance(
generator_classes,
Sequence,
"_get_generator_classes() must return a Sequence to preserve order",
)
@skipIfNoExecutable(["git", "--version"])
def test_basic_generation_works(self):
# when running pelican without settings, it should pick up the default
# ones and generate correct output without raising any exception
settings = read_settings(
path=None,
override={
"PATH": INPUT_PATH,
"OUTPUT_PATH": self.temp_path,
"CACHE_PATH": self.temp_cache,
"LOCALE": locale.normalize("en_US"),
},
)
pelican = Pelican(settings=settings)
mute(True)(pelican.run)()
self.assertDirsEqual(self.temp_path, os.path.join(OUTPUT_PATH, "basic"))
self.assertLogCountEqual(
count=1,
msg="Unable to find.*skipping url replacement",
level=logging.WARNING,
)
@skipIfNoExecutable(["git", "--version"])
def test_custom_generation_works(self):
# the same thing with a specified set of settings should work
settings = read_settings(
path=SAMPLE_CONFIG,
override={
"PATH": INPUT_PATH,
"OUTPUT_PATH": self.temp_path,
"CACHE_PATH": self.temp_cache,
"LOCALE": locale.normalize("en_US.UTF-8"),
},
)
pelican = Pelican(settings=settings)
mute(True)(pelican.run)()
self.assertDirsEqual(self.temp_path, os.path.join(OUTPUT_PATH, "custom"))
@skipIfNoExecutable(["git", "--version"])
@unittest.skipUnless(
locale_available("fr_FR.UTF-8") or locale_available("French"),
"French locale needed",
)
def test_custom_locale_generation_works(self):
"""Test that generation with fr_FR.UTF-8 locale works"""
if sys.platform == "win32":
our_locale = "French"
else:
our_locale = "fr_FR.UTF-8"
settings = read_settings(
path=SAMPLE_FR_CONFIG,
override={
"PATH": INPUT_PATH,
"OUTPUT_PATH": self.temp_path,
"CACHE_PATH": self.temp_cache,
"LOCALE": our_locale,
},
)
pelican = Pelican(settings=settings)
mute(True)(pelican.run)()
self.assertDirsEqual(self.temp_path, os.path.join(OUTPUT_PATH, "custom_locale"))
def test_theme_static_paths_copy(self):
        # multiple directories listed in THEME_STATIC_PATHS should all be copied
settings = read_settings(
path=SAMPLE_CONFIG,
override={
"PATH": INPUT_PATH,
"OUTPUT_PATH": self.temp_path,
"CACHE_PATH": self.temp_cache,
"THEME_STATIC_PATHS": [
os.path.join(SAMPLES_PATH, "very"),
os.path.join(SAMPLES_PATH, "kinda"),
os.path.join(SAMPLES_PATH, "theme_standard"),
],
},
)
pelican = Pelican(settings=settings)
mute(True)(pelican.run)()
theme_output = os.path.join(self.temp_path, "theme")
extra_path = os.path.join(theme_output, "exciting", "new", "files")
for file in ["a_stylesheet", "a_template"]:
self.assertTrue(os.path.exists(os.path.join(theme_output, file)))
for file in ["wow!", "boom!", "bap!", "zap!"]:
self.assertTrue(os.path.exists(os.path.join(extra_path, file)))
def test_theme_static_paths_copy_single_file(self):
        # a single THEME_STATIC_PATHS entry should work as well
settings = read_settings(
path=SAMPLE_CONFIG,
override={
"PATH": INPUT_PATH,
"OUTPUT_PATH": self.temp_path,
"CACHE_PATH": self.temp_cache,
"THEME_STATIC_PATHS": [os.path.join(SAMPLES_PATH, "theme_standard")],
},
)
pelican = Pelican(settings=settings)
mute(True)(pelican.run)()
theme_output = os.path.join(self.temp_path, "theme")
for file in ["a_stylesheet", "a_template"]:
self.assertTrue(os.path.exists(os.path.join(theme_output, file)))
def test_cyclic_intersite_links_no_warnings(self):
settings = read_settings(
path=None,
override={
"PATH": os.path.join(CURRENT_DIR, "cyclic_intersite_links"),
"OUTPUT_PATH": self.temp_path,
"CACHE_PATH": self.temp_cache,
},
)
pelican = Pelican(settings=settings)
mute(True)(pelican.run)()
# There are four different intersite links:
# - one pointing to the second article from first and third
# - one pointing to the first article from second and third
# - one pointing to the third article from first and second
# - one pointing to a nonexistent from each
# If everything goes well, only the warning about the nonexistent
        # article should be printed. Two articles alone are not sufficient,
# since the first will always have _context['generated_content'] empty
# (thus skipping the link resolving) and the second will always have it
# non-empty, containing the first, thus always succeeding.
self.assertLogCountEqual(
count=1,
msg="Unable to find '.*\\.rst', skipping url replacement.",
level=logging.WARNING,
)
def test_md_extensions_deprecation(self):
"""Test that a warning is issued if MD_EXTENSIONS is used"""
settings = read_settings(
path=None,
override={
"PATH": INPUT_PATH,
"OUTPUT_PATH": self.temp_path,
"CACHE_PATH": self.temp_cache,
"MD_EXTENSIONS": {},
},
)
pelican = Pelican(settings=settings)
mute(True)(pelican.run)()
self.assertLogCountEqual(
count=1,
msg="MD_EXTENSIONS is deprecated use MARKDOWN instead.",
level=logging.WARNING,
)
def test_parse_errors(self):
        # Verify that only an error is logged and the application does not
        # abort or exit.
settings = read_settings(
path=None,
override={
"PATH": os.path.abspath(os.path.join(CURRENT_DIR, "parse_error")),
"OUTPUT_PATH": self.temp_path,
"CACHE_PATH": self.temp_cache,
},
)
pelican = Pelican(settings=settings)
mute(True)(pelican.run)()
self.assertLogCountEqual(
count=1, msg="Could not process .*parse_error.rst", level=logging.ERROR
)
def test_module_load(self):
"""Test loading via python -m pelican --help displays the help"""
output = subprocess.check_output(
[sys.executable, "-m", "pelican", "--help"]
).decode("ascii", "replace")
assert "usage:" in output
def test_main_version(self):
"""Run main --version."""
out = io.StringIO()
with contextlib.redirect_stdout(out):
with self.assertRaises(SystemExit):
main(["--version"])
self.assertEqual(f"{__version__}\n", out.getvalue())
def test_main_help(self):
"""Run main --help."""
out = io.StringIO()
with contextlib.redirect_stdout(out):
with self.assertRaises(SystemExit):
main(["--help"])
self.assertIn("A tool to generate a static blog", out.getvalue())
def test_main_on_content(self):
"""Invoke main on simple_content directory."""
out, err = io.StringIO(), io.StringIO()
with contextlib.redirect_stdout(out), contextlib.redirect_stderr(err):
with TemporaryDirectory() as temp_dir:
# Don't highlight anything.
# See https://rich.readthedocs.io/en/stable/highlighting.html
with patch("pelican.console", new=Console(highlight=False)):
main(["-o", temp_dir, "pelican/tests/simple_content"])
self.assertIn("Processed 1 article", out.getvalue())
self.assertEqual("", err.getvalue())
def test_main_on_content_markdown_disabled(self):
"""Invoke main on simple_content directory."""
with patch.object(
pelican.readers.MarkdownReader, "enabled", new_callable=PropertyMock
) as attr_mock:
attr_mock.return_value = False
out, err = io.StringIO(), io.StringIO()
with contextlib.redirect_stdout(out), contextlib.redirect_stderr(err):
with TemporaryDirectory() as temp_dir:
# Don't highlight anything.
# See https://rich.readthedocs.io/en/stable/highlighting.html
with patch("pelican.console", new=Console(highlight=False)):
main(["-o", temp_dir, "pelican/tests/simple_content"])
self.assertIn("Processed 0 articles", out.getvalue())
self.assertLogCountEqual(
1,
".*article_with_md_extension.md: "
"Could not import 'markdown.Markdown'. "
"Have you installed the 'markdown' package?",
)
| 12,649 | Python | .py | 294 | 32.673469 | 88 | 0.598929 | getpelican/pelican | 12,478 | 1,806 | 72 | AGPL-3.0 | 9/5/2024, 5:09:45 PM (Europe/Amsterdam) |
6,235 | support.py | getpelican_pelican/pelican/tests/support.py |
import locale
import logging
import os
import re
import subprocess
import sys
import unittest
from contextlib import contextmanager
from functools import wraps
from io import StringIO
from logging.handlers import BufferingHandler
from shutil import rmtree
from tempfile import mkdtemp
from pelican.contents import Article
from pelican.readers import default_metadata
from pelican.settings import DEFAULT_CONFIG
__all__ = [
"get_article",
"unittest",
]
@contextmanager
def temporary_folder():
"""creates a temporary folder, return it and delete it afterwards.
This allows to do something like this in tests:
>>> with temporary_folder() as d:
# do whatever you want
"""
tempdir = mkdtemp()
try:
yield tempdir
finally:
rmtree(tempdir)
def isplit(s, sep=None):
"""Behaves like str.split but returns a generator instead of a list.
>>> list(isplit('\tUse the force\n')) == '\tUse the force\n'.split()
True
>>> list(isplit('\tUse the force\n')) == ['Use', 'the', 'force']
True
>>> (list(isplit('\tUse the force\n', "e"))
== '\tUse the force\n'.split("e"))
True
>>> list(isplit('Use the force', "e")) == 'Use the force'.split("e")
True
>>> list(isplit('Use the force', "e")) == ['Us', ' th', ' forc', '']
True
"""
sep, hardsep = r"\s+" if sep is None else re.escape(sep), sep is not None
exp, pos, length = re.compile(sep), 0, len(s)
while True:
m = exp.search(s, pos)
if not m:
if pos < length or hardsep:
# ^ mimic "split()": ''.split() returns []
yield s[pos:]
break
start = m.start()
if pos < start or hardsep:
# ^ mimic "split()": includes trailing empty string
yield s[pos:start]
pos = m.end()
def mute(returns_output=False):
"""Decorate a function that prints to stdout, intercepting the output.
If "returns_output" is True, the function will return a generator
yielding the printed lines instead of the return values.
    The decorator literally hijacks sys.stdout during each function
    execution, so be careful with what you apply it to.
    >>> def numbers():
    ...     print("42")
    ...     print("1984")
    ...
>>> numbers()
42
1984
>>> mute()(numbers)()
>>> list(mute(True)(numbers)())
['42', '1984']
"""
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
saved_stdout = sys.stdout
sys.stdout = StringIO()
try:
out = func(*args, **kwargs)
if returns_output:
out = isplit(sys.stdout.getvalue().strip())
finally:
sys.stdout = saved_stdout
return out
return wrapper
return decorator
def get_article(title, content, **extra_metadata):
metadata = default_metadata(settings=DEFAULT_CONFIG)
metadata["title"] = title
if extra_metadata:
metadata.update(extra_metadata)
return Article(content, metadata=metadata)
def skipIfNoExecutable(executable):
"""Skip test if `executable` is not found
    Tries to run `executable` with subprocess to make sure it's on the PATH,
    and skips the tests if not found (i.e. if subprocess raises an `OSError`).
"""
with open(os.devnull, "w") as fnull:
try:
res = subprocess.call(executable, stdout=fnull, stderr=fnull)
except OSError:
res = None
if res is None:
return unittest.skip(f"{executable} executable not found")
return lambda func: func
def module_exists(module_name):
"""Test if a module is importable."""
try:
__import__(module_name)
except ImportError:
return False
else:
return True
def locale_available(locale_):
old_locale = locale.setlocale(locale.LC_TIME)
try:
locale.setlocale(locale.LC_TIME, str(locale_))
except locale.Error:
return False
else:
locale.setlocale(locale.LC_TIME, old_locale)
return True
def can_symlink():
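    # Probe for symlink support by creating one inside a throwaway folder;
    # on Windows this typically raises OSError unless the process holds the
    # symlink privilege (hence the skipUnless guards in the generator tests).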
res = True
try:
with temporary_folder() as f:
os.symlink(f, os.path.join(f, "symlink"))
except OSError:
res = False
return res
def get_settings(**kwargs):
"""Provide tweaked setting dictionaries for testing
Set keyword arguments to override specific settings.
"""
settings = DEFAULT_CONFIG.copy()
for key, value in kwargs.items():
settings[key] = value
return settings
def get_context(settings=None, **kwargs):
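    # Build a minimal generator context: a copy of the settings plus the
    # bookkeeping keys (generated_content, static_links, static_content)
    # that the generators under test expect to find.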
context = settings.copy() if settings else {}
context["generated_content"] = {}
context["static_links"] = set()
context["static_content"] = {}
context.update(kwargs)
return context
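
# A minimal sketch of how tests combine these helpers; the SITENAME
# override and the empty "articles" entry are illustrative.
def _example_settings_and_context():
    settings = get_settings(SITENAME="Test Site")
    context = get_context(settings, articles=[])
    return settings, context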
class LogCountHandler(BufferingHandler):
"""Capturing and counting logged messages."""
def __init__(self, capacity=1000):
super().__init__(capacity)
def count_logs(self, msg=None, level=None):
return len(
[
rec
for rec in self.buffer
if (msg is None or re.match(msg, rec.getMessage()))
and (level is None or rec.levelno == level)
]
)
def count_formatted_logs(self, msg=None, level=None):
return len(
[
rec
for rec in self.buffer
if (msg is None or re.search(msg, self.format(rec)))
and (level is None or rec.levelno == level)
]
)
def diff_subproc(first, second):
"""
Return a subprocess that runs a diff on the two paths.
    Check results with::

        >>> proc = diff_subproc(first, second)
        >>> out_stream, err_stream = proc.communicate()
        >>> did_check_fail = proc.returncode != 0
"""
return subprocess.Popen(
[
"git",
"--no-pager",
"diff",
"--no-ext-diff",
"--exit-code",
"-w",
first,
second,
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
encoding="utf-8",
)
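
# A minimal sketch of driving diff_subproc(), as in the docstring above;
# the two paths are hypothetical output directories.
def _example_diff(first="output_a", second="output_b"):
    proc = diff_subproc(first, second)
    out_stream, err_stream = proc.communicate()
    # a non-zero exit code means the trees differ (or git itself failed)
    return proc.returncode != 0, out_stream, err_stream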
class LoggedTestCase(unittest.TestCase):
"""A test case that captures log messages."""
def setUp(self):
super().setUp()
self._logcount_handler = LogCountHandler()
logging.getLogger().addHandler(self._logcount_handler)
def tearDown(self):
logging.getLogger().removeHandler(self._logcount_handler)
super().tearDown()
def assertLogCountEqual(self, count=None, msg=None, **kwargs):
actual = self._logcount_handler.count_logs(msg=msg, **kwargs)
self.assertEqual(
actual,
count,
msg=f"expected {count} occurrences of {msg!r}, but found {actual}",
)
class TestCaseWithCLocale(unittest.TestCase):
"""Set locale to C for each test case, then restore afterward.
Use utils.temporary_locale if you want a context manager ("with" statement).
"""
def setUp(self):
self.old_locale = locale.setlocale(locale.LC_ALL)
locale.setlocale(locale.LC_ALL, "C")
def tearDown(self):
locale.setlocale(locale.LC_ALL, self.old_locale)
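
# A minimal sketch of a test built on the helpers above; the logged message
# and the expected count are illustrative, not part of the real test suite.
class _ExampleLoggedTest(LoggedTestCase):
    def test_warning_is_counted(self):
        logging.getLogger(__name__).warning("something went wrong")
        # exactly one WARNING record matching the message should be captured
        self.assertLogCountEqual(count=1, msg="something went wrong")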
| 7,270
|
Python
|
.py
| 219
| 25.780822
| 80
| 0.61007
|
getpelican/pelican
| 12,478
| 1,806
| 72
|
AGPL-3.0
|
9/5/2024, 5:09:45 PM (Europe/Amsterdam)
|
6,236
|
test_readers.py
|
getpelican_pelican/pelican/tests/test_readers.py
|
import os
from unittest.mock import PropertyMock, patch
from pelican import readers
from pelican.tests.support import get_settings, unittest
from pelican.utils import SafeDatetime
CUR_DIR = os.path.dirname(__file__)
CONTENT_PATH = os.path.join(CUR_DIR, "content")
def _path(*args):
return os.path.join(CONTENT_PATH, *args)
class ReaderTest(unittest.TestCase):
def read_file(self, path, **kwargs):
# Isolate from future API changes to readers.read_file
r = readers.Readers(settings=get_settings(**kwargs))
return r.read_file(base_path=CONTENT_PATH, path=path)
def assertDictHasSubset(self, dictionary, subset):
for key, value in subset.items():
if key in dictionary:
real_value = dictionary.get(key)
self.assertEqual(
value,
real_value,
f"Expected {key} to have value {value}, but was {real_value}",
)
else:
self.fail(f"Expected {key} to have value {value}, but was not in Dict")
def test_markdown_disabled(self):
with patch.object(
readers.MarkdownReader, "enabled", new_callable=PropertyMock
) as attr_mock:
attr_mock.return_value = False
readrs = readers.Readers(settings=get_settings())
self.assertEqual(
set(readers.MarkdownReader.file_extensions),
readrs.disabled_readers.keys(),
)
for val in readrs.disabled_readers.values():
self.assertEqual(readers.MarkdownReader, val.__class__)
class TestAssertDictHasSubset(ReaderTest):
def setUp(self):
self.dictionary = {"key-a": "val-a", "key-b": "val-b"}
def tearDown(self):
self.dictionary = None
def test_subset(self):
self.assertDictHasSubset(self.dictionary, {"key-a": "val-a"})
def test_equal(self):
self.assertDictHasSubset(self.dictionary, self.dictionary)
def test_fail_not_set(self):
self.assertRaisesRegex(
AssertionError,
r"Expected.*key-c.*to have value.*val-c.*but was not in Dict",
self.assertDictHasSubset,
self.dictionary,
{"key-c": "val-c"},
)
def test_fail_wrong_val(self):
self.assertRaisesRegex(
AssertionError,
r"Expected .*key-a.* to have value .*val-b.* but was .*val-a.*",
self.assertDictHasSubset,
self.dictionary,
{"key-a": "val-b"},
)
class DefaultReaderTest(ReaderTest):
def test_readfile_unknown_extension(self):
with self.assertRaises(TypeError):
self.read_file(path="article_with_metadata.unknownextension")
def test_readfile_path_metadata_implicit_dates(self):
test_file = "article_with_metadata_implicit_dates.html"
page = self.read_file(path=test_file, DEFAULT_DATE="fs")
expected = {
"date": SafeDatetime.fromtimestamp(os.stat(_path(test_file)).st_mtime),
"modified": SafeDatetime.fromtimestamp(os.stat(_path(test_file)).st_mtime),
}
self.assertDictHasSubset(page.metadata, expected)
def test_readfile_path_metadata_explicit_dates(self):
test_file = "article_with_metadata_explicit_dates.html"
page = self.read_file(path=test_file, DEFAULT_DATE="fs")
expected = {
"date": SafeDatetime(2010, 12, 2, 10, 14),
"modified": SafeDatetime(2010, 12, 31, 23, 59),
}
self.assertDictHasSubset(page.metadata, expected)
def test_readfile_path_metadata_implicit_date_explicit_modified(self):
test_file = "article_with_metadata_implicit_date_explicit_modified.html"
page = self.read_file(path=test_file, DEFAULT_DATE="fs")
expected = {
"date": SafeDatetime.fromtimestamp(os.stat(_path(test_file)).st_mtime),
"modified": SafeDatetime(2010, 12, 2, 10, 14),
}
self.assertDictHasSubset(page.metadata, expected)
def test_readfile_path_metadata_explicit_date_implicit_modified(self):
test_file = "article_with_metadata_explicit_date_implicit_modified.html"
page = self.read_file(path=test_file, DEFAULT_DATE="fs")
expected = {
"date": SafeDatetime(2010, 12, 2, 10, 14),
"modified": SafeDatetime.fromtimestamp(os.stat(_path(test_file)).st_mtime),
}
self.assertDictHasSubset(page.metadata, expected)
def test_find_empty_alt(self):
with patch("pelican.readers.logger") as log_mock:
content = [
'<img alt="" src="test-image.png" width="300px" />',
'<img src="test-image.png" width="300px" alt="" />',
]
for tag in content:
readers.find_empty_alt(tag, "/test/path")
log_mock.warning.assert_called_with(
"Empty alt attribute for image %s in %s",
"test-image.png",
"/test/path",
extra={"limit_msg": "Other images have empty alt attributes"},
)
class RstReaderTest(ReaderTest):
def test_article_with_metadata(self):
page = self.read_file(path="article_with_metadata.rst")
expected = {
"category": "yeah",
"author": "Alexis Métaireau",
"title": "This is a super article !",
"summary": '<p class="first last">Multi-line metadata should be'
" supported\nas well as <strong>inline"
" markup</strong> and stuff to "typogrify"
""...</p>\n",
"date": SafeDatetime(2010, 12, 2, 10, 14),
"modified": SafeDatetime(2010, 12, 2, 10, 20),
"tags": ["foo", "bar", "foobar"],
"custom_field": "http://notmyidea.org",
}
self.assertDictHasSubset(page.metadata, expected)
def test_article_with_capitalized_metadata(self):
page = self.read_file(path="article_with_capitalized_metadata.rst")
expected = {
"category": "yeah",
"author": "Alexis Métaireau",
"title": "This is a super article !",
"summary": '<p class="first last">Multi-line metadata should be'
" supported\nas well as <strong>inline"
" markup</strong> and stuff to "typogrify"
""...</p>\n",
"date": SafeDatetime(2010, 12, 2, 10, 14),
"modified": SafeDatetime(2010, 12, 2, 10, 20),
"tags": ["foo", "bar", "foobar"],
"custom_field": "http://notmyidea.org",
}
self.assertDictHasSubset(page.metadata, expected)
def test_article_with_filename_metadata(self):
page = self.read_file(
path="2012-11-29_rst_w_filename_meta#foo-bar.rst", FILENAME_METADATA=None
)
expected = {
"category": "yeah",
"author": "Alexis Métaireau",
"title": "Rst with filename metadata",
"reader": "rst",
}
self.assertDictHasSubset(page.metadata, expected)
page = self.read_file(
path="2012-11-29_rst_w_filename_meta#foo-bar.rst",
FILENAME_METADATA=r"(?P<date>\d{4}-\d{2}-\d{2}).*",
)
expected = {
"category": "yeah",
"author": "Alexis Métaireau",
"title": "Rst with filename metadata",
"date": SafeDatetime(2012, 11, 29),
"reader": "rst",
}
self.assertDictHasSubset(page.metadata, expected)
page = self.read_file(
path="2012-11-29_rst_w_filename_meta#foo-bar.rst",
FILENAME_METADATA=(
r"(?P<date>\d{4}-\d{2}-\d{2})"
r"_(?P<Slug>.*)"
r"#(?P<MyMeta>.*)-(?P<author>.*)"
),
)
expected = {
"category": "yeah",
"author": "Alexis Métaireau",
"title": "Rst with filename metadata",
"date": SafeDatetime(2012, 11, 29),
"slug": "rst_w_filename_meta",
"mymeta": "foo",
"reader": "rst",
}
self.assertDictHasSubset(page.metadata, expected)
def test_article_with_optional_filename_metadata(self):
page = self.read_file(
path="2012-11-29_rst_w_filename_meta#foo-bar.rst",
FILENAME_METADATA=r"(?P<date>\d{4}-\d{2}-\d{2})?",
)
expected = {
"date": SafeDatetime(2012, 11, 29),
"reader": "rst",
}
self.assertDictHasSubset(page.metadata, expected)
page = self.read_file(
path="article.rst", FILENAME_METADATA=r"(?P<date>\d{4}-\d{2}-\d{2})?"
)
expected = {
"reader": "rst",
}
self.assertDictHasSubset(page.metadata, expected)
self.assertNotIn("date", page.metadata, "Date should not be set.")
def test_article_metadata_key_lowercase(self):
# Keys of metadata should be lowercase.
reader = readers.RstReader(settings=get_settings())
content, metadata = reader.read(_path("article_with_uppercase_metadata.rst"))
self.assertIn("category", metadata, "Key should be lowercase.")
self.assertEqual("Yeah", metadata.get("category"), "Value keeps case.")
def test_article_extra_path_metadata(self):
input_with_metadata = "2012-11-29_rst_w_filename_meta#foo-bar.rst"
page_metadata = self.read_file(
path=input_with_metadata,
FILENAME_METADATA=(
r"(?P<date>\d{4}-\d{2}-\d{2})"
r"_(?P<Slug>.*)"
r"#(?P<MyMeta>.*)-(?P<author>.*)"
),
EXTRA_PATH_METADATA={
input_with_metadata: {"key-1a": "value-1a", "key-1b": "value-1b"}
},
)
expected_metadata = {
"category": "yeah",
"author": "Alexis Métaireau",
"title": "Rst with filename metadata",
"date": SafeDatetime(2012, 11, 29),
"slug": "rst_w_filename_meta",
"mymeta": "foo",
"reader": "rst",
"key-1a": "value-1a",
"key-1b": "value-1b",
}
self.assertDictHasSubset(page_metadata.metadata, expected_metadata)
input_file_path_without_metadata = "article.rst"
page_without_metadata = self.read_file(
path=input_file_path_without_metadata,
EXTRA_PATH_METADATA={
input_file_path_without_metadata: {"author": "Charlès Overwrite"}
},
)
expected_without_metadata = {
"category": "misc",
"author": "Charlès Overwrite",
"title": "Article title",
"reader": "rst",
}
self.assertDictHasSubset(
page_without_metadata.metadata, expected_without_metadata
)
def test_article_extra_path_metadata_dont_overwrite(self):
# EXTRA_PATH_METADATA['author'] should get ignored
# since we don't overwrite already set values
input_file_path = "2012-11-29_rst_w_filename_meta#foo-bar.rst"
page = self.read_file(
path=input_file_path,
FILENAME_METADATA=(
r"(?P<date>\d{4}-\d{2}-\d{2})"
r"_(?P<Slug>.*)"
r"#(?P<MyMeta>.*)-(?P<orginalauthor>.*)"
),
EXTRA_PATH_METADATA={
input_file_path: {"author": "Charlès Overwrite", "key-1b": "value-1b"}
},
)
expected = {
"category": "yeah",
"author": "Alexis Métaireau",
"title": "Rst with filename metadata",
"date": SafeDatetime(2012, 11, 29),
"slug": "rst_w_filename_meta",
"mymeta": "foo",
"reader": "rst",
"key-1b": "value-1b",
}
self.assertDictHasSubset(page.metadata, expected)
def test_article_extra_path_metadata_recurse(self):
parent = "TestCategory"
notparent = "TestCategory/article"
path = "TestCategory/article_without_category.rst"
epm = {
parent: {
"epmr_inherit": parent,
"epmr_override": parent,
},
notparent: {"epmr_bogus": notparent},
path: {
"epmr_override": path,
},
}
expected_metadata = {
"epmr_inherit": parent,
"epmr_override": path,
}
page = self.read_file(path=path, EXTRA_PATH_METADATA=epm)
self.assertDictHasSubset(page.metadata, expected_metadata)
# Make sure vars aren't getting "inherited" by mistake...
path = "article.rst"
page = self.read_file(path=path, EXTRA_PATH_METADATA=epm)
for k in expected_metadata.keys():
self.assertNotIn(k, page.metadata)
# Same, but for edge cases where one file's name is a prefix of
# another.
path = "TestCategory/article_without_category.rst"
page = self.read_file(path=path, EXTRA_PATH_METADATA=epm)
for k in epm[notparent].keys():
self.assertNotIn(k, page.metadata)
def test_typogrify(self):
# if nothing is specified in the settings, the content should be
# unmodified
page = self.read_file(path="article.rst")
expected = (
"<p>THIS is some content. With some stuff to "
""typogrify"...</p>\n<p>Now with added "
'support for <abbr title="three letter acronym">'
"TLA</abbr>.</p>\n"
)
self.assertEqual(page.content, expected)
try:
# otherwise, typogrify should be applied
page = self.read_file(path="article.rst", TYPOGRIFY=True)
expected = (
'<p><span class="caps">THIS</span> is some content. '
"With some stuff to “typogrify”…</p>\n"
'<p>Now with added support for <abbr title="three letter '
'acronym"><span class="caps">TLA</span></abbr>.</p>\n'
)
self.assertEqual(page.content, expected)
except ImportError:
return unittest.skip("need the typogrify distribution")
def test_typogrify_summary(self):
# if nothing is specified in the settings, the summary should be
# unmodified
page = self.read_file(path="article_with_metadata.rst")
expected = (
'<p class="first last">Multi-line metadata should be'
" supported\nas well as <strong>inline"
" markup</strong> and stuff to "typogrify"
""...</p>\n"
)
self.assertEqual(page.metadata["summary"], expected)
try:
# otherwise, typogrify should be applied
page = self.read_file(path="article_with_metadata.rst", TYPOGRIFY=True)
expected = (
'<p class="first last">Multi-line metadata should be'
" supported\nas well as <strong>inline"
" markup</strong> and stuff to “typogrify"
"”…</p>\n"
)
self.assertEqual(page.metadata["summary"], expected)
except ImportError:
return unittest.skip("need the typogrify distribution")
def test_typogrify_ignore_tags(self):
try:
            # typogrify should be able to ignore user-specified tags,
            # but tries to be clever with its widont extension
page = self.read_file(
path="article.rst", TYPOGRIFY=True, TYPOGRIFY_IGNORE_TAGS=["p"]
)
expected = (
"<p>THIS is some content. With some stuff to "
""typogrify"...</p>\n<p>Now with added "
'support for <abbr title="three letter acronym">'
"TLA</abbr>.</p>\n"
)
self.assertEqual(page.content, expected)
# typogrify should ignore code blocks by default because
# code blocks are composed inside the pre tag
page = self.read_file(path="article_with_code_block.rst", TYPOGRIFY=True)
expected = (
"<p>An article with some code</p>\n"
'<div class="highlight"><pre><span></span>'
'<span class="n">x</span>'
' <span class="o">&</span>'
' <span class="n">y</span>\n</pre></div>\n'
"<p>A block quote:</p>\n<blockquote>\nx "
'<span class="amp">&</span> y</blockquote>\n'
"<p>Normal:\nx"
' <span class="amp">&</span>'
" y"
"</p>\n"
)
self.assertEqual(page.content, expected)
# instruct typogrify to also ignore blockquotes
page = self.read_file(
path="article_with_code_block.rst",
TYPOGRIFY=True,
TYPOGRIFY_IGNORE_TAGS=["blockquote"],
)
expected = (
"<p>An article with some code</p>\n"
'<div class="highlight"><pre><span>'
'</span><span class="n">x</span>'
' <span class="o">&</span>'
' <span class="n">y</span>\n</pre></div>\n'
"<p>A block quote:</p>\n<blockquote>\nx "
"& y</blockquote>\n"
"<p>Normal:\nx"
' <span class="amp">&</span>'
" y"
"</p>\n"
)
self.assertEqual(page.content, expected)
except ImportError:
return unittest.skip("need the typogrify distribution")
except TypeError:
return unittest.skip("need typogrify version 2.0.4 or later")
def test_article_with_multiple_authors(self):
page = self.read_file(path="article_with_multiple_authors.rst")
expected = {"authors": ["First Author", "Second Author"]}
self.assertDictHasSubset(page.metadata, expected)
def test_article_with_multiple_authors_semicolon(self):
page = self.read_file(path="article_with_multiple_authors_semicolon.rst")
expected = {"authors": ["Author, First", "Author, Second"]}
self.assertDictHasSubset(page.metadata, expected)
def test_article_with_multiple_authors_list(self):
page = self.read_file(path="article_with_multiple_authors_list.rst")
expected = {"authors": ["Author, First", "Author, Second"]}
self.assertDictHasSubset(page.metadata, expected)
def test_default_date_formats(self):
tuple_date = self.read_file(path="article.rst", DEFAULT_DATE=(2012, 5, 1))
string_date = self.read_file(path="article.rst", DEFAULT_DATE="2012-05-01")
self.assertEqual(tuple_date.metadata["date"], string_date.metadata["date"])
def test_parse_error(self):
        # Verify that a parse error raises an Exception rather than failing
        # silently or exiting with something like SystemExit
with self.assertRaisesRegex(Exception, "underline too short"):
self.read_file(path="../parse_error/parse_error.rst")
def test_typogrify_dashes_config(self):
# Test default config
page = self.read_file(
path="article_with_typogrify_dashes.rst",
TYPOGRIFY=True,
TYPOGRIFY_DASHES="default",
)
expected = "<p>One: -; Two: —; Three: —-</p>\n"
expected_title = "One -, two —, three —- dashes!"
self.assertEqual(page.content, expected)
self.assertEqual(page.title, expected_title)
# Test 'oldschool' variant
page = self.read_file(
path="article_with_typogrify_dashes.rst",
TYPOGRIFY=True,
TYPOGRIFY_DASHES="oldschool",
)
expected = "<p>One: -; Two: –; Three: —</p>\n"
expected_title = "One -, two –, three — dashes!"
self.assertEqual(page.content, expected)
self.assertEqual(page.title, expected_title)
# Test 'oldschool_inverted' variant
page = self.read_file(
path="article_with_typogrify_dashes.rst",
TYPOGRIFY=True,
TYPOGRIFY_DASHES="oldschool_inverted",
)
expected = "<p>One: -; Two: —; Three: –</p>\n"
expected_title = "One -, two —, three – dashes!"
self.assertEqual(page.content, expected)
self.assertEqual(page.title, expected_title)
@unittest.skipUnless(readers.Markdown, "markdown isn't installed")
class MdReaderTest(ReaderTest):
def test_article_with_metadata(self):
reader = readers.MarkdownReader(settings=get_settings())
content, metadata = reader.read(_path("article_with_md_extension.md"))
expected = {
"category": "test",
"title": "Test md File",
"summary": "<p>I have a lot to test</p>",
"date": SafeDatetime(2010, 12, 2, 10, 14),
"modified": SafeDatetime(2010, 12, 2, 10, 20),
"tags": ["foo", "bar", "foobar"],
}
self.assertDictHasSubset(metadata, expected)
content, metadata = reader.read(
_path("article_with_markdown_and_nonascii_summary.md")
)
expected = {
"title": "マックOS X 10.8でパイソンとVirtualenvをインストールと設定",
"summary": "<p>パイソンとVirtualenvをまっくでインストールする方法について明確に説明します。</p>",
"category": "指導書",
"date": SafeDatetime(2012, 12, 20),
"modified": SafeDatetime(2012, 12, 22),
"tags": ["パイソン", "マック"],
"slug": "python-virtualenv-on-mac-osx-mountain-lion-10.8",
}
self.assertDictHasSubset(metadata, expected)
def test_article_with_footnote(self):
settings = get_settings()
ec = settings["MARKDOWN"]["extension_configs"]
ec["markdown.extensions.footnotes"] = {"SEPARATOR": "-"}
reader = readers.MarkdownReader(settings)
content, metadata = reader.read(_path("article_with_markdown_and_footnote.md"))
expected_content = (
"<p>This is some content"
'<sup id="fnref-1"><a class="footnote-ref" href="#fn-1"'
">1</a></sup>"
" with some footnotes"
'<sup id="fnref-footnote"><a class="footnote-ref" '
'href="#fn-footnote">2</a></sup></p>\n'
'<div class="footnote">\n'
'<hr>\n<ol>\n<li id="fn-1">\n'
"<p>Numbered footnote "
'<a class="footnote-backref" href="#fnref-1" '
'title="Jump back to footnote 1 in the text">↩</a></p>\n'
'</li>\n<li id="fn-footnote">\n'
"<p>Named footnote "
'<a class="footnote-backref" href="#fnref-footnote"'
' title="Jump back to footnote 2 in the text">↩</a></p>\n'
"</li>\n</ol>\n</div>"
)
expected_metadata = {
"title": "Article with markdown containing footnotes",
"summary": (
"<p>Summary with <strong>inline</strong> markup "
"<em>should</em> be supported.</p>"
),
"date": SafeDatetime(2012, 10, 31),
"modified": SafeDatetime(2012, 11, 1),
"multiline": [
"Line Metadata should be handle properly.",
"See syntax of Meta-Data extension of Python Markdown package:",
"If a line is indented by 4 or more spaces,",
"that line is assumed to be an additional line of the value",
"for the previous keyword.",
"A keyword may have as many lines as desired.",
],
}
self.assertEqual(content, expected_content)
self.assertDictHasSubset(metadata, expected_metadata)
def test_article_with_file_extensions(self):
reader = readers.MarkdownReader(settings=get_settings())
# test to ensure the md file extension is being processed by the
# correct reader
content, metadata = reader.read(_path("article_with_md_extension.md"))
expected = (
"<h1>Test Markdown File Header</h1>\n"
"<h2>Used for pelican test</h2>\n"
"<p>The quick brown fox jumped over the lazy dog's back.</p>"
)
self.assertEqual(content, expected)
# test to ensure the mkd file extension is being processed by the
# correct reader
content, metadata = reader.read(_path("article_with_mkd_extension.mkd"))
expected = (
"<h1>Test Markdown File Header</h1>\n<h2>Used for pelican"
" test</h2>\n<p>This is another markdown test file. Uses"
" the mkd extension.</p>"
)
self.assertEqual(content, expected)
# test to ensure the markdown file extension is being processed by the
# correct reader
content, metadata = reader.read(
_path("article_with_markdown_extension.markdown")
)
expected = (
"<h1>Test Markdown File Header</h1>\n<h2>Used for pelican"
" test</h2>\n<p>This is another markdown test file. Uses"
" the markdown extension.</p>"
)
self.assertEqual(content, expected)
# test to ensure the mdown file extension is being processed by the
# correct reader
content, metadata = reader.read(_path("article_with_mdown_extension.mdown"))
expected = (
"<h1>Test Markdown File Header</h1>\n<h2>Used for pelican"
" test</h2>\n<p>This is another markdown test file. Uses"
" the mdown extension.</p>"
)
self.assertEqual(content, expected)
def test_article_with_markdown_markup_extension(self):
# test to ensure the markdown markup extension is being processed as
# expected
page = self.read_file(
path="article_with_markdown_markup_extensions.md",
MARKDOWN={
"extension_configs": {
"markdown.extensions.toc": {},
"markdown.extensions.codehilite": {},
"markdown.extensions.extra": {},
}
},
)
expected = (
'<div class="toc">\n'
"<ul>\n"
'<li><a href="#level1">Level1</a><ul>\n'
'<li><a href="#level2">Level2</a></li>\n'
"</ul>\n"
"</li>\n"
"</ul>\n"
"</div>\n"
'<h2 id="level1">Level1</h2>\n'
'<h3 id="level2">Level2</h3>'
)
self.assertEqual(page.content, expected)
def test_article_with_filename_metadata(self):
page = self.read_file(
path="2012-11-30_md_w_filename_meta#foo-bar.md", FILENAME_METADATA=None
)
expected = {
"category": "yeah",
"author": "Alexis Métaireau",
}
self.assertDictHasSubset(page.metadata, expected)
page = self.read_file(
path="2012-11-30_md_w_filename_meta#foo-bar.md",
FILENAME_METADATA=r"(?P<date>\d{4}-\d{2}-\d{2}).*",
)
expected = {
"category": "yeah",
"author": "Alexis Métaireau",
"date": SafeDatetime(2012, 11, 30),
}
self.assertDictHasSubset(page.metadata, expected)
page = self.read_file(
path="2012-11-30_md_w_filename_meta#foo-bar.md",
FILENAME_METADATA=(
r"(?P<date>\d{4}-\d{2}-\d{2})"
r"_(?P<Slug>.*)"
r"#(?P<MyMeta>.*)-(?P<author>.*)"
),
)
expected = {
"category": "yeah",
"author": "Alexis Métaireau",
"date": SafeDatetime(2012, 11, 30),
"slug": "md_w_filename_meta",
"mymeta": "foo",
}
self.assertDictHasSubset(page.metadata, expected)
def test_article_with_optional_filename_metadata(self):
page = self.read_file(
path="2012-11-30_md_w_filename_meta#foo-bar.md",
FILENAME_METADATA=r"(?P<date>\d{4}-\d{2}-\d{2})?",
)
expected = {
"date": SafeDatetime(2012, 11, 30),
"reader": "markdown",
}
self.assertDictHasSubset(page.metadata, expected)
page = self.read_file(
path="empty.md", FILENAME_METADATA=r"(?P<date>\d{4}-\d{2}-\d{2})?"
)
expected = {
"reader": "markdown",
}
self.assertDictHasSubset(page.metadata, expected)
self.assertNotIn("date", page.metadata, "Date should not be set.")
def test_duplicate_tags_or_authors_are_removed(self):
reader = readers.MarkdownReader(settings=get_settings())
content, metadata = reader.read(_path("article_with_duplicate_tags_authors.md"))
expected = {
"tags": ["foo", "bar", "foobar"],
"authors": ["Author, First", "Author, Second"],
}
self.assertDictHasSubset(metadata, expected)
def test_metadata_not_parsed_for_metadata(self):
settings = get_settings()
settings["FORMATTED_FIELDS"] = ["summary"]
reader = readers.MarkdownReader(settings=settings)
content, metadata = reader.read(
_path("article_with_markdown_and_nested_metadata.md")
)
expected = {
"title": "Article with markdown and nested summary metadata",
"summary": "<p>Test: This metadata value looks like metadata</p>",
}
self.assertDictHasSubset(metadata, expected)
def test_empty_file(self):
reader = readers.MarkdownReader(settings=get_settings())
content, metadata = reader.read(_path("empty.md"))
self.assertEqual(metadata, {})
self.assertEqual(content, "")
def test_empty_file_with_bom(self):
reader = readers.MarkdownReader(settings=get_settings())
content, metadata = reader.read(_path("empty_with_bom.md"))
self.assertEqual(metadata, {})
self.assertEqual(content, "")
def test_typogrify_dashes_config(self):
# Test default config
page = self.read_file(
path="article_with_typogrify_dashes.md",
TYPOGRIFY=True,
TYPOGRIFY_DASHES="default",
)
expected = "<p>One: -; Two: —; Three: —-</p>"
expected_title = "One -, two —, three —- dashes!"
self.assertEqual(page.content, expected)
self.assertEqual(page.title, expected_title)
# Test 'oldschool' variant
page = self.read_file(
path="article_with_typogrify_dashes.md",
TYPOGRIFY=True,
TYPOGRIFY_DASHES="oldschool",
)
expected = "<p>One: -; Two: –; Three: —</p>"
expected_title = "One -, two –, three — dashes!"
self.assertEqual(page.content, expected)
self.assertEqual(page.title, expected_title)
# Test 'oldschool_inverted' variant
page = self.read_file(
path="article_with_typogrify_dashes.md",
TYPOGRIFY=True,
TYPOGRIFY_DASHES="oldschool_inverted",
)
expected = "<p>One: -; Two: —; Three: –</p>"
expected_title = "One -, two —, three – dashes!"
self.assertEqual(page.content, expected)
self.assertEqual(page.title, expected_title)
def test_metadata_has_no_discarded_data(self):
md_filename = "article_with_markdown_and_empty_tags.md"
r = readers.Readers(
cache_name="cache", settings=get_settings(CACHE_CONTENT=True)
)
page = r.read_file(base_path=CONTENT_PATH, path=md_filename)
__, cached_metadata = r.get_cached_data(_path(md_filename), (None, None))
expected = {"title": "Article with markdown and empty tags"}
self.assertEqual(cached_metadata, expected)
self.assertNotIn("tags", page.metadata)
self.assertDictHasSubset(page.metadata, expected)
class HTMLReaderTest(ReaderTest):
def test_article_with_comments(self):
page = self.read_file(path="article_with_comments.html")
self.assertEqual(
"""
Body content
<!-- This comment is included (including extra whitespace) -->
""",
page.content,
)
def test_article_with_keywords(self):
page = self.read_file(path="article_with_keywords.html")
expected = {
"tags": ["foo", "bar", "foobar"],
}
self.assertDictHasSubset(page.metadata, expected)
def test_article_with_metadata(self):
page = self.read_file(path="article_with_metadata.html")
expected = {
"category": "yeah",
"author": "Alexis Métaireau",
"title": "This is a super article !",
"summary": "Summary and stuff",
"date": SafeDatetime(2010, 12, 2, 10, 14),
"tags": ["foo", "bar", "foobar"],
"custom_field": "http://notmyidea.org",
}
self.assertDictHasSubset(page.metadata, expected)
def test_article_with_multiple_similar_metadata_tags(self):
page = self.read_file(path="article_with_multiple_metadata_tags.html")
expected = {
"custom_field": ["https://getpelican.com", "https://www.eff.org"],
}
self.assertDictHasSubset(page.metadata, expected)
def test_article_with_multiple_authors(self):
page = self.read_file(path="article_with_multiple_authors.html")
expected = {"authors": ["First Author", "Second Author"]}
self.assertDictHasSubset(page.metadata, expected)
def test_article_with_metadata_and_contents_attrib(self):
page = self.read_file(path="article_with_metadata_and_contents.html")
expected = {
"category": "yeah",
"author": "Alexis Métaireau",
"title": "This is a super article !",
"summary": "Summary and stuff",
"date": SafeDatetime(2010, 12, 2, 10, 14),
"tags": ["foo", "bar", "foobar"],
"custom_field": "http://notmyidea.org",
}
self.assertDictHasSubset(page.metadata, expected)
def test_article_with_null_attributes(self):
page = self.read_file(path="article_with_null_attributes.html")
self.assertEqual(
"""
Ensure that empty attributes are copied properly.
<input name="test" disabled style="" />
""",
page.content,
)
def test_article_with_attributes_containing_double_quotes(self):
        page = self.read_file(
            path="article_with_attributes_containing_double_quotes.html"
        )
self.assertEqual(
"""
Ensure that if an attribute value contains a double quote, it is
surrounded with single quotes, otherwise with double quotes.
<span data-test="'single quoted string'">Span content</span>
<span data-test='"double quoted string"'>Span content</span>
<span data-test="string without quotes">Span content</span>
""",
page.content,
)
def test_article_metadata_key_lowercase(self):
# Keys of metadata should be lowercase.
page = self.read_file(path="article_with_uppercase_metadata.html")
# Key should be lowercase
self.assertIn("category", page.metadata, "Key should be lowercase.")
# Value should keep cases
self.assertEqual("Yeah", page.metadata.get("category"))
def test_article_with_nonconformant_meta_tags(self):
page = self.read_file(path="article_with_nonconformant_meta_tags.html")
expected = {
"summary": "Summary and stuff",
"title": "Article with Nonconformant HTML meta tags",
}
self.assertDictHasSubset(page.metadata, expected)
def test_article_with_inline_svg(self):
page = self.read_file(path="article_with_inline_svg.html")
expected = {
"title": "Article with an inline SVG",
}
self.assertDictHasSubset(page.metadata, expected)
| 36,563
|
Python
|
.py
| 815
| 33.580368
| 88
| 0.572604
|
getpelican/pelican
| 12,478
| 1,806
| 72
|
AGPL-3.0
|
9/5/2024, 5:09:45 PM (Europe/Amsterdam)
|
6,237
|
__init__.py
|
getpelican_pelican/pelican/tests/dummy_plugins/normal_plugin/normal_plugin/__init__.py
|
from .submodule import noop # noqa: F401
def register():
pass
| 69
|
Python
|
.py
| 3
| 20
| 41
| 0.71875
|
getpelican/pelican
| 12,478
| 1,806
| 72
|
AGPL-3.0
|
9/5/2024, 5:09:45 PM (Europe/Amsterdam)
|
6,238
|
conftest.py
|
getpelican_pelican/pelican/tests/build_test/conftest.py
|
def pytest_addoption(parser):
parser.addoption(
"--check-build",
action="store",
default=False,
help="Check wheel contents.",
)
| 168
|
Python
|
.py
| 7
| 17.285714
| 37
| 0.583851
|
getpelican/pelican
| 12,478
| 1,806
| 72
|
AGPL-3.0
|
9/5/2024, 5:09:45 PM (Europe/Amsterdam)
|
6,239
|
test_build_files.py
|
getpelican_pelican/pelican/tests/build_test/test_build_files.py
|
import importlib.metadata
import tarfile
from pathlib import Path
from re import match
from zipfile import ZipFile
import pytest
version = importlib.metadata.version("pelican")
@pytest.mark.skipif(
"not config.getoption('--check-build')",
reason="Only run when --check-build is given",
)
def test_wheel_contents(pytestconfig):
"""
This test should test the contents of the wheel to make sure
that everything that is needed is included in the final build
"""
dist_folder = pytestconfig.getoption("--check-build")
wheels = Path(dist_folder).rglob(f"pelican-{version}-py3-none-any.whl")
for wheel_file in wheels:
files_list = ZipFile(wheel_file).namelist()
# Check if theme files are copied to wheel
simple_theme = Path("./pelican/themes/simple/templates")
for x in simple_theme.iterdir():
assert str(x) in files_list
# Check if tool templates are copied to wheel
tools = Path("./pelican/tools/templates")
for x in tools.iterdir():
assert str(x) in files_list
assert "pelican/tools/templates/tasks.py.jinja2" in files_list
@pytest.mark.skipif(
"not config.getoption('--check-build')",
reason="Only run when --check-build is given",
)
@pytest.mark.parametrize(
"expected_file",
[
("THANKS"),
("README.rst"),
("CONTRIBUTING.rst"),
("docs/changelog.rst"),
("samples/"),
],
)
def test_sdist_contents(pytestconfig, expected_file):
"""
This test should test the contents of the source distribution to make sure
that everything that is needed is included in the final build.
"""
dist_folder = pytestconfig.getoption("--check-build")
sdist_files = Path(dist_folder).rglob(f"pelican-{version}.tar.gz")
for dist in sdist_files:
files_list = tarfile.open(dist, "r:gz").getnames()
dir_matcher = ""
if expected_file.endswith("/"):
dir_matcher = ".*"
filtered_values = [
path
for path in files_list
if match(rf"^pelican-{version}/{expected_file}{dir_matcher}$", path)
]
assert len(filtered_values) > 0
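
# A rough invocation sketch: these checks only run when the --check-build
# option registered in conftest.py points at a build output folder (the
# dist/ path below is an assumption):
#
#   python -m build
#   pytest pelican/tests/build_test --check-build dist/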
| 2,201
|
Python
|
.py
| 61
| 29.901639
| 80
| 0.655722
|
getpelican/pelican
| 12,478
| 1,806
| 72
|
AGPL-3.0
|
9/5/2024, 5:09:45 PM (Europe/Amsterdam)
|
6,240
|
signals.py
|
getpelican_pelican/pelican/plugins/signals.py
|
from blinker import Signal, signal
from ordered_set import OrderedSet
# Signals will call functions in the order of connection, i.e. plugin order
Signal.set_class = OrderedSet
# Run-level signals:
initialized = signal("pelican_initialized")
get_generators = signal("get_generators")
all_generators_finalized = signal("all_generators_finalized")
get_writer = signal("get_writer")
finalized = signal("pelican_finalized")
# Reader-level signals
readers_init = signal("readers_init")
# Generator-level signals
generator_init = signal("generator_init")
article_generator_init = signal("article_generator_init")
article_generator_pretaxonomy = signal("article_generator_pretaxonomy")
article_generator_finalized = signal("article_generator_finalized")
article_generator_write_article = signal("article_generator_write_article")
article_writer_finalized = signal("article_writer_finalized")
page_generator_init = signal("page_generator_init")
page_generator_finalized = signal("page_generator_finalized")
page_generator_write_page = signal("page_generator_write_page")
page_writer_finalized = signal("page_writer_finalized")
static_generator_init = signal("static_generator_init")
static_generator_finalized = signal("static_generator_finalized")
# Page-level signals
article_generator_preread = signal("article_generator_preread")
article_generator_context = signal("article_generator_context")
page_generator_preread = signal("page_generator_preread")
page_generator_context = signal("page_generator_context")
static_generator_preread = signal("static_generator_preread")
static_generator_context = signal("static_generator_context")
content_object_init = signal("content_object_init")
# Writers signals
content_written = signal("content_written")
feed_generated = signal("feed_generated")
feed_written = signal("feed_written")
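
# A minimal sketch of how a plugin might hook one of these signals; since
# receivers fire in connection order, plugin order matters. The callback
# below and the (sender, content) shape it assumes are illustrative.
def _example_receiver(article_generator, content):
    # blinker calls receivers with the sender first; the article arriving
    # as a `content` keyword argument is an assumption about this signal
    print(content.title)


def _example_register():
    # plugins conventionally expose register() and connect callbacks there
    article_generator_write_article.connect(_example_receiver)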
| 1,839
|
Python
|
.py
| 37
| 48.27027
| 75
| 0.808511
|
getpelican/pelican
| 12,478
| 1,806
| 72
|
AGPL-3.0
|
9/5/2024, 5:09:45 PM (Europe/Amsterdam)
|
6,241
|
_utils.py
|
getpelican_pelican/pelican/plugins/_utils.py
|
import importlib
import importlib.machinery
import importlib.util
import inspect
import logging
import pkgutil
import sys
logger = logging.getLogger(__name__)
def iter_namespace(ns_pkg):
# Specifying the second argument (prefix) to iter_modules makes the
# returned name an absolute name instead of a relative one. This allows
# import_module to work without having to do additional modification to
# the name.
return pkgutil.iter_modules(ns_pkg.__path__, ns_pkg.__name__ + ".")
def get_namespace_plugins(ns_pkg=None):
if ns_pkg is None:
import pelican.plugins as ns_pkg
return {
name: importlib.import_module(name)
for finder, name, ispkg in iter_namespace(ns_pkg)
if ispkg
}
def list_plugins(ns_pkg=None):
from pelican.log import init as init_logging
init_logging(logging.INFO)
ns_plugins = get_namespace_plugins(ns_pkg)
if ns_plugins:
logger.info("Plugins found:\n" + "\n".join(ns_plugins))
else:
logger.info("No plugins are installed")
def plugin_enabled(name, plugin_list=None):
if plugin_list is None or not plugin_list:
# no plugins are loaded
return False
if name in plugin_list:
# search name as is
return True
if f"pelican.plugins.{name}" in plugin_list:
# check if short name is a namespace plugin
return True
return False
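
# A small sketch of the lookup above: both the short and the namespaced
# spelling should be recognized; "sitemap" is an illustrative plugin name.
def _example_enabled():
    plugin_list = ["pelican.plugins.sitemap"]
    return plugin_enabled("sitemap", plugin_list)  # True for either spelling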
def load_legacy_plugin(plugin, plugin_paths):
if "." in plugin:
# it is in a package, try to resolve package first
package, _, _ = plugin.rpartition(".")
load_legacy_plugin(package, plugin_paths)
# Try to find plugin in PLUGIN_PATHS
spec = importlib.machinery.PathFinder.find_spec(plugin, plugin_paths)
if spec is None:
# If failed, try to find it in normal importable locations
spec = importlib.util.find_spec(plugin)
if spec is None:
raise ImportError(f"Cannot import plugin `{plugin}`")
else:
# Avoid loading the same plugin twice
if spec.name in sys.modules:
return sys.modules[spec.name]
# create module object from spec
mod = importlib.util.module_from_spec(spec)
# place it into sys.modules cache
# necessary if module imports itself at some point (e.g. packages)
sys.modules[spec.name] = mod
try:
# try to execute it inside module object
spec.loader.exec_module(mod)
except Exception: # problem with import
try:
# remove module from sys.modules since it can't be loaded
del sys.modules[spec.name]
except KeyError:
pass
raise
# if all went well, we have the plugin module
return mod
def load_plugins(settings):
logger.debug("Finding namespace plugins")
namespace_plugins = get_namespace_plugins()
if namespace_plugins:
logger.debug("Namespace plugins found:\n" + "\n".join(namespace_plugins))
plugins = []
if settings.get("PLUGINS") is not None:
for plugin in settings["PLUGINS"]:
if isinstance(plugin, str):
logger.debug("Loading plugin `%s`", plugin)
# try to find in namespace plugins
if plugin in namespace_plugins:
plugin = namespace_plugins[plugin]
elif f"pelican.plugins.{plugin}" in namespace_plugins:
plugin = namespace_plugins[f"pelican.plugins.{plugin}"]
# try to import it
else:
try:
plugin = load_legacy_plugin(
plugin, settings.get("PLUGIN_PATHS", [])
)
except ImportError as e:
logger.error("Cannot load plugin `%s`\n%s", plugin, e)
continue
plugins.append(plugin)
else:
plugins = list(namespace_plugins.values())
return plugins
def get_plugin_name(plugin):
"""
Plugins can be passed as module objects, however this breaks caching as
module objects cannot be pickled. To work around this, all plugins are
stringified post-initialization.
"""
if inspect.isclass(plugin):
return plugin.__qualname__
if inspect.ismodule(plugin):
return plugin.__name__
return type(plugin).__qualname__
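
# A minimal sketch tying load_plugins() and get_plugin_name() together; the
# PLUGINS value and the PLUGIN_PATHS entry are illustrative assumptions.
def _example_plugin_names():
    settings = {"PLUGINS": ["sitemap"], "PLUGIN_PATHS": ["plugins"]}
    # unknown plugins are logged and skipped, so this is safe to call
    return [get_plugin_name(plugin) for plugin in load_plugins(settings)]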
| 4,432
|
Python
|
.py
| 113
| 30.318584
| 81
| 0.628319
|
getpelican/pelican
| 12,478
| 1,806
| 72
|
AGPL-3.0
|
9/5/2024, 5:09:45 PM (Europe/Amsterdam)
|
6,242
|
setup.py
|
shinken-solutions_shinken/setup.py
|
#!/usr/bin/python2
# -*- coding: utf-8 -*-
# We can't use __future__ unicode_literals in setup.py because versions of
# setuptools <= 41.1.0 do not handle unicode values in package_data.
# See https://github.com/pypa/setuptools/pull/1769 for details.
# from __future__ import absolute_import, division, print_function, unicode_literals
try:
import pwd
import grp
except ImportError:
# don't expect to have this on windows :)
pwd = grp = None
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
from distutils.core import Command
from itertools import chain
from glob import glob
import sys
import os
import re
try:
from setuptools import setup
from setuptools import find_packages
except ImportError:
sys.exit("Error: missing setuptools library")
try:
python_version = sys.version_info
except AttributeError:
python_version = None
if not python_version or python_version < (2, 7):
sys.exit("Shinken requires Python >= 2.7.x, sorry")
###############################################################################
#
# Utility functions
#
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
def update_file_with_string(infilename, outfilename, matches, new_strings):
"""
    Reads infilename, applies the regex replacements line by line, and
    writes the result to outfilename.

    :param str infilename: The input file to read and replace strings in
    :param str outfilename: The output file to write to
    :param list matches: The regex patterns to replace
    :param list new_strings: The replacement string for each regex
"""
with open(infilename, "rb") as f:
buf = []
for r in f:
r = r.decode("utf-8").strip()
for match, new_string in zip(matches, new_strings):
                r = re.sub(match, new_string, r)
buf.append(r)
with open(outfilename, "wb") as f:
f.write("\n".join(buf).encode("utf-8"))
def get_uid(username):
"""
Returns the username's uid, or None if it does not exist
:param str username: The username to look for
"""
try:
return pwd.getpwnam(username)[2]
    except KeyError:
return None
def get_gid(groupname):
"""
Returns the group's gid, or None if it does not exist
    :param str groupname: The group name to look for
"""
try:
return grp.getgrnam(groupname)[2]
    except KeyError:
return None
def get_init_system():
"""
Return the init system name
"""
if os.name == 'nt':
return None
if not os.path.isfile("/proc/1/comm"):
return "sysv"
with open("/proc/1/comm", "r") as f:
init = f.read().strip()
if init == "systemd":
return init
else:
return "sysv"
def get_requirements():
"""
Reads requirements file
"""
req_path = os.path.join(
os.path.dirname(__file__),
"requirements.txt"
)
with open(req_path, "r") as f:
requirements = [r.strip() for r in f if r.strip()]
return requirements
def get_shinken_version():
"""
Reads the shinken version
"""
version_path = os.path.join(
os.path.dirname(__file__),
"shinken",
"bin",
"__init__.py"
)
with open(version_path, "r") as f:
version = None
for r in f:
if "VERSION" in r:
version = r.split("=")[1].strip().strip('"')
break
if version is None:
raise Exception("Failed to read shinken version")
return version
###############################################################################
#
# Distribution files
#
###############################################################################
# Packages definition
package_data = ['*.py', 'modules/*.py', 'modules/*/*.py']
# Compute scripts
scripts = [s for s in glob('bin/shinken*') if not s.endswith('.py')]
###############################################################################
#
# Default paths
#
###############################################################################
shinken_services = [
'arbiter',
'broker',
'poller',
'reactionner',
'receiver',
'scheduler'
]
# Installation files processing
if os.path.isfile('/etc/redhat-release'):
default_paths = {
'sysv': "/etc/init.d",
'default': "/etc/sysconfig",
'libexec': "/usr/local/libexec/shinken/plugins",
'modules': "/usr/local/lib/shinken/modules",
'share': "/usr/local/share/shinken",
'examples': "/usr/local/share/doc/shinken/examples",
'doc': "/usr/local/share/doc/shinken",
'etc': "/etc/shinken",
'var': "/var/lib/shinken",
'run': "/var/run/shinken",
'log': "/var/log/shinken",
}
elif os.path.isfile('/etc/debian_version'):
default_paths = {
'sysv': "/etc/init.d",
'default': "/etc/default",
'libexec': "/usr/local/libexec/shinken/plugins",
'modules': "/usr/local/lib/shinken/modules",
'share': "/usr/local/share/shinken",
'examples': "/usr/local/share/doc/shinken/examples",
'doc': "/usr/local/share/doc/shinken",
'etc': "/etc/shinken",
'var': "/var/lib/shinken",
'run': "/var/run/shinken",
'log': "/var/log/shinken",
}
elif 'linux' in sys.platform or 'sunos5' in sys.platform:
default_paths = {
'sysv': "/etc/init.d",
'default': "/etc/default",
'libexec': "/usr/local/libexec/shinken/plugins",
'modules': "/usr/local/lib/shinken/modules",
'share': "/usr/local/share/shinken",
'examples': "/usr/local/share/doc/shinken/examples",
'doc': "/usr/local/share/doc/shinken",
'etc': "/etc/shinken",
'var': "/var/lib/shinken",
'run': "/var/run/shinken",
'log': "/var/log/shinken",
}
elif 'openbsd' in sys.platform:
default_paths = {
'sysv': "/etc/rc.d",
'default': "/etc/default",
'libexec': "/usr/local/libexec/shinken/plugins",
'modules': "/usr/local/lib/shinken/modules",
'share': "/usr/local/share/shinken",
'examples': "/usr/local/share/examples/shinken",
'doc': "/usr/local/share/doc/shinken",
'etc': "/etc/shinken",
'var': "/var/lib/shinken",
'run': "/var/run/shinken",
'log': "/var/log/shinken",
}
elif 'bsd' in sys.platform or 'dragonfly' in sys.platform:
default_paths = {
'sysv': "/usr/local/etc/rc.d",
'default': "/etc/default",
'libexec': "/usr/local/libexec/shinken/plugins",
'modules': "/usr/local/lib/shinken/modules",
'share': "/usr/local/share/shinken",
'examples': "/usr/local/share/examples/shinken",
'doc': "/usr/local/share/doc/shinken",
'etc': "/etc/shinken",
'var': "/var/lib/shinken",
'run': "/var/run/shinken",
'log': "/var/log/shinken",
}
elif sys.platform.startswith('win'):
default_paths = {
'libexec': "c:\\shinken\\libexec",
'modules': "c:\\shinken\\var\\modules",
'var': "c:\\shinken\\var",
'share': "c:\\shinken\\var\\share",
'examples': "c:\\shinken\\var\\share\\examples",
'doc': "c:\\shinken\\var\\share\\doc",
'etc': "c:\\shinken\\etc",
'log': "c:\\shinken\\var",
'run': "c:\\shinken\\var",
}
else:
raise Exception("Unsupported platform, sorry")
if os.getenv("VIRTUAL_ENV"):
root = os.getenv("VIRTUAL_ENV")
default_paths.update({
'default': os.path.join(root, "etc", "default"),
'libexec': os.path.join(root, "libexec", "shinken", "plugins"),
'modules': os.path.join(root, "lib", "shinken", "modules"),
'share': os.path.join(root, "share", "shinken"),
'examples': os.path.join(root, "share", "doc", "shinken", "examples"),
'doc': os.path.join(root, "share", "doc", "shinken"),
'etc': os.path.join(root, "etc", "shinken"),
'var': os.path.join(root, "var", "lib", "shinken"),
'run': os.path.join(root, "var", "run", "shinken"),
'log': os.path.join(root, "var", "log", "shinken"),
})
###############################################################################
#
# Init related files
#
###############################################################################
if get_init_system() == "systemd":
init_files = [
'bin/systemd/shinken-%s.service.in' % service
for service in shinken_services
]
data_files = [(
os.path.join(default_paths['examples'], 'systemd'),
init_files
)]
default_files = [
'bin/default/shinken-%s.in' % service
for service in shinken_services
]
data_files.append((
os.path.join(default_paths['examples'], 'default'),
default_files
))
elif get_init_system() == "sysv":
init_files = ['bin/init.d/shinken.in']
init_files.extend([
'bin/init.d/shinken-%s' % service for service in shinken_services
])
data_files = [(
os.path.join(default_paths['examples'], 'init.d'),
init_files
)]
# warning: The default file will be generated a bit later
default_files = ['bin/default/shinken.in']
data_files.append((
os.path.join(default_paths['examples'], 'default'),
default_files
))
else:
data_files = []
###############################################################################
#
# Daemon and shinken configuration files processing
#
###############################################################################
## Get all files, including nested files, under etc/ except the daemons folder
for path, subdirs, files in os.walk('etc'):
dirname = os.path.join(default_paths['examples'], path)
if not files:
data_files.append((dirname, []))
continue
for name in files:
data_files.append((dirname, [os.path.join(path, name)]))
###############################################################################
#
# Modules, inventory, doc, ...
#
###############################################################################
# Modules, doc, inventory and cli are always installed
paths = ('inventory', 'cli')
for path, subdirs, files in chain.from_iterable(os.walk(patho) for patho in paths):
for name in files:
dirname = os.path.join(default_paths['var'], path)
data_files.append((
dirname, [os.path.join(path, name)]
))
for path, subdirs, files in os.walk('share'):
for name in files:
dirname = os.path.dirname(os.path.join(
default_paths['share'],
re.sub(r"^(share\/|share$)", "", path)
))
data_files.append((
dirname, [os.path.join(path, name)]
))
for path, subdirs, files in os.walk('doc'):
for name in files:
dirname = os.path.dirname(os.path.join(
default_paths['doc'],
re.sub(r"^(doc\/|doc$)", "", path)
))
data_files.append((
dirname, [os.path.join(path, name)]
))
for path, subdirs, files in os.walk('modules'):
for name in files:
dirname = os.path.dirname(os.path.join(
default_paths['modules'],
re.sub(r"^(modules\/|modules$)", "", path)
))
data_files.append((
dirname, [os.path.join(path, name)]
))
for path, subdirs, files in os.walk('libexec'):
for name in files:
dirname = os.path.dirname(os.path.join(
default_paths['libexec'],
re.sub(r"^(libexec\/|libexec$)", "", path)
))
data_files.append((
dirname, [os.path.join(path, name)]
))
###############################################################################
#
# Run related files
#
###############################################################################
data_files.append((default_paths['run'], []))
data_files.append((default_paths['log'], []))
###############################################################################
#
# Post install command and actions
#
###############################################################################
class post_install(Command):
"""
A custom command to execute post-install actions
"""
description = 'Run shinken post-install actions, such as templates ' \
'processing and permissions enforcement'
user_options = [
# The format is (long option, short option, description).
('install-conf', None, 'Install shinken configuration from examples'),
('install-default', None, 'Install shinken default files from examples'),
('install-init', None, 'Install shinken init files from examples'),
(
'confdir=',
'c',
'The configuration directory to alter (defaults to %s)' %
default_paths['etc']
),
(
'defaultdir=',
'f',
            'The environment files directory for the init system (defaults to %s)' %
default_paths['default']
),
('user=', 'u', 'User to run Shinken under (defaults to shinken)'),
        ('group=', 'g', 'Group to run Shinken under (defaults to shinken)'),
(
'modules=',
'm',
'Path the modules should be placed into (defaults to %s)' %
default_paths['modules']
),
(
'workdir=',
'w',
'The shinken work directory (defaults to %s)' %
default_paths['var']
),
(
'lockdir=',
'x',
'The shinken service lock directory (defaults to %s)' %
default_paths['run']
),
(
'logdir=',
'l',
'The shinken log directory (defaults to %s)' %
default_paths['log']
),
]
boolean_options = ['install-conf', 'install-default', 'install-init']
def initialize_options(self):
"""
Set default values for options.
"""
# Each user option must be listed here with their default value.
self.install_dir = None
self.install_conf = None
self.install_default = None
self.install_init = None
self.user = 'shinken'
self.group = 'shinken'
self.confdir = default_paths['etc']
self.defaultdir = default_paths['default']
self.modules = default_paths['modules']
self.workdir = default_paths['var']
self.lockdir = default_paths['run']
self.logdir = default_paths['log']
def finalize_options(self):
"""
Post-process options.
"""
assert get_uid(self.user) is not None, ('Unknown user %s.' % self.user)
assert get_gid(self.group) is not None, ('Unknown group %s.' % self.group)
self.set_undefined_options(
'install', ('install_scripts', 'install_dir'),
)
def generate_default_files(self):
"""
Generate default/environment files sourced by init scripts or
systemd unit files from templates
"""
# The default file must have good values for the directories:
# etc, var and where to push scripts that launch the app.
        # The `default_files` variable has been set above while generating the
# `data_files` list.
default_templates = [
os.path.join(default_paths['examples'], re.sub(r'^bin/', '', d))
for d in default_files
]
for default_template in default_templates:
# Read the template file
# There can be unicode characters in files
            # As setuptools does not support unicode in python2, for 2/3
            # compatibility, read files in binary and decode them as unicode;
            # do the reverse when writing them.
with open(default_template, "rb") as f:
buf = f.read().decode("utf-8")
# substitute
buf = buf.replace("$ETC$", self.confdir)
buf = buf.replace("$VAR$", self.workdir)
buf = buf.replace("$RUN$", self.lockdir)
buf = buf.replace("$LOG$", self.logdir)
buf = buf.replace("$SCRIPTS_BIN$", self.install_dir.rstrip("/"))
# write out the new file
target = re.sub(r'\.in$', '', default_template)
with open(target, "wb") as f:
f.write(buf.encode("utf-8"))
def install_default_files(self):
"""
Install default/environment files sourced by init scripts or
systemd unit files previously generated
"""
for filename in [os.path.basename(i) for i in default_files]:
default_src = re.sub(r'\.in$', '', os.path.join(
default_paths['examples'],
'default',
filename))
default_dir = self.defaultdir
self.mkpath(default_dir)
self.copy_file(default_src, default_dir)
def generate_init_files(self):
"""
Generates the initscripts or systemd unit files from templates
"""
init_templates = [
os.path.join(default_paths['examples'], re.sub(r'^bin/', '', i))
for i in init_files
]
for init_template in init_templates:
# Read the template file
# There can be unicode characters in files
            # As setuptools does not support unicode in python2, for 2/3
            # compatibility, read files in binary and decode them as unicode;
            # do the reverse when writing them.
with open(init_template, "rb") as f:
buf = f.read().decode("utf-8")
# substitute
buf = buf.replace("$BIN$", self.install_dir.rstrip("/"))
buf = buf.replace("$DEFAULT$", default_paths["default"])
# write out the new file
target = re.sub(r'\.in$', '', init_template)
with open(target, "wb") as f:
f.write(buf.encode("utf-8"))
def install_init_files(self):
"""
        Installs the init scripts or systemd unit files. When unit files
        get modified, takes care to reload the systemd daemon.
"""
systemd_reload = False
for filename in [os.path.basename(i) for i in init_files]:
if get_init_system() == "systemd":
systemd_reload = True
init_src = re.sub(r'\.in$', '', os.path.join(
default_paths['examples'],
'systemd',
filename))
init_dir = '/etc/systemd/system'
self.mkpath(init_dir)
self.copy_file(init_src, init_dir)
elif get_init_system() == "sysv":
init_src = re.sub(r'\.in$', '', os.path.join(
default_paths['examples'],
'init.d',
filename))
init_dir = default_paths['sysv']
self.mkpath(init_dir)
init_file = re.sub(r'\.in$', '', os.path.join(
init_dir,
filename))
self.copy_file(init_src, init_dir)
os.chmod(init_file, 0o0755)
if systemd_reload:
self.spawn(["systemctl", "daemon-reload"])
def generate_conf_files(self):
"""
Generates shinken configuration files from templates
"""
conf_templates = []
conf_base = os.path.join(default_paths['examples'], 'etc')
for path, subdirs, files in os.walk(conf_base):
for name in files:
if name.endswith(".in"):
conf_template = os.path.join(path, name)
conf_templates.append(conf_template)
        # Process template file expansion
for conf_template in conf_templates:
target = re.sub(r'\.in$', '', conf_template)
update_file_with_string(
conf_template,
target,
[
r'^modules_dir=.*',
r'^#user=.*',
r'^#group=.*',
r'^shinken_user=\w+',
r'^shinken_group=\w+',
r'^workdir=.+',
r'^lock_file=.+/([^/]+.pid)',
r'^local_log=.+/([^/]+.log)',
],
[
r'modules_dir=%s' % self.modules,
r'user=%s' % self.user,
r'group=%s' % self.group,
r'shinken_user=%s' % self.user,
r'shinken_group=%s' % self.group,
r'workdir=%s' % self.workdir,
r'lock_file=%s/\1' % self.lockdir,
r'local_log=%s/\1' % self.logdir,
]
)
def install_conf_files(self):
"""
Installs shinken configuration files previously generated
Template files are ignored.
"""
conf_files = []
conf_base = os.path.join(default_paths['examples'], 'etc')
for path, subdirs, files in os.walk(conf_base):
for name in files:
if name.endswith(".in"):
continue
conf_file = os.path.join(path, name)
conf_files.append(conf_file)
for filename in conf_files:
conf_file = filename.replace(conf_base, self.confdir)
conf_dir = os.path.dirname(conf_file)
self.mkpath(conf_dir)
self.copy_file(filename, conf_file)
def run(self):
"""
Run command.
"""
self.generate_conf_files()
if os.name == 'nt':
return
self.generate_default_files()
self.generate_init_files()
if self.install_conf:
self.install_conf_files()
if self.install_default:
self.install_default_files()
if self.install_init:
self.install_init_files()
# Enforces files and directories ownership
for c in ['run', 'log', 'var']:
p = default_paths[c]
self.spawn(["chown", "-R", "%s:%s" % (self.user, self.group), p])
for c in ['libexec']:
p = default_paths[c]
self.spawn(["chmod", "-R", "+X", p])
###############################################################################
#
# Debug output
#
###############################################################################
if os.getenv("DEBUG") == "1":
from pprint import pprint
print("Version")
pprint(get_shinken_version())
print("Packages")
pprint(find_packages(
exclude=[
"shinken.webui",
"shinken.webui.bottlecole",
"shinken.webui.bottlewebui"
]
))
print("Requirements")
pprint(get_requirements())
print("Default paths")
pprint(default_paths)
print("Data files")
pprint(data_files)
print("Default files")
pprint(default_files)
print("Init files")
pprint(init_files)
###############################################################################
#
# Setup
#
###############################################################################
setup(
name="Shinken",
version=get_shinken_version(),
packages=find_packages(
exclude=[
"shinken.webui",
"shinken.webui.bottlecole",
"shinken.webui.bottlewebui"
]
),
scripts=scripts,
package_data={'': package_data},
description="Shinken is a monitoring framework compatible with Nagios configuration and plugins",
long_description=read('README.rst'),
author="Gabes Jean",
author_email="naparuba@gmail.com",
license="GNU Affero General Public License",
url="http://www.shinken-monitoring.org",
zip_safe=False,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: GNU Affero General Public License v3',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Programming Language :: Python',
'Topic :: System :: Monitoring',
'Topic :: System :: Networking :: Monitoring',
],
install_requires=get_requirements(),
extras_require={
'setproctitle': ['setproctitle']
},
data_files=data_files,
cmdclass={
'post_install': post_install,
},
)
| 24,931 | Python | .py | 677 | 28.525849 | 101 | 0.525136 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,243 | .pylintrc | shinken-solutions_shinken/.pylintrc |
[MASTER]
# Specify a configuration file.
#rcfile=
# Python code to execute, usually for sys.path manipulation such as
# pygtk.require().
#init-hook=
# Profiled execution.
profile=no
# Add files or directories to the blacklist. They should be base names, not
# paths.
ignore=CVS
# Pickle collected data for later comparisons.
persistent=yes
# List of plugins (as comma separated values of python modules names) to load,
# usually to register additional checkers.
load-plugins=
# DEPRECATED
include-ids=no
# DEPRECATED
symbols=no
[MESSAGES CONTROL]
# Enable the message, report, category or checker with the given id(s). You can
# either give multiple identifiers separated by comma (,) or put this option
# multiple times. See also the "--disable" option for examples.
#enable=
# Disable the message, report, category or checker with the given id(s). You
# can either give multiple identifiers separated by comma (,) or put this
# option multiple times (only on the command line, not in the configuration
# file, where it should appear only once). You can also use "--disable=all" to
# disable everything first and then re-enable specific checks. For example, if
# you want to run only the similarities checker, you can use "--disable=all
# --enable=similarities". If you want to run only the classes checker, but have
# no Warning level messages displayed, use "--disable=all --enable=classes
# --disable=W"
disable=W0142
[REPORTS]
# Set the output format. Available formats are text, parseable, colorized, msvs
# (visual studio) and html. You can also give a reporter class, eg
# mypackage.mymodule.MyReporterClass.
output-format=text
# Put messages in a separate file for each module / package specified on the
# command line instead of printing them on stdout. Reports (if any) will be
# written in a file name "pylint_global.[txt|html]".
files-output=no
# Tells whether to display a full report or only the messages
reports=no
# Python expression which should return a note less than 10 (10 is the highest
# note). You have access to the variables error, warning, refactor, convention
# and statement, which respectively contain the number of messages in each
# category and the total number of statements analyzed. This is used by the
# global evaluation report (RP0004).
evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
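# Worked example (hypothetical counts, for illustration): with error=2,
# warning=5, refactor=3, convention=10 and statement=200, the note is
# 10.0 - ((5*2 + 5 + 3 + 10) / 200.0) * 10 = 10.0 - 1.4 = 8.6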
# Add a comment according to your evaluation note. This is used by the global
# evaluation report (RP0004).
comment=no
# Template used to display messages. This is a python new-style format string
# used to format the message information. See doc for all details
#msg-template=
# parsable
msg-template={path}:{line}: [{msg_id}({symbol}), {obj}] {msg}
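# With the template above, a message renders like this hypothetical line:
# shinken/daemon.py:42: [W0612(unused-variable), Daemon.do_load] Unused variable 'conf'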
[BASIC]
# Required attributes for module, separated by a comma
required-attributes=
# List of builtins function names that should not be used, separated by a comma
bad-functions=map,filter,apply,input,file
# Good variable names which should always be accepted, separated by a comma
good-names=i,j,k,ex,Run,_
# Bad variable names which should always be refused, separated by a comma
bad-names=foo,bar,baz,toto,tutu,tata
# Colon-delimited sets of names that determine each other's naming style when
# the name regexes allow several styles.
name-group=
# Include a hint for the correct naming format with invalid-name
include-naming-hint=no
# Regular expression matching correct function names
function-rgx=[a-z_][a-z0-9_]{2,30}$
# Naming hint for function names
function-name-hint=[a-z_][a-z0-9_]{2,30}$
# Regular expression matching correct variable names
variable-rgx=[a-z_][a-z0-9_]{2,30}$
# Naming hint for variable names
variable-name-hint=[a-z_][a-z0-9_]{2,30}$
# Regular expression matching correct constant names
const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$
# Naming hint for constant names
const-name-hint=(([A-Z_][A-Z0-9_]*)|(__.*__))$
# Regular expression matching correct attribute names
attr-rgx=[a-z_][a-z0-9_]{2,30}$
# Naming hint for attribute names
attr-name-hint=[a-z_][a-z0-9_]{2,30}$
# Regular expression matching correct argument names
argument-rgx=[a-z_][a-z0-9_]{2,30}$
# Naming hint for argument names
argument-name-hint=[a-z_][a-z0-9_]{2,30}$
# Regular expression matching correct class attribute names
class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$
# Naming hint for class attribute names
class-attribute-name-hint=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$
# Regular expression matching correct inline iteration names
inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$
# Naming hint for inline iteration names
inlinevar-name-hint=[A-Za-z_][A-Za-z0-9_]*$
# Regular expression matching correct class names
class-rgx=[A-Z_][a-zA-Z0-9]+$
# Naming hint for class names
class-name-hint=[A-Z_][a-zA-Z0-9]+$
# Regular expression matching correct module names
module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
# Naming hint for module names
module-name-hint=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
# Regular expression matching correct method names
method-rgx=[a-z_][a-z0-9_]{2,30}$
# Naming hint for method names
method-name-hint=[a-z_][a-z0-9_]{2,30}$
# Regular expression which should only match function or class names that do
# not require a docstring.
no-docstring-rgx=__.*__
# Minimum line length for functions/classes that require docstrings, shorter
# ones are exempt.
docstring-min-length=-1
[VARIABLES]
# Tells whether we should check for unused import in __init__ files.
init-import=no
# A regular expression matching the name of dummy variables (i.e. expectedly
# not used).
dummy-variables-rgx=_$|dummy
# List of additional names supposed to be defined in builtins. Remember that
# you should avoid to define new builtins when possible.
additional-builtins=
[LOGGING]
# Logging modules to check that the string format arguments are in logging
# function parameter format
logging-modules=logging
[MISCELLANEOUS]
# List of note tags to take in consideration, separated by a comma.
notes=FIXME,XXX,TODO
[TYPECHECK]
# Tells whether missing members accessed in mixin class should be ignored. A
# mixin class is detected if its name ends with "mixin" (case insensitive).
ignore-mixin-members=yes
# List of module names for which member attributes should not be checked
# (useful for modules/projects where namespaces are manipulated during runtime
# and thus existing member attributes cannot be deduced by static analysis).
ignored-modules=
# List of classes names for which member attributes should not be checked
# (useful for classes with attributes dynamically set).
ignored-classes=SQLObject
# When zope mode is activated, add a predefined set of Zope acquired attributes
# to generated-members.
zope=no
# List of members which are set dynamically and missed by pylint inference
# system, and so shouldn't trigger E0201 when accessed. Python regular
# expressions are accepted.
generated-members=REQUEST,acl_users,aq_parent
[SIMILARITIES]
# Minimum lines number of a similarity.
min-similarity-lines=4
# Ignore comments when computing similarities.
ignore-comments=yes
# Ignore docstrings when computing similarities.
ignore-docstrings=yes
# Ignore imports when computing similarities.
ignore-imports=no
[FORMAT]
# Maximum number of characters on a single line.
max-line-length=100
# Regexp for a line that is allowed to be longer than the limit.
ignore-long-lines=^\s*(# )?<?https?://\S+>?$
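# e.g. a line consisting solely of a long URL comment, such as
# "# <https://example.org/some/very/long/documentation/page>", is exempt.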
# Allow the body of an if to be on the same line as the test if there is no
# else.
single-line-if-stmt=no
# List of optional constructs for which whitespace checking is disabled
no-space-check=trailing-comma,dict-separator
# Maximum number of lines in a module
max-module-lines=1000
# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1
# tab).
indent-string=' '
[CLASSES]
# List of interface methods to ignore, separated by a comma. This is used for
# instance to not check methods defines in Zope's Interface base class.
ignore-iface-methods=isImplementedBy,deferred,extends,names,namesAndDescriptions,queryDescriptionFor,getBases,getDescriptionFor,getDoc,getName,getTaggedValue,getTaggedValueTags,isEqualOrExtendedBy,setTaggedValue,isImplementedByInstancesOf,adaptWith,is_implemented_by
# List of method names used to declare (i.e. assign) instance attributes.
defining-attr-methods=__init__,__new__,setUp
# List of valid names for the first argument in a class method.
valid-classmethod-first-arg=cls
# List of valid names for the first argument in a metaclass class method.
valid-metaclass-classmethod-first-arg=mcs
[DESIGN]
# Maximum number of arguments for function / method
max-args=5
# Argument names that match this expression will be ignored. Default to name
# with leading underscore
ignored-argument-names=_.*
# Maximum number of locals for function / method body
max-locals=15
# Maximum number of return / yield for function / method body
max-returns=6
# Maximum number of branch for function / method body
max-branches=12
# Maximum number of statements in function / method body
max-statements=50
# Maximum number of parents for a class (see R0901).
max-parents=7
# Maximum number of attributes for a class (see R0902).
max-attributes=7
# Minimum number of public methods for a class (see R0903).
min-public-methods=2
# Maximum number of public methods for a class (see R0904).
max-public-methods=20
[IMPORTS]
# Deprecated modules which should not be used, separated by a comma
deprecated-modules=regsub,TERMIOS,Bastion,rexec
# Create a graph of every (i.e. internal and external) dependencies in the
# given file (report RP0402 must not be disabled)
import-graph=
# Create a graph of external dependencies in the given file (report RP0402 must
# not be disabled)
ext-import-graph=
# Create a graph of internal dependencies in the given file (report RP0402 must
# not be disabled)
int-import-graph=
[EXCEPTIONS]
# Exceptions that will emit a warning when being caught. Defaults to
# "Exception"
overgeneral-exceptions=Exception
| 9,986 | Python | .py | 226 | 42.699115 | 266 | 0.780415 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,244 | test_timeperiods.py | shinken-solutions_shinken/test/test_timeperiods.py |
#!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test timeperiods
#
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken_test import *
from shinken.objects.timeperiod import Timeperiod
class TestTimeperiods(ShinkenTest):
def test_simple_timeperiod(self):
self.print_header()
t = Timeperiod()
now = time.time()
# Get the 12 of july 2010 at 15:00, monday
july_the_12 = time.mktime(time.strptime("12 Jul 2010 15:00:00", "%d %b %Y %H:%M:%S"))
print(july_the_12)
# First a false test, no results
t = Timeperiod()
t.timeperiod_name = ''
t.resolve_daterange(t.dateranges, '1999-01-28 00:00-24:00')
t_next = t.get_next_valid_time_from_t(now)
self.assertIs(None, t_next)
# Then a simple same day
t = Timeperiod()
t.timeperiod_name = ''
t.resolve_daterange(t.dateranges, 'tuesday 16:30-24:00')
t_next = t.get_next_valid_time_from_t(july_the_12)
t_next = time.asctime(time.localtime(t_next))
print(t_next)
self.assertEqual("Tue Jul 13 16:30:00 2010", t_next)
def test_simple_with_multiple_time(self):
self.print_header()
t = Timeperiod()
now = time.time()
# Get the 12 of july 2010 at 15:00, monday
july_the_12 = time.mktime(time.strptime("12 Jul 2010 15:00:00", "%d %b %Y %H:%M:%S"))
print(july_the_12)
# First a false test, no results
t = Timeperiod()
t.timeperiod_name = ''
t.resolve_daterange(t.dateranges, '1999-01-28 00:00-07:00,21:30-24:00')
t_next = t.get_next_valid_time_from_t(now)
self.assertIs(None, t_next)
# Then a simple same day
print("Cheking validity for", time.asctime(time.localtime(july_the_12)))
t = Timeperiod()
t.timeperiod_name = ''
t.resolve_daterange(t.dateranges, 'tuesday 00:00-07:00,21:30-24:00')
t_next = t.get_next_valid_time_from_t(july_the_12)
t_next = time.asctime(time.localtime(t_next))
print("RES:", t_next)
self.assertEqual("Tue Jul 13 00:00:00 2010", t_next)
# Now what about asking at exactly 00:00?
july_the_12 = time.mktime(time.strptime("12 Jul 2010 00:00:00", "%d %b %Y %H:%M:%S"))
# Then a simple same day
t = Timeperiod()
t.timeperiod_name = ''
t.resolve_daterange(t.dateranges, 'tuesday 00:00-07:00,21:30-24:00')
t_next = t.get_next_valid_time_from_t(july_the_12)
t_next = time.asctime(time.localtime(t_next))
print("Next?", t_next)
self.assertEqual("Tue Jul 13 00:00:00 2010", t_next)
def test_simple_with_multiple_time_multiple_days(self):
self.print_header()
t = Timeperiod()
now = time.time()
# Get the 12 of july 2010 at 15:00, monday
july_the_12 = time.mktime(time.strptime("12 Jul 2010 15:00:00", "%d %b %Y %H:%M:%S"))
print(july_the_12)
# First a false test, no results
t = Timeperiod()
t.timeperiod_name = ''
t.resolve_daterange(t.dateranges, '1999-01-28 00:00-07:00,21:30-24:00')
t_next = t.get_next_valid_time_from_t(now)
self.assertIs(None, t_next)
# Then a simple same day
t = Timeperiod()
t.timeperiod_name = ''
# monday 00:00-07:00,21:30-24:00
# tuesday 00:00-07:00,21:30-24:00
print("Cheking validity for", time.asctime(time.localtime(july_the_12)))
t.resolve_daterange(t.dateranges, 'monday 00:00-07:00,21:30-24:00')
t.resolve_daterange(t.dateranges, 'tuesday 00:00-07:00,21:30-24:00')
t_next = t.get_next_valid_time_from_t(july_the_12)
t_next = time.asctime(time.localtime(t_next))
print("RES:", t_next)
self.assertEqual("Mon Jul 12 21:30:00 2010", t_next)
# what about the next invalid?
t_next_inv = t.get_next_invalid_time_from_t(july_the_12)
t_next_inv = time.asctime(time.localtime(t_next_inv))
print("RES:", t_next_inv)
self.assertEqual("Mon Jul 12 15:00:00 2010", t_next_inv)
# what about a valid time and ask next invalid? Like at 22:00h?
print("GO" * 10)
july_the_12 = time.mktime(time.strptime("12 Jul 2010 22:00:00", "%d %b %Y %H:%M:%S"))
t_next_inv = t.get_next_invalid_time_from_t(july_the_12)
t_next_inv = time.asctime(time.localtime(t_next_inv))
print("RES:", t_next_inv) #, t.is_time_valid(july_the_12))
self.assertEqual("Tue Jul 13 07:01:00 2010", t_next_inv)
# Now what about asking at exactly 00:00?
july_the_12 = time.mktime(time.strptime("12 Jul 2010 00:00:00", "%d %b %Y %H:%M:%S"))
print("Cheking validity for", time.asctime(time.localtime(july_the_12)))
# Then a simple same day
t = Timeperiod()
t.timeperiod_name = ''
t.resolve_daterange(t.dateranges, 'monday 00:00-07:00,21:30-24:00')
t.resolve_daterange(t.dateranges, 'tuesday 00:00-07:00,21:30-24:00')
t_next = t.get_next_valid_time_from_t(july_the_12)
t_next = time.asctime(time.localtime(t_next))
print("Next?", t_next)
self.assertEqual("Mon Jul 12 00:00:00 2010", t_next)
# Now look for the never case
print("24x7" * 10)
t = self.conf.timeperiods.find_by_name('24x7')
self.assertIsNot(t, None)
t_next_inv = t.get_next_invalid_time_from_t(july_the_12)
t_next_inv = time.asctime(time.localtime(t_next_inv))
print("RES:", t_next_inv) #, t.is_time_valid(july_the_12))
self.assertEqual('Wed Jul 13 00:01:00 2011', t_next_inv)
def test_simple_timeperiod_with_exclude(self):
self.print_header()
t = Timeperiod()
now = time.time()
# Get the 12 of july 2010 at 15:00, monday
july_the_12 = time.mktime(time.strptime("12 Jul 2010 15:00:00", "%d %b %Y %H:%M:%S"))
print(july_the_12)
# First a false test, no results
t = Timeperiod()
t.timeperiod_name = ''
t.resolve_daterange(t.dateranges, '1999-01-28 00:00-24:00')
t_next = t.get_next_valid_time_from_t(now)
self.assertIs(None, t_next)
# Then a simple same day
t = Timeperiod()
t.timeperiod_name = ''
t.resolve_daterange(t.dateranges, 'tuesday 16:30-24:00')
t_next = t.get_next_valid_time_from_t(july_the_12)
t_next = time.asctime(time.localtime(t_next))
print(t_next)
self.assertEqual("Tue Jul 13 16:30:00 2010", t_next)
# Now we add an exclusion to this timeperiod
t2 = Timeperiod()
t2.timeperiod_name = ''
t2.resolve_daterange(t2.dateranges, 'tuesday 08:30-21:00')
t.exclude = [t2]
# So the next valid time will be after 16:30 but not before 21:00:
# it will be 21:00:01 (the first second after the exclusion is valid).
# We clean the cache of previous calculations of t ;)
t.cache = {}
t_next = t.get_next_valid_time_from_t(july_the_12)
t_next = time.asctime(time.localtime(t_next))
print("T nxt with exclude:", t_next)
self.assertEqual("Tue Jul 13 21:00:01 2010", t_next)
def test_dayweek_timeperiod_with_exclude(self):
self.print_header()
now = time.time()
# Get the 12 of july 2010 at 15:00, monday
july_the_12 = time.mktime(time.strptime("12 Jul 2010 15:00:00", "%d %b %Y %H:%M:%S"))
# Then a simple same day
t = Timeperiod()
t.timeperiod_name = 'T1'
t.resolve_daterange(t.dateranges, 'tuesday 2 16:30-24:00')
t_next = t.get_next_valid_time_from_t(july_the_12)
t_next = time.asctime(time.localtime(t_next))
print("T next", t_next)
self.assertEqual("Tue Jul 13 16:30:00 2010", t_next)
# Now we add an exclusion to this timeperiod
t2 = Timeperiod()
t2.timeperiod_name = 'T2'
t2.resolve_daterange(t2.dateranges, 'tuesday 00:00-23:58')
t.exclude = [t2]
# We are being difficult: the timeperiod wants a tuesday,
# but the exclusion blocks it until 23:58. So next is 23:58 + 1 second :)
t.cache = {}
t_next = t.get_next_valid_time_from_t(july_the_12)
t_exclude = t2.get_next_valid_time_from_t(july_the_12)
t_exclude_inv = t2.get_next_invalid_time_from_t(july_the_12)
print("T next raw", t_next)
t_next = time.asctime(time.localtime(t_next))
print("TOTO T next", t_next)
self.assertEqual('Tue Jul 13 23:58:01 2010', t_next)
def test_mondayweek_timeperiod_with_exclude(self):
self.print_header()
now = time.time()
# Get the 12 of july 2010 at 15:00, monday
july_the_12 = time.mktime(time.strptime("12 Jul 2010 15:00:00", "%d %b %Y %H:%M:%S"))
# Then a simple same day
t = Timeperiod()
t.timeperiod_name = 'T1'
t.resolve_daterange(t.dateranges, 'tuesday 2 16:30-24:00')
t_next = t.get_next_valid_time_from_t(july_the_12)
t_next = time.asctime(time.localtime(t_next))
self.assertEqual("Tue Jul 13 16:30:00 2010", t_next)
# Now we add an exclusion to this timeperiod,
# an effective one: from april 1 (before) to august 16 (after), full time.
# Aug 17 is a tuesday, but it is the 3rd tuesday of august, so the next
# matching "2nd tuesday" is ..... Tue Sep 14 :) Yes, quite a long wait :)
t2 = Timeperiod()
t2.timeperiod_name = 'T2'
t2.resolve_daterange(t2.dateranges, 'april 1 - august 16 00:00-24:00')
#print(t2.__dict__)
t.exclude = [t2]
# We are being difficult: the timeperiod wants a 2nd tuesday,
# but the exclusion masks them until mid-august. So next is in september :)
t.cache = {}
t_next = t.get_next_valid_time_from_t(july_the_12)
#print("Check from", time.asctime(time.localtime(july_the_12)))
#t_exclude = t2.get_next_valid_time_from_t(july_the_12)
t_exclude_inv = t2.get_next_invalid_time_from_t(july_the_12)
#print("T2 next valid", time.asctime(time.localtime(t_exclude)))
print("Next invalid T2", time.asctime(time.localtime(t_exclude_inv)))
print("T next raw JEAN", t_next)
print("T next?", time.asctime(time.localtime(t_next)))
t_next = time.asctime(time.localtime(t_next))
self.assertEqual('Tue Sep 14 16:30:00 2010', t_next)
def test_mondayweek_timeperiod_with_exclude_bis(self):
self.print_header()
now = time.time()
# Get the 12 of july 2010 at 15:00, monday
july_the_12 = time.mktime(time.strptime("12 Jul 2010 15:00:00", "%d %b %Y %H:%M:%S"))
# Then a funny daterange
print("Testing daterange", 'tuesday -1 - monday 1 16:30-24:00')
t = Timeperiod()
t.timeperiod_name = 'T1'
t.resolve_daterange(t.dateranges, 'tuesday -1 - monday 1 16:30-24:00')
t_next = t.get_next_valid_time_from_t(july_the_12)
t_next = time.asctime(time.localtime(t_next))
print("Next without exclude", t_next)
self.assertEqual("Tue Jul 27 16:30:00 2010", t_next)
# Now we add an exclusion to this timeperiod,
# an effective one: from april 1 (before) to august 16 (after), full time.
# Jul 27 is now not possible, so what next? Add a month!
# The last tuesday of august, the 31 :)
t2 = Timeperiod()
t2.timeperiod_name = 'T2'
t2.resolve_daterange(t2.dateranges, 'april 1 - august 16 00:00-24:00')
#print(t2.__dict__)
t.exclude = [t2]
# We are being difficult: the timeperiod wants a last-tuesday range,
# but the exclusion masks it until mid-august. So next is Tue Aug 31 :)
t.cache = {}
t_next = t.get_next_valid_time_from_t(july_the_12)
#print("Check from", time.asctime(time.localtime(july_the_12)))
#t_exclude = t2.get_next_valid_time_from_t(july_the_12)
t_exclude_inv = t2.get_next_invalid_time_from_t(july_the_12)
#print("T2 next valid", time.asctime(time.localtime(t_exclude)))
print("Next invalid T2", time.asctime(time.localtime(t_exclude_inv)))
print("T next raw JEAN2", t_next)
print("T next?", time.asctime(time.localtime(t_next)))
t_next = time.asctime(time.localtime(t_next))
self.assertEqual('Tue Aug 31 16:30:00 2010', t_next)
def test_funky_mondayweek_timeperiod_with_exclude_and_multiple_daterange(self):
self.print_header()
now = time.time()
# Get the 12 of july 2010 at 15:00, monday
july_the_12 = time.mktime(time.strptime("12 Jul 2010 15:00:00", "%d %b %Y %H:%M:%S"))
# Then a funny daterange
print("Testing daterange", 'tuesday -1 - monday 1 16:30-24:00')
t = Timeperiod()
t.timeperiod_name = 'T1'
t.resolve_daterange(t.dateranges, 'tuesday -1 - monday 1 16:30-24:00')
t_next = t.get_next_valid_time_from_t(july_the_12)
t_next = time.asctime(time.localtime(t_next))
print("Next without exclude", t_next)
self.assertEqual("Tue Jul 27 16:30:00 2010", t_next)
# Now we add an exclusion to this timeperiod,
# an effective one: from april 1 (before) to august 16 (after), full time.
# Jul 27 is now not possible, so what next? Add a month!
# But maybe that's not enough? :)
# Without the 2nd exclude it would be Tue Aug 31, but that falls inside
# 'saturday -1 - monday 1' because saturday -1 is august 28, so no.
# In september saturday -1 is the 25 and tuesday -1 is the 28, so still no.
# A month again! In october tuesday -1 is the 26 and saturday -1 the 30,
# so this one is ok! That was quite long, wasn't it? And funky! :)
t2 = Timeperiod()
t2.timeperiod_name = 'T2'
t2.resolve_daterange(t2.dateranges, 'april 1 - august 16 00:00-24:00')
# Oops, I add an inner daterange ;)
t2.resolve_daterange(t2.dateranges, 'saturday -1 - monday 1 16:00-24:00')
t.exclude = [t2]
# We are being difficult: the timeperiod wants a last-tuesday range,
# but the two exclusions mask it until late october. So next is Tue Oct 26 :)
t.cache = {}
t_next = t.get_next_valid_time_from_t(july_the_12)
#print("Check from", time.asctime(time.localtime(july_the_12)))
#t_exclude = t2.get_next_valid_time_from_t(july_the_12)
t_exclude_inv = t2.get_next_invalid_time_from_t(july_the_12)
#print("T2 next valid", time.asctime(time.localtime(t_exclude)))
print("Next invalid T2", time.asctime(time.localtime(t_exclude_inv)))
print("T next raw", t_next)
print("T next?", time.asctime(time.localtime(t_next)))
t_next = time.asctime(time.localtime(t_next))
self.assertEqual('Tue Oct 26 16:30:00 2010', t_next)
print("Finish this Funky test :)")
def test_monweekday_timeperiod_with_exclude(self):
self.print_header()
now = time.time()
# Get the 12 of july 2010 at 15:00, monday
july_the_12 = time.mktime(time.strptime("12 Jul 2010 15:00:00", "%d %b %Y %H:%M:%S"))
# Then a funny daterange
print("Testing daterange", 'tuesday -1 july - monday 1 august 16:30-24:00')
t = Timeperiod()
t.timeperiod_name = 'T1'
t.resolve_daterange(t.dateranges, 'tuesday -1 july - monday 1 september 16:30-24:00')
t_next = t.get_next_valid_time_from_t(july_the_12)
t_next = time.asctime(time.localtime(t_next))
print("Next without exclude", t_next)
self.assertEqual("Tue Jul 27 16:30:00 2010", t_next)
# Now we add an exclusion to this timeperiod:
# from thursday 1 april (before) to monday 3 august (monday august 16),
# so Jul 27 is no longer possible. Just after the exclusion: Tue Aug 17.
t2 = Timeperiod()
t2.timeperiod_name = 'T2'
t2.resolve_daterange(t2.dateranges, 'thursday 1 april - monday 3 august 00:00-24:00')
print(t2.dateranges[0].__dict__)
t.exclude = [t2]
# We are being difficult: the timeperiod wants a tuesday,
# but the exclusion masks it until monday 3 august. So next is Tue Aug 17 :)
t.cache = {}
t_next = t.get_next_valid_time_from_t(july_the_12)
#print("Check from", time.asctime(time.localtime(july_the_12)))
#t_exclude = t2.get_next_valid_time_from_t(july_the_12)
t_exclude_inv = t2.get_next_invalid_time_from_t(july_the_12)
#print("T2 next valid", time.asctime(time.localtime(t_exclude)))
print("Next invalid T2", time.asctime(time.localtime(t_exclude_inv)))
print("T next raw", t_next)
print("T next?", time.asctime(time.localtime(t_next)))
t_next = time.asctime(time.localtime(t_next))
self.assertEqual('Tue Aug 17 16:30:00 2010', t_next)
def test_dayweek_exclusion_timeperiod(self):
self.print_header()
t = Timeperiod()
now = time.time()
# Get the 13 of july 2010 at 15:00, tuesday
july_the_13 = time.mktime(time.strptime("13 Jul 2010 15:00:00", "%d %b %Y %H:%M:%S"))
print(july_the_13)
# Now we add an exclusion to this timeperiod
t2 = Timeperiod()
t2.timeperiod_name = ''
t2.resolve_daterange(t2.dateranges, 'tuesday 00:00-24:00')
t.exclude = [t2]
t.resolve_daterange(t.dateranges, 'monday 00:00-24:00')
t.resolve_daterange(t.dateranges, 'tuesday 00:00-24:00')
t.resolve_daterange(t.dateranges, 'wednesday 00:00-24:00')
t_next = t.get_next_valid_time_from_t(july_the_13)
t_next = time.asctime(time.localtime(t_next))
print("T next", t_next)
self.assertEqual("Wed Jul 14 00:00:00 2010", t_next)
def test_dayweek_exclusion_timeperiod_with_day_range(self):
self.print_header()
t = Timeperiod()
# Get the 13 of july 2010 at 15:00, tuesday
july_the_13 = time.mktime(time.strptime("13 Jul 2010 15:00:00", "%d %b %Y %H:%M:%S"))
print(july_the_13)
# Now we add an exclusion to this timeperiod
t2 = Timeperiod()
t2.timeperiod_name = ''
t2.resolve_daterange(t2.dateranges, 'tuesday 00:00-24:00')
t.exclude = [t2]
t.resolve_daterange(t.dateranges, '2010-03-01 - 2020-03-01 00:00-24:00')
t_next = t.get_next_valid_time_from_t(july_the_13)
t_next = time.asctime(time.localtime(t_next))
now = time.time()
now = time.asctime(time.localtime(now))
print("T next", t_next)
# print("T now", now)
# self.assertEqual(now, t_next)
self.assertEqual("Wed Jul 14 00:00:01 2010", t_next)
# A short test to check the next-invalid computation of timeranges
def test_next_invalid_day(self):
self.print_header()
# Get the 13 of july 2010 at 15:00, tuesday
july_the_13 = time.mktime(time.strptime("13 Jul 2010 15:00:00", "%d %b %Y %H:%M:%S"))
print(july_the_13)
t = Timeperiod()
t.timeperiod_name = 'test_next_invalid_day'
t.resolve_daterange(t.dateranges, 'tuesday 00:00-24:00')
t.exclude = []
t_next_invalid = t.get_next_invalid_time_from_t(july_the_13)
t_next_invalid = time.asctime(time.localtime(t_next_invalid))
print("T next invalid", t_next_invalid)
self.assertEqual("Wed Jul 14 00:00:01 2010", t_next_invalid)
def test_issue_1385(self):
'''
https://github.com/naparuba/shinken/issues/1385
'''
tp = Timeperiod()
tp.timeperiod_name = 'mercredi2-22-02'
tp.resolve_daterange(tp.dateranges, 'wednesday 2 00:00-02:00,22:00-24:00')
tp.resolve_daterange(tp.dateranges, 'thursday 2 00:00-02:00,22:00-24:00')
valid_times = (
(2014, 11, 12, 1, 0), # second wednesday of november @ 01:00
(2014, 11, 12, 23, 0), # same @23:00
(2014, 11, 13, 0, 0), # second thursday @ 00:00
# in december:
(2014, 12, 10, 1, 0),
(2014, 12, 10, 23, 0),
(2014, 12, 11, 1, 0),
(2014, 12, 11, 23, 0),
)
for valid in valid_times:
dt = datetime.datetime(*valid)
valid_tm = time.mktime(dt.timetuple())
self.assertTrue(tp.is_time_valid(valid_tm))
invalid_times = (
(2014, 11, 3, 1, 0), # first wednesday ..
(2014, 11, 4, 1, 0), # first thursday
(2014, 11, 17, 1, 0), # third ..
(2014, 11, 18, 1, 0),
# in december:
(2014, 12, 5, 3, 0),
(2014, 12, 17, 1, 0),
(2014, 12, 18, 1, 0),
(2014, 12, 24, 1, 0),
(2014, 12, 25, 1, 0),
(2014, 12, 31, 1, 0),
)
for invalid in invalid_times:
dt = datetime.datetime(*invalid)
invalid_tm = time.mktime(dt.timetuple())
self.assertFalse(tp.is_time_valid(invalid_tm))
if __name__ == '__main__':
unittest.main()
| 21,783 | Python | .py | 440 | 40.765909 | 96 | 0.60692 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,245 | test_conflict_policy.py | shinken-solutions_shinken/test/test_conflict_policy.py |
#!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken_test import *
class TestDefinitionOrder(ShinkenTest):
def setUp(self):
self.setup_with_file('etc/shinken_conflict_policy.cfg')
def test_conflict_policy(self):
self.assertFalse(self.conf.conf_is_correct)
#[b.prepare() for b in self.broks]
logs = [b.data['log'] for b in self.broks if b.type == 'log']
mesg = 'duplicate host name test_host_conflict'
self.assertEqual(1, len([log for log in logs if re.search(mesg, log)]))
mesg = 'duplicate service name test_host_specific/test_service_conflict'
self.assertEqual(1, len([log for log in logs if re.search(mesg, log)]))
mesg = 'duplicate service name test_host_specific/ZE-SERVICE'
self.assertEqual(1, len([log for log in logs if re.search(mesg, log)]))
if __name__ == '__main__':
unittest.main()
| 1,823 | Python | .py | 39 | 43.128205 | 82 | 0.722097 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,246 | test_nocontacts.py | shinken-solutions_shinken/test/test_nocontacts.py |
#!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken_test import *
class TestNoContact(ShinkenTest):
def setUp(self):
self.setup_with_file('etc/shinken_nocontacts.cfg')
# It seems Nagios allows elements with no contacts, raising just a warning
# and not an error. Shinken should do the same.
def test_nocontact(self):
host = self.sched.hosts.find_by_name("test_host_0")
self.assertEqual([], host.contacts)
svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
self.assertEqual([], svc.contacts)
self.assertTrue(self.sched.conf.is_correct)
if __name__ == '__main__':
unittest.main()
| 1,588 | Python | .py | 37 | 39.837838 | 91 | 0.735409 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,247 | test_servicetpl_no_hostname.py | shinken-solutions_shinken/test/test_servicetpl_no_hostname.py |
#!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken_test import *
class TestServiceTplNoHostname(ShinkenTest):
def setUp(self):
self.setup_with_file('etc/shinken_servicetpl_no_hostname.cfg')
def test_dummy(self):
#
# Config is not correct because of a wrong relative path
# in the main config file
#
print("Get the hosts and services")
now = time.time()
host = self.sched.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
router = self.sched.hosts.find_by_name("test_router_0")
router.checks_in_progress = []
router.act_depend_of = [] # ignore the router
svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
svc.checks_in_progress = []
svc.act_depend_of = [] # no hostchecks on critical checkresults
self.scheduler_loop(2, [[host, 0, 'UP | value1=1 value2=2'], [router, 0, 'UP | rtt=10'], [svc, 2, 'BAD | value1=0 value2=0']])
self.assertEqual('UP', host.state)
self.assertEqual('HARD', host.state_type)
if __name__ == '__main__':
unittest.main()
| 2,134 | Python | .py | 48 | 39.854167 | 134 | 0.688974 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,248 | test_reversed_list.py | shinken-solutions_shinken/test/test_reversed_list.py |
#!/usr/bin/env python
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken_test import ShinkenTest, unittest
from shinken.misc.regenerator import Regenerator
from shinken.brok import Brok
class TestReversedList(ShinkenTest):
def setUp(self):
self.setup_with_file("etc/shinken_service_withhost_exclude.cfg")
def test_reversed_list(self):
""" Test to ensure new conf is properly merge with different servicegroup definition
The first conf has all its servicegroup defined servicegroups.cfg and services.cfg
The second conf has both, so that servicegroups defined ins services.cfg are genretaed by Shinken
This lead to another generated id witch should be handled properly when regenerating reversed list / merging
servicegroups definition
"""
sg = self.sched.servicegroups.find_by_name('servicegroup_01')
prev_id = sg.id
reg = Regenerator()
data = {"instance_id": 0}
b = Brok('program_status', data)
#b.prepare()
reg.manage_program_status_brok(b)
reg.all_done_linking(0)
self.setup_with_file("etc/shinken_reversed_list.cfg")
reg.all_done_linking(0)
#for service in self.sched.servicegroups:
# assert(service.servicegroup_name in self.sched.servicegroups.reversed_list.keys())
# assert(service.id == self.sched.servicegroups.reversed_list[service.servicegroup_name])
sg = self.sched.servicegroups.find_by_name('servicegroup_01')
assert(prev_id != sg.id)
for sname in [u'servicegroup_01', u'ok', u'flap', u'unknown', u'random',
u'servicegroup_02', u'servicegroup_03', u'warning', u'critical',
u'servicegroup_04', u'servicegroup_05', u'pending', u'mynewgroup']:
sg = self.sched.servicegroups.find_by_name(sname)
assert(sg is not None)
if __name__ == '__main__':
unittest.main()
| 2,005 | Python | .py | 37 | 45.621622 | 116 | 0.680328 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,249 | test_get_checks.py | shinken-solutions_shinken/test/test_get_checks.py |
#!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken_test import *
import time
class TestPollerTagGetchecks(ShinkenTest):
def setUp(self):
self.setup_with_file('etc/shinken_get_checks.cfg')
def test_get_whole_checks(self):
self.sched.schedule()
self.sched.get_new_actions()
checks = self.sched.checks
time.sleep(60)
self.assertEqual(len(checks), 12)
for c in checks.values():
self.assertEqual(c.status, "scheduled")
self.assertEquals(c.worker, "none")
in_poller = self.sched.get_to_run_checks(
do_checks=True,
do_actions=False,
worker_name='test')
self.assertEqual(len(in_poller), 12)
for c in checks.values():
self.assertEqual(c.status, "inpoller")
self.assertEquals(c.worker, "test")
def test_get_most_urgent_checks(self):
self.sched.schedule()
self.sched.get_new_actions()
checks = self.sched.checks
time.sleep(60)
self.assertEqual(len(checks), 12)
for c in checks.values():
self.assertEqual(c.status, "scheduled")
self.assertEquals(c.worker, "none")
in_poller = self.sched.get_to_run_checks(
do_checks=True,
do_actions=False,
worker_name='test',
max_actions=3)
self.assertEqual(len(in_poller), 3)
for c in checks.values():
if c.priority == 10:
self.assertEqual(c.status, "inpoller")
self.assertEquals(c.worker, "test")
else:
self.assertEqual(c.status, "scheduled")
self.assertEquals(c.worker, "none")
if __name__ == '__main__':
unittest.main()
| 2,700 | Python | .py | 69 | 31.884058 | 82 | 0.651608 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,250 | test_dot_virg_in_command.py | shinken-solutions_shinken/test/test_dot_virg_in_command.py |
#!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken_test import *
class TestConfig(ShinkenTest):
def setUp(self):
self.setup_with_file('etc/shinken_dot_virg_in_command.cfg')
def test_dummy(self):
#
# Config is not correct because of a wrong relative path
# in the main config file
#
print("Get the hosts and services")
now = time.time()
host = self.sched.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
router = self.sched.hosts.find_by_name("test_router_0")
router.checks_in_progress = []
router.act_depend_of = [] # ignore the router
svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
svc.checks_in_progress = []
svc.act_depend_of = [] # no hostchecks on critical checkresults
print(svc.event_handler.args)
self.assertIn('sudo -s pkill toto ; cd /my/path && ./toto', svc.event_handler.args)
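# NOTE (illustrative): in Nagios-style object files a literal ';' normally
# starts a comment, so the fixture's command definition is assumed to escape
# it, typically written as:
#   command_line sudo -s pkill toto \; cd /my/path && ./toto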
if __name__ == '__main__':
unittest.main()
| 2,020 | Python | .py | 47 | 38.468085 | 91 | 0.693992 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,251 | test_passive_pollers.py | shinken-solutions_shinken/test/test_passive_pollers.py |
#!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken_test import *
class GoodArbiter(ArbiterLink):
# To lie about satellites
def ping(self):
print("Dummy OK for", self.get_name())
self.set_alive()
def have_conf(self, i):
return True
def do_not_run(self):
pass
class GoodScheduler(SchedulerLink):
# To lie about satellites
def ping(self):
print("Dummy OK for", self.get_name())
self.set_alive()
def have_conf(self, i):
return True
def put_conf(self, conf):
return True
class BadScheduler(SchedulerLink):
def ping(self):
print("Dummy bad ping", self.get_name())
self.add_failed_check_attempt()
def have_conf(self, i):
return False
class GoodPoller(PollerLink):
# To lie about satellites
def ping(self):
print("Dummy OK for", self.get_name())
self.set_alive()
def put_conf(self, conf):
return True
class BadPoller(PollerLink):
def ping(self):
print("Dummy bad ping", self.get_name())
self.add_failed_check_attempt()
class GoodReactionner(ReactionnerLink):
# To lie about satellites
def ping(self):
print("Dummy OK for", self.get_name())
self.set_alive()
def put_conf(self, conf):
return True
class BadReactionner(ReactionnerLink):
def ping(self):
print("Dummy bad ping", self.get_name())
self.add_failed_check_attempt()
class GoodBroker(BrokerLink):
# To lie about satellites
def ping(self):
print("Dummy OK for", self.get_name())
self.set_alive()
def put_conf(self, conf):
return True
class BadBroker(BrokerLink):
def ping(self):
print("Dummy bad ping", self.get_name())
self.add_failed_check_attempt()
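# NOTE (illustrative): the test below stubs the network layer by swapping
# each satellite instance's class at runtime, e.g.:
#
#     scheduler1.__class__ = GoodScheduler   # ping()/put_conf() now lie
#
# a blunt but effective monkey-patching idiom that avoids spawning real
# daemons.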
class TestPassivePoller(ShinkenTest):
def setUp(self):
self.setup_with_file('etc/shinken_passive_pollers.cfg')
def test_simple_passive_pollers(self):
print("The dispatcher", self.dispatcher)
# dummy for the arbiter
for a in self.conf.arbiters:
a.__class__ = GoodArbiter
print("Preparing schedulers")
scheduler1 = self.conf.schedulers.find_by_name('scheduler-all-1')
self.assertIsNot(scheduler1, None)
scheduler1.__class__ = GoodScheduler
scheduler2 = self.conf.schedulers.find_by_name('scheduler-all-2')
self.assertIsNot(scheduler2, None)
scheduler2.__class__ = BadScheduler
# Poller 1 is normal, 2 and 3 are passives
print("Preparing pollers")
poller1 = self.conf.pollers.find_by_name('poller-all-1')
self.assertIsNot(poller1, None)
poller1.__class__ = GoodPoller
print(poller1.__dict__)
self.assertEqual(False, poller1.passive)
poller2 = self.conf.pollers.find_by_name('poller-all-2')
self.assertIsNot(poller2, None)
poller2.__class__ = GoodPoller
self.assertEqual(True, poller2.passive)
poller3 = self.conf.pollers.find_by_name('poller-all-3')
self.assertIsNot(poller3, None)
poller3.__class__ = GoodPoller
self.assertEqual(True, poller3.passive)
print("Preparing reactionners")
reactionner1 = self.conf.reactionners.find_by_name('reactionner-all-1')
self.assertIsNot(reactionner1, None)
reactionner1.__class__ = GoodReactionner
reactionner2 = self.conf.reactionners.find_by_name('reactionner-all-2')
self.assertIsNot(reactionner2, None)
reactionner2.__class__ = BadReactionner
print("Preparing brokers")
broker1 = self.conf.brokers.find_by_name('broker-all-1')
self.assertIsNot(broker1, None)
broker1.__class__ = GoodBroker
broker2 = self.conf.brokers.find_by_name('broker-all-2')
self.assertIsNot(broker2, None)
broker2.__class__ = BadBroker
# Ping all elements. The 1-series should be OK, the 2-series should
# each have one failed attempt (3 max)
self.dispatcher.check_alive()
# Check good values
self.assertEqual(True, scheduler1.alive)
self.assertEqual(0, scheduler1.attempt)
self.assertEqual(True, scheduler1.reachable)
# still alive, just unreach
self.assertEqual(True, scheduler2.alive)
self.assertEqual(1, scheduler2.attempt)
self.assertEqual(False, scheduler2.reachable)
# and others satellites too
self.assertEqual(True, poller1.alive)
self.assertEqual(0, poller1.attempt)
self.assertEqual(True, poller1.reachable)
# passive poller: still alive and reachable
self.assertEqual(True, poller2.alive)
self.assertEqual(0, poller2.attempt)
self.assertEqual(True, poller2.reachable)
# and others satellites too
self.assertEqual(True, reactionner1.alive)
self.assertEqual(0, reactionner1.attempt)
self.assertEqual(True, reactionner1.reachable)
# still alive, just unreach
self.assertEqual(True, reactionner2.alive)
self.assertEqual(1, reactionner2.attempt)
self.assertEqual(False, reactionner2.reachable)
# and others satellites too
self.assertEqual(True, broker1.alive)
self.assertEqual(0, broker1.attempt)
self.assertEqual(True, broker1.reachable)
# still alive, just unreach
self.assertEqual(True, broker2.alive)
self.assertEqual(1, broker2.attempt)
self.assertEqual(False, broker2.reachable)
time.sleep(60)
### Now add another attempt: still alive, but attempt=2/3
self.dispatcher.check_alive()
# Check good values
self.assertEqual(True, scheduler1.alive)
self.assertEqual(0, scheduler1.attempt)
self.assertEqual(True, scheduler1.reachable)
# still alive, just unreach
self.assertEqual(True, scheduler2.alive)
#import pdb; pdb.set_trace()
self.assertEqual(2, scheduler2.attempt)
self.assertEqual(False, scheduler2.reachable)
# and others satellites too
self.assertEqual(True, poller1.alive)
self.assertEqual(0, poller1.attempt)
self.assertEqual(True, poller1.reachable)
# passive poller: still alive and reachable
self.assertEqual(True, poller2.alive)
self.assertEqual(0, poller2.attempt)
self.assertEqual(True, poller2.reachable)
# and others satellites too
self.assertEqual(True, reactionner1.alive)
self.assertEqual(0, reactionner1.attempt)
self.assertEqual(True, reactionner1.reachable)
# still alive, just unreach
self.assertEqual(True, reactionner2.alive)
self.assertEqual(2, reactionner2.attempt)
self.assertEqual(False, reactionner2.reachable)
# and others satellites too
self.assertEqual(True, broker1.alive)
self.assertEqual(0, broker1.attempt)
self.assertEqual(True, broker1.reachable)
# still alive, just unreach
self.assertEqual(True, broker2.alive)
self.assertEqual(2, broker2.attempt)
self.assertEqual(False, broker2.reachable)
time.sleep(60)
### Now it gets bad: the 2-series satellites go DEAD!
self.dispatcher.check_alive()
# Check good values
self.assertEqual(True, scheduler1.alive)
self.assertEqual(0, scheduler1.attempt)
self.assertEqual(True, scheduler1.reachable)
# now dead after 3 failed attempts
self.assertEqual(False, scheduler2.alive)
self.assertEqual(3, scheduler2.attempt)
self.assertEqual(False, scheduler2.reachable)
# and others satellites too
self.assertEqual(True, poller1.alive)
self.assertEqual(0, poller1.attempt)
self.assertEqual(True, poller1.reachable)
# passive poller: still alive and reachable
self.assertEqual(True, poller2.alive)
self.assertEqual(0, poller2.attempt)
self.assertEqual(True, poller2.reachable)
# and others satellites too
self.assertEqual(True, reactionner1.alive)
self.assertEqual(0, reactionner1.attempt)
self.assertEqual(True, reactionner1.reachable)
# now dead after 3 failed attempts
self.assertEqual(False, reactionner2.alive)
self.assertEqual(3, reactionner2.attempt)
self.assertEqual(False, reactionner2.reachable)
# and others satellites too
self.assertEqual(True, broker1.alive)
self.assertEqual(0, broker1.attempt)
self.assertEqual(True, broker1.reachable)
# now dead after 3 failed attempts
self.assertEqual(False, broker2.alive)
self.assertEqual(3, broker2.attempt)
self.assertEqual(False, broker2.reachable)
# Now we check how we should dispatch confs
self.dispatcher.check_dispatch()
# the conf should not be in a good shape
self.assertEqual(False, self.dispatcher.dispatch_ok)
# Now we really dispatch them!
self.dispatcher.dispatch()
self.assert_any_log_match('Dispatch OK of conf in scheduler scheduler-all-1')
self.assert_any_log_match('Dispatch OK of configuration 0 to reactionner reactionner-all-1')
self.assert_any_log_match('Dispatch OK of configuration 0 to poller poller-all-1')
self.assert_any_log_match('Dispatch OK of configuration 0 to broker broker-all-1')
self.clear_logs()
# And look if we really dispatch conf as we should
for r in self.conf.realms:
for cfg in r.confs.values():
self.assertEqual(True, cfg.is_assigned)
self.assertEqual(scheduler1, cfg.assigned_to)
if __name__ == '__main__':
unittest.main()
| 10,617 | Python | .py | 249 | 34.827309 | 100 | 0.677144 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,252 | test_bad_notification_period.py | shinken-solutions_shinken/test/test_bad_notification_period.py |
#!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken_test import *
class TestBadNotificationPeriod(ShinkenTest):
def setUp(self):
self.setup_with_file('etc/shinken_bad_notification_period.cfg')
# If a notification period is bad, it should be caught!
def test_bad_notification_period(self):
self.assertEqual(False, self.conf.conf_is_correct)
if __name__ == '__main__':
unittest.main()
| 1,339 | Python | .py | 32 | 39.5625 | 82 | 0.756549 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,253 | test_protect_esclamation_point.py | shinken-solutions_shinken/test/test_protect_esclamation_point.py |
#!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken_test import *
class TestProtectExclamationPoint(ShinkenTest):
def setUp(self):
self.setup_with_file('etc/shinken_protect_esclamation_point.cfg')
def test_dummy(self):
#
# Config is not correct because of a wrong relative path
# in the main config file
#
print("Get the hosts and services")
now = time.time()
host = self.sched.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
router = self.sched.hosts.find_by_name("test_router_0")
router.checks_in_progress = []
router.act_depend_of = [] # ignore the router
svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
svc.checks_in_progress = []
svc.act_depend_of = [] # no hostchecks on critical checkresults
print(svc.check_command.args)
self.assertIn(u'ti!ti', svc.check_command.args)
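# NOTE (illustrative): '!' separates arguments in Nagios-style check_command
# references (command_name!arg1!arg2), so a literal '!' inside an argument is
# assumed to be escaped in the fixture, e.g. (hypothetical definition):
#   check_command check_service!ti\!ti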
if __name__ == '__main__':
unittest.main()
| 2,006 | Python | .py | 47 | 38.191489 | 91 | 0.700154 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,254 | test_definition_order.py | shinken-solutions_shinken/test/test_definition_order.py |
#!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken_test import *
class TestDefinitionOrder(ShinkenTest):
def setUp(self):
self.setup_with_file('etc/shinken_definition_order.cfg')
def test_definition_order(self):
print("Get the hosts and services")
now = time.time()
svc_specific = self.sched.services.find_srv_by_name_and_hostname("test_host_specific", "ZE-SERVICE")
svc_generic = self.sched.services.find_srv_by_name_and_hostname("test_host_generic", "ZE-SERVICE")
self.assertIsNot(svc_specific, None)
self.assertIsNot(svc_generic, None)
print(svc_generic.check_command.command.command_name)
self.assertEqual('general', svc_generic.check_command.command.command_name)
print(svc_specific.check_command.command.command_name)
self.assertEqual('specific', svc_specific.check_command.command.command_name)
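    # Illustrative sketch of the overlap being tested (not the actual
    # fixture; group and command names are made up). Assuming, as the
    # assertions above suggest, that the definition with the lowest
    # definition_order wins when two definitions target the same service:
    #
    #   define service {
    #       host_name            test_host_specific
    #       service_description  ZE-SERVICE
    #       check_command        specific
    #       definition_order     1
    #   }
    #   define service {
    #       hostgroup_name       some_group_holding_both_hosts
    #       service_description  ZE-SERVICE
    #       check_command        general
    #       definition_order     2
    #   }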
if __name__ == '__main__':
unittest.main()
| 1,884 | Python | .py | 40 | 43.275 | 108 | 0.740033 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,255 | test_business_correlator_output.py | shinken-solutions_shinken/test/test_business_correlator_output.py |
#!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test business rules output based on template expansion.
#
from __future__ import absolute_import, division, print_function, unicode_literals
import time
from shinken_test import unittest, ShinkenTest, time_hacker
from shinken.macroresolver import MacroResolver
class TestBusinesscorrelOutput(ShinkenTest):
def setUp(self):
self.setup_with_file('etc/shinken_business_correlator_output.cfg')
def test_bprule_empty_output(self):
svc_cor = self.sched.services.find_srv_by_name_and_hostname("dummy", "empty_bp_rule_output")
self.assertIs(True, svc_cor.got_business_rule)
self.assertIsNot(svc_cor.business_rule, None)
self.assertEqual("", svc_cor.get_business_rule_output())
def test_bprule_expand_template_macros(self):
svc_cor = self.sched.services.find_srv_by_name_and_hostname("dummy", "formatted_bp_rule_output")
self.assertIs(True, svc_cor.got_business_rule)
self.assertIsNot(svc_cor.business_rule, None)
svc1 = self.sched.services.find_srv_by_name_and_hostname("test_host_01", "srv1")
svc2 = self.sched.services.find_srv_by_name_and_hostname("test_host_02", "srv2")
svc3 = self.sched.services.find_srv_by_name_and_hostname("test_host_03", "srv3")
hst4 = self.sched.hosts.find_by_name("test_host_04")
for i in range(2):
self.scheduler_loop(1, [
[svc1, 0, 'OK test_host_01/srv1'],
[svc2, 1, 'WARNING test_host_02/srv2'],
[svc3, 2, 'CRITICAL test_host_03/srv3'],
[hst4, 2, 'DOWN test_host_04']])
time.sleep(61)
self.sched.manage_internal_checks()
self.sched.consume_results()
# Performs checks
m = MacroResolver()
template = "$STATUS$,$SHORTSTATUS$,$HOSTNAME$,$SERVICEDESC$,$FULLNAME$"
data = svc1.get_data_for_checks()
output = m.resolve_simple_macros_in_string(template, data)
self.assertEqual("OK,O,test_host_01,srv1,test_host_01/srv1", output)
data = svc2.get_data_for_checks()
output = m.resolve_simple_macros_in_string(template, data)
self.assertEqual("WARNING,W,test_host_02,srv2,test_host_02/srv2", output)
data = svc3.get_data_for_checks()
output = m.resolve_simple_macros_in_string(template, data)
self.assertEqual("CRITICAL,C,test_host_03,srv3,test_host_03/srv3", output)
data = hst4.get_data_for_checks()
output = m.resolve_simple_macros_in_string(template, data)
self.assertEqual("DOWN,D,test_host_04,,test_host_04", output)
data = svc_cor.get_data_for_checks()
output = m.resolve_simple_macros_in_string(template, data)
self.assertEqual("CRITICAL,C,dummy,formatted_bp_rule_output,dummy/formatted_bp_rule_output", output)
def test_bprule_output(self):
svc_cor = self.sched.services.find_srv_by_name_and_hostname("dummy", "formatted_bp_rule_output")
self.assertIs(True, svc_cor.got_business_rule)
self.assertIsNot(svc_cor.business_rule, None)
self.assertEqual("$STATUS$ $([$STATUS$: $FULLNAME$] )$", svc_cor.business_rule_output_template)
svc1 = self.sched.services.find_srv_by_name_and_hostname("test_host_01", "srv1")
svc2 = self.sched.services.find_srv_by_name_and_hostname("test_host_02", "srv2")
svc3 = self.sched.services.find_srv_by_name_and_hostname("test_host_03", "srv3")
hst4 = self.sched.hosts.find_by_name("test_host_04")
for i in range(2):
self.scheduler_loop(1, [
[svc1, 0, 'OK test_host_01/srv1'],
[svc2, 1, 'WARNING test_host_02/srv2'],
[svc3, 2, 'CRITICAL test_host_03/srv3'],
[hst4, 2, 'DOWN test_host_04']])
time.sleep(61)
self.sched.manage_internal_checks()
self.sched.consume_results()
# Performs checks
output = svc_cor.output
self.assertGreater(output.find("[WARNING: test_host_02/srv2]"), 0)
self.assertGreater(output.find("[CRITICAL: test_host_03/srv3]"), 0)
self.assertGreater(output.find("[DOWN: test_host_04]"), 0)
# Should not display OK state checks
self.assertEqual(-1, output.find("[OK: test_host_01/srv1]") )
self.assertTrue(output.startswith("CRITICAL"))
def test_bprule_xof_one_critical_output(self):
svc_cor = self.sched.services.find_srv_by_name_and_hostname("dummy", "formatted_bp_rule_xof_output")
self.assertIs(True, svc_cor.got_business_rule)
self.assertIsNot(svc_cor.business_rule, None)
self.assertEqual("$STATUS$ $([$STATUS$: $FULLNAME$] )$", svc_cor.business_rule_output_template)
svc1 = self.sched.services.find_srv_by_name_and_hostname("test_host_01", "srv1")
svc2 = self.sched.services.find_srv_by_name_and_hostname("test_host_02", "srv2")
svc3 = self.sched.services.find_srv_by_name_and_hostname("test_host_03", "srv3")
hst4 = self.sched.hosts.find_by_name("test_host_04")
for i in range(2):
self.scheduler_loop(1, [
[svc1, 0, 'OK test_host_01/srv1'],
[svc2, 0, 'OK test_host_02/srv2'],
[svc3, 2, 'CRITICAL test_host_03/srv3'],
[hst4, 0, 'UP test_host_04']])
time.sleep(61)
self.sched.manage_internal_checks()
self.sched.consume_results()
# Performs checks
self.assertEqual(0, svc_cor.business_rule.get_state())
self.assertEqual("OK [CRITICAL: test_host_03/srv3]", svc_cor.output)
def test_bprule_xof_all_ok_output(self):
svc_cor = self.sched.services.find_srv_by_name_and_hostname("dummy", "formatted_bp_rule_xof_output")
self.assertIs(True, svc_cor.got_business_rule)
self.assertIsNot(svc_cor.business_rule, None)
self.assertEqual("$STATUS$ $([$STATUS$: $FULLNAME$] )$", svc_cor.business_rule_output_template)
svc1 = self.sched.services.find_srv_by_name_and_hostname("test_host_01", "srv1")
svc2 = self.sched.services.find_srv_by_name_and_hostname("test_host_02", "srv2")
svc3 = self.sched.services.find_srv_by_name_and_hostname("test_host_03", "srv3")
hst4 = self.sched.hosts.find_by_name("test_host_04")
for i in range(2):
self.scheduler_loop(1, [
[svc1, 0, 'OK test_host_01/srv1'],
[svc2, 0, 'OK test_host_02/srv2'],
[svc3, 0, 'OK test_host_03/srv3'],
[hst4, 0, 'UP test_host_04']])
time.sleep(61)
self.sched.manage_internal_checks()
self.sched.consume_results()
# Performs checks
self.assertEqual(0, svc_cor.business_rule.get_state())
self.assertEqual("OK all checks were successful.", svc_cor.output)
if __name__ == '__main__':
unittest.main()
| 7,678 | Python | .py | 139 | 46.805755 | 108 | 0.654194 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,256 | test_servicedependency_implicit_hostgroup.py | shinken-solutions_shinken/test/test_servicedependency_implicit_hostgroup.py |
#!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken_test import *
class TestServiceDepAndGroups(ShinkenTest):
def setUp(self):
self.setup_with_file('etc/shinken_servicedependency_implicit_hostgroup.cfg')
def test_implicithostgroups(self):
#
# Config is not correct because of a wrong relative path
# in the main config file
#
print("Get the hosts and services")
now = time.time()
svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
svc_postfix = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "POSTFIX")
self.assertIsNot(svc_postfix, None)
svc_snmp = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "SNMP")
self.assertIsNot(svc_snmp, None)
svc_cpu = self.sched.services.find_srv_by_name_and_hostname("test_router_0", "CPU")
self.assertIsNot(svc_cpu, None)
svc_snmp2 = self.sched.services.find_srv_by_name_and_hostname("test_router_0", "SNMP")
self.assertIsNot(svc_snmp2, None)
svc_postfix_fathers = [c[0].get_full_name() for c in svc_postfix.act_depend_of]
print(svc_postfix_fathers)
# Should be [u'test_router_0/SNMP', u'test_host_0/SNMP', u'test_host_0']
self.assertIn('test_router_0/SNMP', svc_postfix_fathers)
self.assertIn('test_host_0/SNMP', svc_postfix_fathers)
# Now look for the routers services
svc_cpu_fathers = [c[0].get_full_name() for c in svc_cpu.act_depend_of]
print(svc_cpu_fathers)
# Should be [u'test_router_0/SNMP', u'test_host_0/SNMP', u'test_host_0']
self.assertIn('test_router_0/SNMP', svc_cpu_fathers)
self.assertIn('test_host_0/SNMP', svc_cpu_fathers)
svc.act_depend_of = [] # no hostchecks on critical checkresults
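    # Illustrative sketch of the expansion being tested (the real fixture
    # is etc/shinken_servicedependency_implicit_hostgroup.cfg; the group
    # name below is made up). A dependency declared against a hostgroup
    # only, with no explicit host_name,
    #
    #   define servicedependency {
    #       hostgroup_name                 snmp_hosts
    #       service_description            SNMP
    #       dependent_service_description  POSTFIX
    #   }
    #
    # is expanded for every host of the group, which is why POSTFIX ends
    # up depending on SNMP on each of them.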
def test_implicithostnames(self):
#
# Config is not correct because of a wrong relative path
# in the main config file
#
print("Get the hosts and services")
now = time.time()
svc_postfix = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "POSTFIX_BYSSH")
self.assertIsNot(svc_postfix, None)
svc_ssh = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "SSH")
self.assertIsNot(svc_ssh, None)
svc_cpu = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "CPU_BYSSH")
self.assertIsNot(svc_cpu, None)
svc_postfix_fathers = [c[0].get_full_name() for c in svc_postfix.act_depend_of]
print(svc_postfix_fathers)
# Should be [u'test_router_0/SNMP', u'test_host_0/SNMP', u'test_host_0']
self.assertIn('test_host_0/SSH', svc_postfix_fathers)
# Now look for the routers services
svc_cpu_fathers = [c[0].get_full_name() for c in svc_cpu.act_depend_of]
print(svc_cpu_fathers)
# Should be [u'test_router_0/SNMP', u'test_host_0/SNMP', u'test_host_0']
self.assertIn('test_host_0/SSH', svc_cpu_fathers)
if __name__ == '__main__':
unittest.main()
| 4,033 | Python | .py | 79 | 44.632911 | 103 | 0.678953 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,257 | test_clean_sched_queues.py | shinken-solutions_shinken/test/test_clean_sched_queues.py |
#!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken_test import *
class TestSchedCleanQueues(ShinkenTest):
def setUp(self):
self.setup_with_file('etc/shinken_clean_sched_queues.cfg')
# Try to generate a bunch of external commands
    # and see if they are dropped like they should be
def test_sched_clean_queues(self):
print("Get the hosts and services")
now = time.time()
host = self.sched.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
router = self.sched.hosts.find_by_name("test_router_0")
router.checks_in_progress = []
router.act_depend_of = [] # ignore the router
svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
svc.checks_in_progress = []
svc.act_depend_of = [] # no hostchecks on critical checkresults
#host.__class__.obsess_over = True
#host.obsess_over_host = True
for i in range(1, 1001):
host.get_obsessive_compulsive_processor_command()
print("New len", len(host.actions))
self.assertGreaterEqual(len(host.actions), 1000)
self.sched.get_new_actions()
print(len(self.sched.actions))
# So get our 1000 external commands
self.assertGreaterEqual(len(self.sched.actions), 1000)
        # Try to call the clean, there are just too many!
self.sched.clean_queues()
        # Should have something like 16 event handlers
print(len(self.sched.actions))
self.assertLess(len(self.sched.actions), 30)
# Now for Notifications and co
for i in range(1, 1001):
host.create_notifications('PROBLEM')
self.sched.get_new_actions()
print(len(self.sched.actions))
# So get our 1000 notifications
self.assertGreaterEqual(len(self.sched.actions), 1000)
        # Try to call the clean, there are just too many!
self.sched.clean_queues()
print(len(self.sched.actions))
self.assertLess(len(self.sched.actions), 30)
##### And now broks
l = []
for i in range(1, 1001):
b = host.get_update_status_brok()
l.append(b)
host.broks = l
self.sched.get_new_broks()
print("LEn broks", len(self.sched.broks))
self.assertGreaterEqual(len(self.sched.broks), 1000)
self.sched.clean_queues()
print("LEn broks", len(self.sched.broks))
self.assertLess(len(self.sched.broks), 30)
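    # The policy exercised above, pictured as a rough standalone sketch.
    # This is NOT the scheduler's actual clean_queues() code, just the
    # idea it implements: once a queue passes a threshold, keep only the
    # most urgent entries and drop the rest.
    #
    #   def clean_queue(actions, keep=30):
    #       if len(actions) <= keep:
    #           return actions
    #       return sorted(actions, key=lambda a: a.t_to_go)[:keep]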
if __name__ == '__main__':
unittest.main()
| 3,528 | Python | .py | 81 | 36.975309 | 91 | 0.667735 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,258 | test_notifway.py | shinken-solutions_shinken/test/test_notifway.py |
#!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from __future__ import absolute_import, division, print_function, unicode_literals
import time
from shinken_test import unittest, ShinkenTest
class TestConfig(ShinkenTest):
def setUp(self):
self.setup_with_file('etc/shinken_notif_way.cfg')
def test_contact_def(self):
#
# Config is not correct because of a wrong relative path
# in the main config file
#
print("Get the contact")
now = time.time()
contact = self.sched.contacts.find_by_name("test_contact")
print("The contact", contact.__dict__)
print("All notification Way:")
for nw in self.sched.notificationways:
print("\t", nw.notificationway_name)
email_in_day = self.sched.notificationways.find_by_name('email_in_day')
self.assertIn(email_in_day, contact.notificationways)
email_s_cmd = email_in_day.service_notification_commands.pop()
email_h_cmd = email_in_day.host_notification_commands.pop()
sms_the_night = self.sched.notificationways.find_by_name('sms_the_night')
self.assertIn(sms_the_night, contact.notificationways)
sms_s_cmd = sms_the_night.service_notification_commands.pop()
sms_h_cmd = sms_the_night.host_notification_commands.pop()
# And check the criticity values
self.assertEqual(0, email_in_day.min_business_impact)
self.assertEqual(5, sms_the_night.min_business_impact)
print("Contact notification way(s):")
for nw in contact.notificationways:
print("\t", nw.notificationway_name)
for c in nw.service_notification_commands:
print("\t\t", c.get_name())
contact_simple = self.sched.contacts.find_by_name("test_contact_simple")
# It's the created notifway for this simple contact
test_contact_simple_inner_notificationway = self.sched.notificationways.find_by_name("test_contact_simple_inner_notificationway")
print("Simple contact")
for nw in contact_simple.notificationways:
print("\t", nw.notificationway_name)
for c in nw.service_notification_commands:
print("\t\t", c.get_name())
self.assertIn(test_contact_simple_inner_notificationway, contact_simple.notificationways)
        # from now on we take a huge value as the criticity
huge_criticity = 5
# Now all want* functions
# First is ok with warning alerts
self.assertEqual(True, email_in_day.want_service_notification(now, 'WARNING', 'PROBLEM', huge_criticity) )
        # But an SMS is NO WAY for a warning. When we sleep, we wake up for critical only, guy!
self.assertEqual(False, sms_the_night.want_service_notification(now, 'WARNING', 'PROBLEM', huge_criticity) )
# Same with contacts now
# First is ok for warning in the email_in_day nw
self.assertEqual(True, contact.want_service_notification(now, 'WARNING', 'PROBLEM', huge_criticity) )
# Simple is not ok for it
self.assertEqual(False, contact_simple.want_service_notification(now, 'WARNING', 'PROBLEM', huge_criticity) )
# Then for host notification
# First is ok for warning in the email_in_day nw
self.assertEqual(True, contact.want_host_notification(now, 'FLAPPING', 'PROBLEM', huge_criticity) )
# Simple is not ok for it
self.assertEqual(False, contact_simple.want_host_notification(now, 'FLAPPING', 'PROBLEM', huge_criticity) )
# And now we check that we refuse SMS for a low level criticity
        # I do not want to be awakened by a dev server! When I sleep, I sleep!
        # (and my wife will kill me if I do...)
        # We take the EMAIL test because SMS covers the night only, so we take a very low value for criticity here
self.assertEqual(False, email_in_day.want_service_notification(now, 'WARNING', 'PROBLEM', -1) )
# Test the heritage for notification ways
host_template = self.sched.hosts.find_by_name("test_host_contact_template")
commands_contact_template_1 = host_template.contacts[0].get_notification_commands('host')
commands_contact_template_2 = host_template.contacts[1].get_notification_commands('host')
resp = sorted([sorted([command.get_name() for command in commands_contact_template_1]),
sorted([command.get_name() for command in commands_contact_template_2])])
self.assertEqual([['notify-host', 'notify-host-work'], ['notify-host-sms', 'notify-host-work']], resp)
commands_contact_template_1 = host_template.contacts[0].get_notification_commands('service')
commands_contact_template_2 = host_template.contacts[1].get_notification_commands('service')
resp = sorted([sorted([command.get_name() for command in commands_contact_template_1]),
sorted([command.get_name() for command in commands_contact_template_2])])
self.assertEqual([['notify-service', 'notify-service-work'], ['notify-service-sms', 'notify-service-work']],
resp)
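    # Sketch of the objects involved (illustrative; the real fixture is
    # etc/shinken_notif_way.cfg, and the command names below are made up).
    # A notification way bundles periods, commands and a minimum business
    # impact, and contacts reference it:
    #
    #   define notificationway {
    #       notificationway_name           sms_the_night
    #       service_notification_period    night
    #       host_notification_period       night
    #       service_notification_commands  notify-by-sms
    #       host_notification_commands     notify-by-sms
    #       min_business_impact            5
    #   }
    #   define contact {
    #       contact_name      test_contact
    #       notificationways  email_in_day,sms_the_night
    #   }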
if __name__ == '__main__':
unittest.main()
| 6,013 | Python | .py | 102 | 51.117647 | 137 | 0.68802 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,259 | test_timeperiod_inheritance.py | shinken-solutions_shinken/test/test_timeperiod_inheritance.py |
#!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken_test import *
class TestConfig(ShinkenTest):
def setUp(self):
self.setup_with_file('etc/shinken_timeperiod_inheritance.cfg')
def test_dummy(self):
#
# Config is not correct because of a wrong relative path
# in the main config file
#
print("Get the Timeperiods")
now = time.time()
tp = self.sched.timeperiods.find_by_name("24x7")
print("TP", tp.__dict__)
# sunday should be inherited from templates
print("Check for sunday in the timeperiod")
got_sunday = False
for dr in tp.dateranges:
print(dr.__dict__)
if hasattr(dr, 'day') and dr.day == 'sunday':
got_sunday = True
self.assertEqual(True, got_sunday)
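    # Illustrative sketch of the inheritance being tested (the real
    # fixture is etc/shinken_timeperiod_inheritance.cfg; the template name
    # below is made up): the sunday entry comes from a template, not from
    # the 24x7 definition itself.
    #
    #   define timeperiod {
    #       name      tp_with_sunday
    #       sunday    00:00-24:00
    #       register  0
    #   }
    #   define timeperiod {
    #       timeperiod_name  24x7
    #       use              tp_with_sunday
    #       monday           00:00-24:00
    #   }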
if __name__ == '__main__':
unittest.main()
| 1,798 | Python | .py | 46 | 34.304348 | 82 | 0.691734 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,260 | test_problem_impact.py | shinken-solutions_shinken/test/test_problem_impact.py |
#!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test host- and service-downtimes.
#
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken_test import *
class TestProblemImpact(ShinkenTest):
def setUp(self):
self.setup_with_file('etc/shinken_problem_impact.cfg')
def test_problems_impacts(self):
self.print_header()
# retry_interval 2
# critical notification
# run loop -> another notification
# First initialize routers 0 and 1
now = time.time()
# The problem_impact_state change should be enabled in the configuration
self.assertEqual(True, self.conf.enable_problem_impacts_states_change)
host_router_0 = self.sched.hosts.find_by_name("test_router_0")
host_router_0.checks_in_progress = []
self.assertEqual(2, host_router_0.business_impact)
host_router_1 = self.sched.hosts.find_by_name("test_router_1")
host_router_1.checks_in_progress = []
self.assertEqual(2, host_router_1.business_impact)
        # Then initialize the hosts under these routers
host_0 = self.sched.hosts.find_by_name("test_host_0")
host_0.checks_in_progress = []
host_1 = self.sched.hosts.find_by_name("test_host_1")
host_1.checks_in_progress = []
all_hosts = [host_router_0, host_router_1, host_0, host_1]
all_routers = [host_router_0, host_router_1]
all_servers = [host_0, host_1]
#--------------------------------------------------------------
# initialize host states as UP
#--------------------------------------------------------------
print("- 4 x UP -------------------------------------")
self.scheduler_loop(1, [[host_router_0, 0, 'UP'], [host_router_1, 0, 'UP'], [host_0, 0, 'UP'], [host_1, 0, 'UP']], do_sleep=False)
for h in all_hosts:
self.assertEqual('UP', h.state)
self.assertEqual('HARD', h.state_type)
#--------------------------------------------------------------
# Now we add some problems to routers
#--------------------------------------------------------------
print("- routers get DOWN /SOFT-------------------------------------")
self.scheduler_loop(1, [[host_router_0, 2, 'DOWN'], [host_router_1, 2, 'DOWN']], do_sleep=False)
# Max attempt is at 5, should be soft now
for h in all_routers:
self.assertEqual('DOWN', h.state)
self.assertEqual('SOFT', h.state_type)
print("- routers get DOWN /HARD-------------------------------------")
# Now put 4 more checks so we get DOWN/HARD
self.scheduler_loop(1, [[host_router_0, 2, 'DOWN'], [host_router_1, 2, 'DOWN']], do_sleep=False)
self.scheduler_loop(1, [[host_router_0, 2, 'DOWN'], [host_router_1, 2, 'DOWN']], do_sleep=False)
self.scheduler_loop(1, [[host_router_0, 2, 'DOWN'], [host_router_1, 2, 'DOWN']], do_sleep=False)
self.scheduler_loop(1, [[host_router_0, 2, 'DOWN'], [host_router_1, 2, 'DOWN']], do_sleep=False)
        # Max attempt is reached, should be HARD now
for h in all_routers:
self.assertEqual('DOWN', h.state)
self.assertEqual('HARD', h.state_type)
#--------------------------------------------------------------
# Routers get HARD/DOWN
# should be problems now!
#--------------------------------------------------------------
# Now check in the brok generation too
host_router_0_brok = host_router_0.get_update_status_brok()
#host_router_0_brok.prepare()
host_router_1_brok = host_router_1.get_update_status_brok()
#host_router_1_brok.prepare()
# Should be problems and have sub servers as impacts
for h in all_routers:
self.assertEqual(True, h.is_problem)
            # Now routers are problems, they should have taken the max
            # business_impact value of the impacts, so here 5
self.assertEqual(5, h.business_impact)
for s in all_servers:
self.assertIn(s, h.impacts)
self.assertIn(s.get_dbg_name(), host_router_0_brok.data['impacts']['hosts'])
self.assertIn(s.get_dbg_name(), host_router_1_brok.data['impacts']['hosts'])
        # Should have host notification, but it's not so simple:
        # our contact says: not under 5, and our hosts are at 2. But
        # the impacts have a huge business_impact, so the hosts inherit it
self.assert_any_log_match('HOST NOTIFICATION.*;')
self.show_and_clear_logs()
# Now impacts should really be .. impacts :)
for s in all_servers:
self.assertEqual(True, s.is_impact)
self.assertEqual('UNREACHABLE', s.state)
# And check the services are impacted too
for svc in s.services:
print("Service state", svc.state)
self.assertEqual('UNKNOWN', svc.state)
self.assertIn(svc.get_dbg_name(), host_router_0_brok.data['impacts']['services'])
self.assertIn(svc.get_dbg_name(), host_router_1_brok.data['impacts']['services'])
brk_svc = svc.get_update_status_brok()
#brk_svc.prepare()
self.assertEqual(['test_router_0', 'test_router_1'], brk_svc.data['source_problems']['hosts'])
for h in all_routers:
self.assertIn(h, s.source_problems)
brk_hst = s.get_update_status_brok()
#brk_hst.prepare()
self.assertIn(h.get_dbg_name(), brk_hst.data['source_problems']['hosts'])
#--------------------------------------------------------------
# One router get UP now
#--------------------------------------------------------------
print("- 1 X UP for a router ------------------------------")
        # Ok, here the problem/impact propagation is checked. Now what
        # if one router gets back? :)
self.scheduler_loop(1, [[host_router_0, 0, 'UP']], do_sleep=False)
# should be UP/HARD now
self.assertEqual('UP', host_router_0.state)
self.assertEqual('HARD', host_router_0.state_type)
# And should not be a problem any more!
self.assertEqual(False, host_router_0.is_problem)
self.assertEqual([], host_router_0.impacts)
# And check if it's no more in sources problems of others servers
for s in all_servers:
# Still impacted by the other server
self.assertEqual(True, s.is_impact)
self.assertEqual([host_router_1], s.source_problems)
#--------------------------------------------------------------
# The other router get UP :)
#--------------------------------------------------------------
print("- 1 X UP for the last router ------------------------------")
        # What if the last router gets back? :)
self.scheduler_loop(1, [[host_router_1, 0, 'UP']], do_sleep=False)
# should be UP/HARD now
self.assertEqual('UP', host_router_1.state)
self.assertEqual('HARD', host_router_1.state_type)
# And should not be a problem any more!
self.assertEqual(False, host_router_1.is_problem)
self.assertEqual([], host_router_1.impacts)
# And check if it's no more in sources problems of others servers
for s in all_servers:
# Still impacted by the other server
self.assertEqual(False, s.is_impact)
self.assertEqual('UP', s.state)
self.assertEqual([], s.source_problems)
# And our "business_impact" should have failed back to our
# conf value, so 2
self.assertEqual(2, host_router_0.business_impact)
self.assertEqual(2, host_router_1.business_impact)
# It's done :)
def test_problems_impacts_with_crit_mod(self):
self.print_header()
# retry_interval 2
# critical notification
# run loop -> another notification
# First initialize routers 0 and 1
now = time.time()
# The problem_impact_state change should be enabled in the configuration
self.assertEqual(True, self.conf.enable_problem_impacts_states_change)
host_router_0 = self.sched.hosts.find_by_name("test_router_0")
host_router_0.checks_in_progress = []
self.assertEqual(2, host_router_0.business_impact)
host_router_1 = self.sched.hosts.find_by_name("test_router_1")
host_router_1.checks_in_progress = []
self.assertEqual(2, host_router_1.business_impact)
        # Then initialize the hosts under these routers
host_0 = self.sched.hosts.find_by_name("test_host_0")
host_0.checks_in_progress = []
host_1 = self.sched.hosts.find_by_name("test_host_1")
host_1.checks_in_progress = []
all_hosts = [host_router_0, host_router_1, host_0, host_1]
all_routers = [host_router_0, host_router_1]
all_servers = [host_0, host_1]
# Our crit mod that will allow us to play with on the fly
# business_impact modulation
critmod = self.sched.conf.businessimpactmodulations.find_by_name('Raise')
self.assertIsNot(critmod, None)
        # We lie here: from now on we do not want criticities
for h in all_hosts:
for s in h.services:
s.business_impact = 2
#--------------------------------------------------------------
# initialize host states as UP
#--------------------------------------------------------------
print("- 4 x UP -------------------------------------")
self.scheduler_loop(1, [[host_router_0, 0, 'UP'], [host_router_1, 0, 'UP'], [host_0, 0, 'UP'], [host_1, 0, 'UP']], do_sleep=False)
for h in all_hosts:
self.assertEqual('UP', h.state)
self.assertEqual('HARD', h.state_type)
#--------------------------------------------------------------
# Now we add some problems to routers
#--------------------------------------------------------------
print("- routers get DOWN /SOFT-------------------------------------")
self.scheduler_loop(1, [[host_router_0, 2, 'DOWN'], [host_router_1, 2, 'DOWN']], do_sleep=False)
# Max attempt is at 5, should be soft now
for h in all_routers:
self.assertEqual('DOWN', h.state)
self.assertEqual('SOFT', h.state_type)
print("- routers get DOWN /HARD-------------------------------------")
# Now put 4 more checks so we get DOWN/HARD
self.scheduler_loop(1, [[host_router_0, 2, 'DOWN'], [host_router_1, 2, 'DOWN']], do_sleep=False)
self.scheduler_loop(1, [[host_router_0, 2, 'DOWN'], [host_router_1, 2, 'DOWN']], do_sleep=False)
self.scheduler_loop(1, [[host_router_0, 2, 'DOWN'], [host_router_1, 2, 'DOWN']], do_sleep=False)
self.scheduler_loop(1, [[host_router_0, 2, 'DOWN'], [host_router_1, 2, 'DOWN']], do_sleep=False)
        # Max attempt is reached, should be HARD now
for h in all_routers:
self.assertEqual('DOWN', h.state)
self.assertEqual('HARD', h.state_type)
#--------------------------------------------------------------
# Routers get HARD/DOWN
# should be problems now!
#--------------------------------------------------------------
# Now check in the brok generation too
host_router_0_brok = host_router_0.get_update_status_brok()
#host_router_0_brok.prepare()
host_router_1_brok = host_router_1.get_update_status_brok()
#host_router_1_brok.prepare()
# Should be problems and have sub servers as impacts
for h in all_routers:
self.assertEqual(True, h.is_problem)
            # Now routers are problems, they should have taken the max
            # business_impact value of the impacts, so here 2 because we lowered all criticities for our test
self.assertEqual(2, h.business_impact)
for s in all_servers:
self.assertIn(s, h.impacts)
self.assertIn(s.get_dbg_name(), host_router_0_brok.data['impacts']['hosts'])
self.assertIn(s.get_dbg_name(), host_router_1_brok.data['impacts']['hosts'])
        # Should have NO host notification here, and it's not so simple:
        # our contact says: not under 5, and our hosts are at 2. And here
        # the business_impact was still low for our test
self.assert_no_log_match('HOST NOTIFICATION.*;')
self.show_and_clear_logs()
# Now impacts should really be .. impacts :)
for s in all_servers:
self.assertEqual(True, s.is_impact)
self.assertEqual('UNREACHABLE', s.state)
# And check the services are impacted too
for svc in s.services:
print("Service state", svc.state)
self.assertEqual('UNKNOWN', svc.state)
self.assertIn(svc.get_dbg_name(), host_router_0_brok.data['impacts']['services'])
self.assertIn(svc.get_dbg_name(), host_router_1_brok.data['impacts']['services'])
brk_svc = svc.get_update_status_brok()
#brk_svc.prepare()
self.assertEqual(['test_router_0', 'test_router_1'], brk_svc.data['source_problems']['hosts'])
for h in all_routers:
self.assertIn(h, s.source_problems)
brk_hst = s.get_update_status_brok()
#brk_hst.prepare()
self.assertIn(h.get_dbg_name(), brk_hst.data['source_problems']['hosts'])
for h in all_hosts:
for s in h.services:
s.update_business_impact_value()
self.assertEqual(2, s.business_impact)
# Now we play with modulation!
        # We set the modulation period to None so it will match all the time :)
critmod.modulation_period = None
crit_srv = self.sched.services.find_srv_by_name_and_hostname("test_host_1", "test_ok_1")
self.assertIn(critmod, crit_srv.business_impact_modulations)
        # Now that the modulation period is always valid, we check that the service
        # really updates its business_impact value
self.sched.update_business_values()
        # So the service with the modulation should get its business_impact raised
self.assertEqual(5, crit_srv.business_impact)
# And the routers too (problems)
self.assertEqual(5, host_router_0.business_impact)
self.assertEqual(5, host_router_1.business_impact)
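        # Sketch of the modulation object driving this (illustrative; see
        # etc/shinken_problem_impact.cfg for the real one, and note the
        # property names are the assumed Shinken ones):
        #
        #   define businessimpactmodulation {
        #       business_impact_modulation_name  Raise
        #       business_impact                  5
        #       modulation_period                24x7
        #   }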
#--------------------------------------------------------------
# One router get UP now
#--------------------------------------------------------------
print("- 1 X UP for a router ------------------------------")
        # Ok, here the problem/impact propagation is checked. Now what
        # if one router gets back? :)
self.scheduler_loop(1, [[host_router_0, 0, 'UP']], do_sleep=False)
# should be UP/HARD now
self.assertEqual('UP', host_router_0.state)
self.assertEqual('HARD', host_router_0.state_type)
# And should not be a problem any more!
self.assertEqual(False, host_router_0.is_problem)
self.assertEqual([], host_router_0.impacts)
# And check if it's no more in sources problems of others servers
for s in all_servers:
# Still impacted by the other server
self.assertEqual(True, s.is_impact)
self.assertEqual([host_router_1], s.source_problems)
#--------------------------------------------------------------
# The other router get UP :)
#--------------------------------------------------------------
print("- 1 X UP for the last router ------------------------------")
        # What if the last router gets back? :)
self.scheduler_loop(1, [[host_router_1, 0, 'UP']], do_sleep=False)
# should be UP/HARD now
self.assertEqual('UP', host_router_1.state)
self.assertEqual('HARD', host_router_1.state_type)
# And should not be a problem any more!
self.assertEqual(False, host_router_1.is_problem)
self.assertEqual([], host_router_1.impacts)
# And check if it's no more in sources problems of others servers
for s in all_servers:
# Still impacted by the other server
self.assertEqual(False, s.is_impact)
self.assertEqual('UP', s.state)
self.assertEqual([], s.source_problems)
# And our "business_impact" should have failed back to our
# conf value, so 2
self.assertEqual(2, host_router_0.business_impact)
self.assertEqual(2, host_router_1.business_impact)
# It's done :)
if __name__ == '__main__':
unittest.main()
| 17,718 | Python | .py | 320 | 45.490625 | 138 | 0.559222 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,261 | test_spaces_in_commands.py | shinken-solutions_shinken/test/test_spaces_in_commands.py |
#!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken_test import *
class TestConfig(ShinkenTest):
def setUp(self):
self.setup_with_file('etc/shinken_spaces_in_commands.cfg')
def test_dummy(self):
#
# Config is not correct because of a wrong relative path
# in the main config file
#
print("Get the hosts and services")
now = time.time()
host = self.sched.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "test_port_2")
## for a in host.actions:
## a.t_to_go = 0
svc.schedule()
for a in svc.actions:
a.t_to_go = 0
        # the scheduler needs to get these new checks in its own queues
self.sched.get_new_actions()
untaggued_checks = self.sched.get_to_run_checks(True, False, poller_tags=['None'])
cc = untaggued_checks[0]
# There must still be a sequence of 10 blanks
self.assertNotEqual(cc.command.find("Port 2 "), -1)
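    # Illustrative sketch of the fixture's intent (the plugin path and
    # command name below are made up): the command definition embeds a
    # long run of blanks that must survive macro resolution intact.
    #
    #   define command {
    #       command_name  check_port_2
    #       command_line  plugins/check_dummy.pl "Port 2           OK"
    #   }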
if __name__ == '__main__':
unittest.main()
| 2,118 | Python | .py | 51 | 36.627451 | 93 | 0.680097 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,262 | test_nested_hostgroups.py | shinken-solutions_shinken/test/test_nested_hostgroups.py |
#!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken_test import *
class TestNestedHostgroups(ShinkenTest):
def setUp(self):
self.setup_with_file('etc/shinken_nested_hostgroups.cfg')
    # We apply the service "NestedService" to the high-level group.
    # That group has a sub-group, the low-level one. Each holds ONE
    # host, so we must get this service on both (see the sketch below).
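    # Illustrative config sketch (the real fixture is
    # etc/shinken_nested_hostgroups.cfg): nesting is declared with
    # hostgroup_members, and anything applied to the parent group must
    # also reach members of the child group.
    #
    #   define hostgroup {
    #       hostgroup_name  low_level
    #       members         test_host_0
    #   }
    #   define hostgroup {
    #       hostgroup_name     high_level
    #       hostgroup_members  low_level
    #       members            test_router_0
    #   }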
def test_lookup_nested_hostgroups(self):
host = self.sched.hosts.find_by_name("test_host_0")
router = self.sched.hosts.find_by_name("test_router_0")
hg_high = self.sched.conf.hostgroups.find_by_name('high_level')
self.assertIsNot(hg_high, None)
self.assertIn(host, hg_high.members)
self.assertIn(router, hg_high.members)
hg_low = self.sched.conf.hostgroups.find_by_name('low_level')
self.assertIsNot(hg_low, None)
self.assertIn(host, hg_low.members)
svc1 = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "NestedService")
self.assertIsNot(svc1, None)
svc2 = self.sched.services.find_srv_by_name_and_hostname("test_router_0", "NestedService")
self.assertIsNot(svc2, None)
# And now look for the service testHostToGroup apply on the group
# high_level, and the host test_host_2 should be on it, so it must have
# this service too
host2 = self.sched.hosts.find_by_name("test_host_2")
self.assertIn(host2, hg_high.members)
svc3 = self.sched.services.find_srv_by_name_and_hostname("test_host_2", "testHostToGroup")
self.assertIsNot(svc3, None)
# And same with a host in the low_group, should have it too
host3 = self.sched.hosts.find_by_name("test_host_3")
self.assertIn(host3, hg_high.members)
svc4 = self.sched.services.find_srv_by_name_and_hostname("test_host_3", "testHostToGroup")
self.assertIsNot(svc4, None)
if __name__ == '__main__':
unittest.main()
| 2,883 | Python | .py | 58 | 44.5 | 98 | 0.707785 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,263 | test_notification_warning.py | shinken-solutions_shinken/test/test_notification_warning.py |
#!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken_test import *
from shinken.notification import Notification
class TestConfig(ShinkenTest):
# setUp is inherited from ShinkenTest
def test_raise_warning_on_notification_errors(self):
host = self.sched.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
cmd = "/error/pl"
# Create a dummy notif
n = Notification('PROBLEM', 'scheduled', 'BADCOMMAND', cmd, host, None, 0)
n.execute()
time.sleep(0.2)
        if n.status != 'done':
n.check_finished(8000)
print(n.__dict__)
self.sched.actions[n.id] = n
self.sched.put_results(n)
# Should have raised something like "Warning: the notification command 'BADCOMMAND' raised an error (exit code=2): '[Errno 2] No such file or directory'"
        # Ok, on HUDSON we have a problem here, so always run with a shell before release please
if os.environ.get('HUDSON_URL', None):
return
self.assert_any_log_match('.*BADCOMMAND.*')
#self.assert_any_log_match(u'.*BADCOMMAND.*') or self.assert_any_log_match('.*BADCOMMAND.*')
if __name__ == '__main__':
unittest.main()
| 2,206 | Python | .py | 49 | 40.306122 | 161 | 0.697718 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,264 | test_update_output_ext_command.py | shinken-solutions_shinken/test/test_update_output_ext_command.py |
#!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken_test import *
class TestUpdateOutputExtCommand(ShinkenTest):
def test_dummy(self):
#
# Config is not correct because of a wrong relative path
# in the main config file
#
print("Get the hosts and services")
now = time.time()
host = self.sched.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
svc.checks_in_progress = []
svc.act_depend_of = [] # no hostchecks on critical checkresults
cmd = "[%lu] PROCESS_SERVICE_OUTPUT;test_host_0;test_ok_0;My ass is cool | toto=30%%" % now
self.sched.run_external_command(cmd)
self.scheduler_loop(2, [])
print(svc.perf_data)
self.assertEqual('toto=30%', svc.perf_data)
if __name__ == '__main__':
unittest.main()
| 1,919 | Python | .py | 45 | 38.266667 | 99 | 0.697749 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,265 | test_reactionner_tag_get_notif.py | shinken-solutions_shinken/test/test_reactionner_tag_get_notif.py |
#!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken_test import *
class TestReactionnerTagGetNotifs(ShinkenTest):
def setUp(self):
self.setup_with_file('etc/shinken_reactionner_tag_get_notif.cfg')
    # For a service, we generate a notification and an event handler.
    # Each one gets a specific reactionner_tag that we will look for
    # (see the sketch below).
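    # Illustrative sketch of the tagging (the real fixture is
    # etc/shinken_reactionner_tag_get_notif.cfg; the reactionner name
    # below is made up): commands carry a reactionner_tag, and a
    # reactionner declares which tags it serves.
    #
    #   define command {
    #       command_name     notify-by-notifier
    #       command_line     plugins/notifier.pl ...
    #       reactionner_tag  runonwindows
    #   }
    #   define reactionner {
    #       reactionner_name  reactionner-windows
    #       reactionner_tags  runonwindows
    #   }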
def test_good_checks_get_only_tags_with_specific_tags(self):
now = int(time.time())
router = self.sched.hosts.find_by_name("test_router_0")
router.checks_in_progress = []
router.act_depend_of = [] # ignore the router
host = self.sched.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
svc.checks_in_progress = []
svc.act_depend_of = [] # no hostchecks on critical checkresults
self.scheduler_loop(2, [[host, 0, 'UP | value1=1 value2=2'], [router, 0, 'UP | rtt=10'], [svc, 0, 'BAD | value1=0 value2=0']])
print("Go bad now")
self.scheduler_loop(2, [[svc, 2, 'BAD | value1=0 value2=0']])
to_del = []
for a in self.sched.actions.values():
print("\n\nA?", a, "\nZZZ%sZZZ" % a.command)
# Set them go NOW
a.t_to_go = now
            # In fact they are already launched, so we re-enable them :)
print("AHAH?", a.status, a.__class__.my_type)
            if a.__class__.my_type == 'notification' and (a.status == 'zombie' or a.status == 'scheduled'):
to_del.append(a.id)
a.status = 'scheduled'
# And look for good tagging
if a.command.startswith('plugins/notifier.pl'):
print('TAG:%s' % a.reactionner_tag)
self.assertEqual('runonwindows', a.reactionner_tag)
if a.command.startswith('plugins/sms.pl'):
print('TAG:%s' % a.reactionner_tag)
self.assertEqual('sms', a.reactionner_tag)
if a.command.startswith('plugins/test_eventhandler.pl'):
print('TAG: %s' % a.reactionner_tag)
self.assertEqual('eventtag', a.reactionner_tag)
print("\n\n")
for _i in to_del:
print("DELETING", self.sched.actions[_i])
del self.sched.actions[_i]
print("NOW ACTION!"*20,'\n\n')
        # Ok, the tags are defined as they should be, now try to get them as a reactionner :)
# Now get only tag ones
taggued_runonwindows_checks = self.sched.get_to_run_checks(False, True, reactionner_tags=['runonwindows'])
self.assertGreater(len(taggued_runonwindows_checks), 0)
for c in taggued_runonwindows_checks:
# Should be the host one only
self.assertTrue(c.command.startswith('plugins/notifier.pl'))
        # Ok, the tags are defined as they should be, now try to get them as a reactionner :)
# Now get only tag ones
taggued_sms_checks = self.sched.get_to_run_checks(False, True, reactionner_tags=['sms'])
self.assertGreater(len(taggued_sms_checks), 0)
for c in taggued_sms_checks:
# Should be the host one only
self.assertTrue(c.command.startswith('plugins/sms.pl'))
taggued_eventtag_checks = self.sched.get_to_run_checks(False, True, reactionner_tags=['eventtag'])
self.assertGreater(len(taggued_eventtag_checks), 0)
for c in taggued_eventtag_checks:
# Should be the host one only
self.assertTrue(c.command.startswith('plugins/test_eventhandler.pl'))
# Same that upper, but with modules types
def test_good_checks_get_only_tags_with_specific_tags_andmodule_types(self):
now = int(time.time())
router = self.sched.hosts.find_by_name("test_router_0")
router.checks_in_progress = []
router.act_depend_of = [] # ignore the router
host = self.sched.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
svc.checks_in_progress = []
svc.act_depend_of = [] # no hostchecks on critical checkresults
self.scheduler_loop(2, [[host, 0, 'UP | value1=1 value2=2'], [router, 0, 'UP | rtt=10'], [svc, 0, 'BAD | value1=0 value2=0']])
print("Go bad now")
self.scheduler_loop(2, [[svc, 2, 'BAD | value1=0 value2=0']])
for a in self.sched.actions.values():
# Set them go NOW
a.t_to_go = now
            # In fact they are already launched, so we re-enable them :)
a.status = 'scheduled'
# And look for good tagging
if a.command.startswith('plugins/notifier.pl'):
print(a.__dict__)
print(a.reactionner_tag)
self.assertEqual('runonwindows', a.reactionner_tag)
if a.command.startswith('plugins/test_eventhandler.pl'):
print(a.__dict__)
print(a.reactionner_tag)
self.assertEqual('eventtag', a.reactionner_tag)
        # Ok, the tags are defined as they should be, now try to get them as a reactionner :)
# Now get only tag ones
taggued_runonwindows_checks = self.sched.get_to_run_checks(False, True, reactionner_tags=['runonwindows'], module_types=['fork'])
self.assertGreater(len(taggued_runonwindows_checks), 0)
for c in taggued_runonwindows_checks:
# Should be the host one only
self.assertTrue(c.command.startswith('plugins/notifier.pl'))
taggued_eventtag_checks = self.sched.get_to_run_checks(False, True, reactionner_tags=['eventtag'], module_types=['myassischicken'])
self.assertEqual(0, len(taggued_eventtag_checks))
if __name__ == '__main__':
unittest.main()
| 6,883 | Python | .py | 127 | 45.141732 | 139 | 0.635904 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,266 | test_poller_addition.py | shinken-solutions_shinken/test/test_poller_addition.py |
#!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from __future__ import absolute_import, division, print_function, unicode_literals
import time
from shinken_test import ShinkenTest, unittest
from shinken.external_command import ExternalCommand
from shinken.objects.brokerlink import BrokerLink
from shinken.objects.arbiterlink import ArbiterLink
from shinken.objects.pollerlink import PollerLink
from shinken.objects.reactionnerlink import ReactionnerLink
from shinken.objects.schedulerlink import SchedulerLink
class GoodArbiter(ArbiterLink):
# To lie about satellites
def ping(self):
print("Dummy OK for", self.get_name())
self.set_alive()
def have_conf(self, i):
return True
def do_not_run(self):
pass
class GoodScheduler(SchedulerLink):
# To lie about satellites
def ping(self):
print("Dummy OK for", self.get_name())
self.set_alive()
def have_conf(self, i):
return True
def put_conf(self, conf):
return True
class BadScheduler(SchedulerLink):
def ping(self):
print("Dummy bad ping", self.get_name())
self.add_failed_check_attempt()
def have_conf(self, i):
return False
class GoodPoller(PollerLink):
# To lie about satellites
def ping(self):
print("Dummy OK for", self.get_name())
self.set_alive()
def put_conf(self, conf):
return True
class BadPoller(PollerLink):
def ping(self):
print("Dummy bad ping", self.get_name())
self.add_failed_check_attempt()
class GoodReactionner(ReactionnerLink):
# To lie about satellites
def ping(self):
print("Dummy OK for", self.get_name())
self.set_alive()
def put_conf(self, conf):
return True
class BadReactionner(ReactionnerLink):
def ping(self):
print("Dummy bad ping", self.get_name())
self.add_failed_check_attempt()
class GoodBroker(BrokerLink):
# To lie about satellites
def ping(self):
print("Dummy OK for", self.get_name())
self.set_alive()
def put_conf(self, conf):
return True
class BadBroker(BrokerLink):
def ping(self):
print("Dummy bad ping", self.get_name())
self.add_failed_check_attempt()
class TestPollerAddition(ShinkenTest):
def setUp(self):
self.setup_with_file('etc/shinken_dispatcher.cfg')
def test_simple_dispatch_and_addition(self):
print("The dispatcher", self.dispatcher)
# dummy for the arbiter
for a in self.conf.arbiters:
a.__class__ = GoodArbiter
print("Preparing schedulers")
scheduler1 = self.conf.schedulers.find_by_name('scheduler-all-1')
self.assertIsNot(scheduler1, None)
scheduler1.__class__ = GoodScheduler
scheduler2 = self.conf.schedulers.find_by_name('scheduler-all-2')
self.assertIsNot(scheduler2, None)
scheduler2.__class__ = BadScheduler
print("Preparing pollers")
poller1 = self.conf.pollers.find_by_name('poller-all-1')
self.assertIsNot(poller1, None)
poller1.__class__ = GoodPoller
poller2 = self.conf.pollers.find_by_name('poller-all-2')
self.assertIsNot(poller2, None)
poller2.__class__ = BadPoller
print("Preparing reactionners")
reactionner1 = self.conf.reactionners.find_by_name('reactionner-all-1')
self.assertIsNot(reactionner1, None)
reactionner1.__class__ = GoodReactionner
reactionner2 = self.conf.reactionners.find_by_name('reactionner-all-2')
self.assertIsNot(reactionner2, None)
reactionner2.__class__ = BadReactionner
print("Preparing brokers")
broker1 = self.conf.brokers.find_by_name('broker-all-1')
self.assertIsNot(broker1, None)
broker1.__class__ = GoodBroker
broker2 = self.conf.brokers.find_by_name('broker-all-2')
self.assertIsNot(broker2, None)
broker2.__class__ = BadBroker
        # Ping all elements. The *-1 satellites should answer OK, the *-2
        # ones should register one failed attempt (3 max)
self.dispatcher.check_alive()
# Check good values
self.assertEqual(True, scheduler1.alive)
self.assertEqual(0, scheduler1.attempt)
self.assertEqual(True, scheduler1.reachable)
        # still alive, just unreachable
self.assertEqual(True, scheduler2.alive)
self.assertEqual(1, scheduler2.attempt)
self.assertEqual(False, scheduler2.reachable)
        # and the other satellites too
self.assertEqual(True, poller1.alive)
self.assertEqual(0, poller1.attempt)
self.assertEqual(True, poller1.reachable)
        # still alive, just unreachable
self.assertEqual(True, poller2.alive)
self.assertEqual(1, poller2.attempt)
self.assertEqual(False, poller2.reachable)
        # and the other satellites too
self.assertEqual(True, reactionner1.alive)
self.assertEqual(0, reactionner1.attempt)
self.assertEqual(True, reactionner1.reachable)
        # still alive, just unreachable
self.assertEqual(True, reactionner2.alive)
self.assertEqual(1, reactionner2.attempt)
self.assertEqual(False, reactionner2.reachable)
        # and the other satellites too
self.assertEqual(True, broker1.alive)
self.assertEqual(0, broker1.attempt)
self.assertEqual(True, broker1.reachable)
        # still alive, just unreachable
self.assertEqual(True, broker2.alive)
self.assertEqual(1, broker2.attempt)
self.assertEqual(False, broker2.reachable)
time.sleep(60)
        ### Now add another attempt: still alive, but attempt=2/3
self.dispatcher.check_alive()
# Check good values
self.assertEqual(True, scheduler1.alive)
self.assertEqual(0, scheduler1.attempt)
self.assertEqual(True, scheduler1.reachable)
        # still alive, just unreachable
self.assertEqual(True, scheduler2.alive)
self.assertEqual(2, scheduler2.attempt)
self.assertEqual(False, scheduler2.reachable)
        # and the other satellites too
self.assertEqual(True, poller1.alive)
self.assertEqual(0, poller1.attempt)
self.assertEqual(True, poller1.reachable)
        # still alive, just unreachable
self.assertEqual(True, poller2.alive)
self.assertEqual(2, poller2.attempt)
self.assertEqual(False, poller2.reachable)
        # and the other satellites too
self.assertEqual(True, reactionner1.alive)
self.assertEqual(0, reactionner1.attempt)
self.assertEqual(True, reactionner1.reachable)
        # still alive, just unreachable
self.assertEqual(True, reactionner2.alive)
self.assertEqual(2, reactionner2.attempt)
self.assertEqual(False, reactionner2.reachable)
        # and the other satellites too
self.assertEqual(True, broker1.alive)
self.assertEqual(0, broker1.attempt)
self.assertEqual(True, broker1.reachable)
        # still alive, just unreachable
self.assertEqual(True, broker2.alive)
self.assertEqual(2, broker2.attempt)
self.assertEqual(False, broker2.reachable)
time.sleep(60)
        ### Now the third failure: the *-2 satellites go DEAD!
self.dispatcher.check_alive()
# Check good values
self.assertEqual(True, scheduler1.alive)
self.assertEqual(0, scheduler1.attempt)
self.assertEqual(True, scheduler1.reachable)
        # dead now: too many failed attempts
self.assertEqual(False, scheduler2.alive)
self.assertEqual(3, scheduler2.attempt)
self.assertEqual(False, scheduler2.reachable)
        # and the other satellites too
self.assertEqual(True, poller1.alive)
self.assertEqual(0, poller1.attempt)
self.assertEqual(True, poller1.reachable)
        # dead now: too many failed attempts
self.assertEqual(False, poller2.alive)
self.assertEqual(3, poller2.attempt)
self.assertEqual(False, poller2.reachable)
        # and the other satellites too
self.assertEqual(True, reactionner1.alive)
self.assertEqual(0, reactionner1.attempt)
self.assertEqual(True, reactionner1.reachable)
        # dead now: too many failed attempts
self.assertEqual(False, reactionner2.alive)
self.assertEqual(3, reactionner2.attempt)
self.assertEqual(False, reactionner2.reachable)
        # and the other satellites too
self.assertEqual(True, broker1.alive)
self.assertEqual(0, broker1.attempt)
self.assertEqual(True, broker1.reachable)
        # dead now: too many failed attempts
self.assertEqual(False, broker2.alive)
self.assertEqual(3, broker2.attempt)
self.assertEqual(False, broker2.reachable)
# Now we check how we should dispatch confs
self.dispatcher.check_dispatch()
        # the dispatch should not be in good shape (some satellites are dead)
self.assertEqual(False, self.dispatcher.dispatch_ok)
# Now we really dispatch them!
self.dispatcher.dispatch()
self.assert_any_log_match('Dispatch OK of conf in scheduler scheduler-all-1')
self.assert_any_log_match('Dispatch OK of configuration 0 to reactionner reactionner-all-1')
self.assert_any_log_match('Dispatch OK of configuration 0 to poller poller-all-1')
self.assert_any_log_match('Dispatch OK of configuration 0 to broker broker-all-1')
self.clear_logs()
        # And check that we really dispatched the conf as we should
for r in self.conf.realms:
for cfg in r.confs.values():
self.assertEqual(True, cfg.is_assigned)
self.assertEqual(scheduler1, cfg.assigned_to)
cmd = "[%lu] ADD_SIMPLE_POLLER;All;newpoller;localhost;7771" % int(time.time())
ext_cmd = ExternalCommand(cmd)
self.external_command_dispatcher.resolve_command(ext_cmd)
# Look for the poller now
newpoller = self.conf.pollers.find_by_name('newpoller')
self.assertIsNot(newpoller, None)
newpoller.__class__ = GoodPoller
        ### What now with our new poller object?
self.dispatcher.check_alive()
# Check good values
self.assertEqual(True, newpoller.alive)
self.assertEqual(0, newpoller.attempt)
self.assertEqual(True, newpoller.reachable)
        # Now we check whether some confs need to be re-dispatched
self.dispatcher.check_bad_dispatch()
self.dispatcher.dispatch()
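# The ADD_SIMPLE_POLLER line above follows the classic external-command
# syntax: "[epoch] COMMAND;arg;arg;...". A small illustrative builder
# (hypothetical helper, not part of Shinken):
def build_ext_cmd_sketch(name, *args):
    return "[%d] %s" % (int(time.time()), ";".join((name,) + args))
# e.g. build_ext_cmd_sketch("ADD_SIMPLE_POLLER", "All", "newpoller", "localhost", "7771")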
if __name__ == '__main__':
unittest.main()
| 11,338
|
Python
|
.py
| 263
| 35.429658
| 100
| 0.684832
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,267
|
test_bad_contact_call.py
|
shinken-solutions_shinken/test/test_bad_contact_call.py
|
#!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken_test import *
class TestConfig(ShinkenTest):
def setUp(self):
self.setup_with_file('etc/shinken_bad_contact_call.cfg')
def test_bad_contact_call(self):
        # The service has an unknown contact. It should raise an error
svc = self.conf.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
print("Contacts:", svc.contacts)
self.assertEqual(False, svc.is_correct())
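# The pattern exercised here is Shinken's two-phase loading: objects are
# parsed first, then is_correct() flags dangling references such as an
# unknown contact. A much simplified illustration of that idea (not the
# real Item API):
def refs_are_correct_sketch(wanted_contacts, known_contacts):
    return all(c in known_contacts for c in wanted_contacts)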
if __name__ == '__main__':
unittest.main()
| 1,451
|
Python
|
.py
| 34
| 39.882353
| 90
| 0.738636
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,268
|
test_star_in_hostgroups.py
|
shinken-solutions_shinken/test/test_star_in_hostgroups.py
|
#!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken_test import *
class TestStarInGroups(ShinkenTest):
def setUp(self):
self.setup_with_file('etc/shinken_star_in_hostgroups.cfg')
    # If we reach a good start, we are ok :)
    # The bug was that a '*' hostgroup expansion got every host with
    # host_name != '', without checking whether register was 0 or not
def test_star_in_groups(self):
#
# Config is not correct because of a wrong relative path
# in the main config file
#
print("Get the hosts and services")
now = time.time()
host = self.sched.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
router = self.sched.hosts.find_by_name("test_router_0")
router.checks_in_progress = []
router.act_depend_of = [] # ignore the router
svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "TEST")
self.assertIsNot(svc, None)
svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "TEST_HNAME_STAR")
self.assertIsNot(svc, None)
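# The intended '*' expansion semantics pinned down by this test: match only
# real (registered) hosts, never templates. A simplified sketch of that
# filter, with hosts modelled as plain dicts (assumed semantics, not
# Shinken's actual expansion code):
def expand_star_sketch(hosts):
    return [h for h in hosts
            if h.get('host_name') and h.get('register', '1') != '0']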
if __name__ == '__main__':
unittest.main()
| 2,117
|
Python
|
.py
| 49
| 38.755102
| 97
| 0.69694
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,269
|
test_macroresolver.py
|
shinken-solutions_shinken/test/test_macroresolver.py
|
#!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken_test import *
from shinken.macroresolver import MacroResolver
from shinken.commandcall import CommandCall
from shinken.objects import Command
class TestMacroResolver(ShinkenTest):
# setUp is inherited from ShinkenTest
def setUp(self):
self.setup_with_file('etc/shinken_macroresolver.cfg')
def get_mr(self):
mr = MacroResolver()
mr.init(self.conf)
return mr
def get_hst_svc(self):
svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
hst = self.sched.hosts.find_by_name("test_host_0")
return (svc, hst)
def test_resolv_simple(self):
mr = self.get_mr()
(svc, hst) = self.get_hst_svc()
data = svc.get_data_for_checks()
com = mr.resolve_command(svc.check_command, data)
print(com)
self.assertEqual("plugins/test_servicecheck.pl --type=ok --failchance=5% --previous-state=PENDING --state-duration=0 --total-critical-on-host=0 --total-warning-on-host=0 --hostname test_host_0 --servicedesc test_ok_0 --custom custvalue", com)
    # Here we call with the special macro TOTALHOSTSUP,
    # but pass it as an argument, so the macro resolver needs
    # at least two passes to fully resolve it.
def test_special_macros(self):
mr = self.get_mr()
(svc, hst) = self.get_hst_svc()
data = svc.get_data_for_checks()
hst.state = 'UP'
dummy_call = "special_macro!$TOTALHOSTSUP$"
cc = CommandCall(self.conf.commands, dummy_call)
com = mr.resolve_command(cc, data)
print(com)
self.assertEqual('plugins/nothing 1', com)
# Here call with a special macro HOSTREALM
def test_special_macros_realm(self):
mr = self.get_mr()
(svc, hst) = self.get_hst_svc()
data = svc.get_data_for_checks()
hst.state = 'UP'
dummy_call = "special_macro!$HOSTREALM$"
cc = CommandCall(self.conf.commands, dummy_call)
com = mr.resolve_command(cc, data)
print(com)
self.assertEqual('plugins/nothing Default', com)
    # For output macros we want to delete all illegal macro characters
def test_illegal_macro_output_chars(self):
"$HOSTOUTPUT$, $HOSTPERFDATA$, $HOSTACKAUTHOR$, $HOSTACKCOMMENT$, $SERVICEOUTPUT$, $SERVICEPERFDATA$, $SERVICEACKAUTHOR$, and $SERVICEACKCOMMENT$ "
mr = self.get_mr()
(svc, hst) = self.get_hst_svc()
data = svc.get_data_for_checks()
illegal_macro_output_chars = self.sched.conf.illegal_macro_output_chars
print("Illegal macros caracters:", illegal_macro_output_chars)
hst.output = 'monculcestdupoulet'
dummy_call = "special_macro!$HOSTOUTPUT$"
for c in illegal_macro_output_chars:
hst.output = 'monculcestdupoulet' + c
cc = CommandCall(self.conf.commands, dummy_call)
com = mr.resolve_command(cc, data)
print(com)
self.assertEqual('plugins/nothing monculcestdupoulet', com)
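    # What the resolver is expected to do with output macros (a sketch of
    # the assumed behaviour, not the real MacroResolver code): strip every
    # character listed in illegal_macro_output_chars before substitution.
    def _strip_illegal_sketch(self, output, illegal_chars):
        return ''.join(c for c in output if c not in illegal_chars)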
def test_env_macros(self):
mr = self.get_mr()
(svc, hst) = self.get_hst_svc()
data = svc.get_data_for_checks()
data.append(self.conf)
env = mr.get_env_macros(data)
print("Env:", env )
self.assertNotEqual(env, {})
self.assertEqual('test_host_0', env['NAGIOS_HOSTNAME'])
self.assertEqual('0.0', env['NAGIOS_SERVICEPERCENTCHANGE'])
self.assertEqual('custvalue', env['NAGIOS__SERVICECUSTNAME'])
self.assertEqual('gnulinux', env['NAGIOS__HOSTOSTYPE'])
self.assertNotIn('NAGIOS_USER1', env)
def test_resource_file(self):
mr = self.get_mr()
(svc, hst) = self.get_hst_svc()
data = svc.get_data_for_checks()
dummy_call = "special_macro!$USER1$"
cc = CommandCall(self.conf.commands, dummy_call)
com = mr.resolve_command(cc, data)
self.assertEqual('plugins/nothing plugins', com)
dummy_call = "special_macro!$INTERESTINGVARIABLE$"
cc = CommandCall(self.conf.commands, dummy_call)
com = mr.resolve_command(cc, data)
print("CUCU", com)
self.assertEqual('plugins/nothing interestingvalue', com)
        # Lines may contain multiple '=': we must split on the first one
        # and keep the rest in the macro value
dummy_call = "special_macro!$ANOTHERVALUE$"
cc = CommandCall(self.conf.commands, dummy_call)
com = mr.resolve_command(cc, data)
print("CUCU", com)
self.assertEqual('plugins/nothing blabla=toto', com)
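    # Resource-file parsing rule exercised above: split each $VAR$=value
    # line on the FIRST '=' only, so the value may itself contain '='
    # (e.g. 'ANOTHERVALUE=blabla=toto'). Illustrative equivalent:
    def _split_resource_line_sketch(self, line):
        return line.split('=', 1)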
# Look at on demand macros
def test_ondemand_macros(self):
mr = self.get_mr()
(svc, hst) = self.get_hst_svc()
data = hst.get_data_for_checks()
hst.state = 'UP'
svc.state = 'UNKNOWN'
# Ok sample host call
dummy_call = "special_macro!$HOSTSTATE:test_host_0$"
cc = CommandCall(self.conf.commands, dummy_call)
com = mr.resolve_command(cc, data)
print(com)
self.assertEqual('plugins/nothing UP', com)
        # Call with an empty host name, meaning: the current host
data = hst.get_data_for_checks()
dummy_call = "special_macro!$HOSTSTATE:$"
cc = CommandCall(self.conf.commands, dummy_call)
com = mr.resolve_command(cc, data)
print(com)
self.assertEqual('plugins/nothing UP', com)
# Now with a service, for our implicit host state
data = svc.get_data_for_checks()
dummy_call = "special_macro!$HOSTSTATE:test_host_0$"
cc = CommandCall(self.conf.commands, dummy_call)
com = mr.resolve_command(cc, data)
print(com)
self.assertEqual('plugins/nothing UP', com)
        # Same again, but with an empty (implicit) host name
data = svc.get_data_for_checks()
dummy_call = "special_macro!$HOSTSTATE:$"
cc = CommandCall(self.conf.commands, dummy_call)
com = mr.resolve_command(cc, data)
print(com)
self.assertEqual('plugins/nothing UP', com)
# Now prepare another service
svc2 = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "test_another_service")
svc2.output = 'you should not pass'
# Now call this data from our previous service
data = svc.get_data_for_checks()
dummy_call = "special_macro!$SERVICEOUTPUT:test_host_0:test_another_service$"
cc = CommandCall(self.conf.commands, dummy_call)
com = mr.resolve_command(cc, data)
print(com)
self.assertEqual('plugins/nothing you should not pass', com)
# Ok now with a host implicit way
data = svc.get_data_for_checks()
dummy_call = "special_macro!$SERVICEOUTPUT::test_another_service$"
cc = CommandCall(self.conf.commands, dummy_call)
com = mr.resolve_command(cc, data)
print(com)
self.assertEqual('plugins/nothing you should not pass', com)
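    # On-demand macro syntax exercised above: $MACRO:host$ or
    # $MACRO:host:service$, an empty host field meaning "the current host".
    # A sketch of the split (assumed semantics, not the resolver's parser):
    def _split_ondemand_sketch(self, macro):
        parts = macro.strip('$').split(':')
        host = parts[1] if len(parts) > 1 and parts[1] else None
        svc_desc = parts[2] if len(parts) > 2 else None
        return parts[0], host, svc_desc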
    # Look at the numbered HOSTADDRESSx macros
def test_hostadressX_macros(self):
mr = self.get_mr()
(svc, hst) = self.get_hst_svc()
data = hst.get_data_for_checks()
# Ok sample host call
dummy_call = "special_macro!$HOSTADDRESS6$"
cc = CommandCall(self.conf.commands, dummy_call)
com = mr.resolve_command(cc, data)
print(com)
self.assertEqual('plugins/nothing ::1', com)
if __name__ == '__main__':
unittest.main()
| 8,366
|
Python
|
.py
| 183
| 38.081967
| 250
| 0.652131
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,270
|
test_notifications.py
|
shinken-solutions_shinken/test/test_notifications.py
|
#!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test notifications.
#
from __future__ import absolute_import, division, print_function, unicode_literals
import time
from shinken_test import unittest, ShinkenTest
from shinken_test import time_hacker
class TestNotif(ShinkenTest):
def test_continuous_notifications(self):
self.print_header()
# retry_interval 2
# critical notification
# run loop -> another notification
now = time.time()
host = self.sched.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
        # To make tests quicker we send notifications very quickly
svc.notification_interval = 0.001
svc.checks_in_progress = []
svc.act_depend_of = [] # no hostchecks on critical checkresults
#--------------------------------------------------------------
# initialize host/service state
#--------------------------------------------------------------
self.scheduler_loop(1, [[host, 0, 'UP']], do_sleep=True, sleep_time=0.1)
print("- 1 x OK -------------------------------------")
self.scheduler_loop(1, [[svc, 0, 'OK']], do_sleep=True, sleep_time=0.1)
self.assertEqual(0, svc.current_notification_number)
#--------------------------------------------------------------
# service reaches soft;1
# there must not be any notification
#--------------------------------------------------------------
print("- 1 x BAD get soft -------------------------------------")
self.scheduler_loop(1, [[svc, 2, 'BAD']], do_sleep=True, sleep_time=0.1)
# check_notification: not (soft)
print("---current_notification_number", svc.current_notification_number)
#--------------------------------------------------------------
# service reaches hard;2
# a notification must have been created
# notification number must be 1
#--------------------------------------------------------------
print("- 1 x BAD get hard -------------------------------------")
self.scheduler_loop(1, [[svc, 2, 'BAD']], do_sleep=True, sleep_time=0.1)
self.show_and_clear_logs()
#self.show_and_clear_actions()
self.show_actions()
print(svc.notifications_in_progress)
for n in svc.notifications_in_progress.values():
print(n)
# check_notification: yes (hard)
print("---current_notification_number", svc.current_notification_number)
        # one notification has already been sent. the next one has been
        # scheduled and is waiting for notification_interval to pass, so the
        # current number is 1
self.assertEqual(1, svc.current_notification_number)
print("---------------------------------1st round with a hard")
print("find a way to get the number of the last reaction")
cnn = svc.current_notification_number
print("- 5 x BAD repeat -------------------------------------")
self.scheduler_loop(1, [[svc, 2, 'BAD']], do_sleep=True, sleep_time=0.1)
self.show_and_clear_logs()
self.show_actions()
print("cnn and cur", cnn, svc.current_notification_number)
self.assertGreater(svc.current_notification_number, cnn)
cnn = svc.current_notification_number
self.scheduler_loop(1, [[svc, 2, 'BAD']], do_sleep=True, sleep_time=0.1)
self.show_and_clear_logs()
self.show_actions()
print("svc.current_notification_number, cnn", svc.current_notification_number, cnn)
self.assertGreater(svc.current_notification_number, cnn)
#--------------------------------------------------------------
# 2 cycles = 2 minutes = 2 new notifications
#--------------------------------------------------------------
cnn = svc.current_notification_number
self.scheduler_loop(2, [[svc, 2, 'BAD']], do_sleep=True, sleep_time=0.1)
self.show_and_clear_logs()
self.show_actions()
print("svc.current_notification_number, cnn", svc.current_notification_number, cnn)
self.assertGreater(svc.current_notification_number, cnn)
#--------------------------------------------------------------
# 2 cycles = 2 minutes = 2 new notifications (theoretically)
# BUT: test_contact filters notifications
# we do not raise current_notification_number if no mail was sent
#--------------------------------------------------------------
now = time.time()
cmd = "[%lu] DISABLE_CONTACT_SVC_NOTIFICATIONS;test_contact" % now
self.sched.run_external_command(cmd)
cnn = svc.current_notification_number
self.scheduler_loop(1, [[svc, 2, 'BAD']], do_sleep=True, sleep_time=0.1)
self.show_and_clear_logs()
self.show_actions()
self.assertEqual(cnn, svc.current_notification_number)
#--------------------------------------------------------------
# again a normal cycle
# test_contact receives his mail
#--------------------------------------------------------------
now = time.time()
cmd = "[%lu] ENABLE_CONTACT_SVC_NOTIFICATIONS;test_contact" % now
self.sched.run_external_command(cmd)
#cnn = svc.current_notification_number
self.scheduler_loop(1, [[svc, 2, 'BAD']], do_sleep=True, sleep_time=0.1)
self.show_and_clear_logs()
self.show_actions()
print("svc.current_notification_number, cnn", svc.current_notification_number, cnn)
self.assertEqual(cnn + 1, svc.current_notification_number)
#--------------------------------------------------------------
# now recover. there must be no scheduled/inpoller notification
#--------------------------------------------------------------
self.scheduler_loop(1, [[svc, 0, 'GOOD']], do_sleep=True, sleep_time=0.1)
self.show_and_clear_logs()
self.show_and_clear_actions()
self.assertEqual(0, svc.current_notification_number)
def test_continuous_notifications_delayed(self):
self.print_header()
# retry_interval 2
# critical notification
# run loop -> another notification
now = time.time()
host = self.sched.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
        # To make tests quicker we send notifications very quickly
        svc.notification_interval = 0.001  # and send immediately then
svc.first_notification_delay = 0.1 # set 6s for first notif delay
svc.checks_in_progress = []
svc.act_depend_of = [] # no hostchecks on critical checkresults
self.scheduler_loop(1, [[host, 0, 'UP']], do_sleep=True, sleep_time=1)
#-----------------------------------------------------------------
# initialize with a good check. there must be no pending notification
#-----------------------------------------------------------------
self.scheduler_loop(1, [[svc, 0, 'OK']], do_sleep=True, sleep_time=1)
self.show_and_clear_logs()
self.show_and_clear_actions()
self.assertEqual(0, svc.current_notification_number)
#-----------------------------------------------------------------
# check fails and enters soft state.
# there must be no notification, only the event handler
#-----------------------------------------------------------------
self.scheduler_loop(1, [[svc, 1, 'BAD']], do_sleep=True, sleep_time=1)
self.assertEqual(1, self.count_actions())
now = time.time()
print(svc.last_time_warning, svc.last_time_critical, svc.last_time_unknown, svc.last_time_ok)
last_time_not_ok = svc.last_time_non_ok_or_up()
deadline = svc.last_time_non_ok_or_up() + svc.first_notification_delay * svc.__class__.interval_length
print("deadline is in %s secs" % (deadline - now))
#-----------------------------------------------------------------
# check fails again and enters hard state.
# now there is a (scheduled for later) notification and an event handler
# current_notification_number is still 0, until notifications
# have actually been sent
#-----------------------------------------------------------------
self.scheduler_loop(1, [[svc, 2, 'BAD']], do_sleep=True, sleep_time=0.1)
self.show_and_clear_logs()
self.show_actions()
self.assertEqual(0, svc.current_notification_number)
# sleep up to deadline:
time_hacker.time_warp(deadline - now)
# even if time_hacker is used here, we still call time.sleep()
# to show that we must wait the necessary delay time:
time.sleep(deadline - now)
#-----------------------------------------------------------------
# now the delay period is over and the notification can be sent
# with the next bad check
        # there are 2 actions: the master notification and its child
# 1 notification was sent, so current_notification_number is 1
#-----------------------------------------------------------------
self.scheduler_loop(1, [[svc, 2, 'BAD']], do_sleep=True, sleep_time=1)
print("Counted actions", self.count_actions())
self.assertEqual(2, self.count_actions())
# 1 master, 1 child
self.assertEqual(1, svc.current_notification_number)
self.show_actions()
        self.assertEqual(1, len(svc.notifications_in_progress))  # master is a zombie and removed from in_progress
self.show_logs()
self.assert_log_match(1, 'SERVICE NOTIFICATION.*;CRITICAL;')
self.show_and_clear_logs()
self.show_actions()
#-----------------------------------------------------------------
# relax with a successful check
# there are 2 actions, one notification and one eventhandler
# current_notification_number was reset to 0
#-----------------------------------------------------------------
self.scheduler_loop(2, [[svc, 0, 'GOOD']], do_sleep=True, sleep_time=1)
self.assert_log_match(1, 'SERVICE ALERT.*;OK;')
self.assert_log_match(2, 'SERVICE EVENT HANDLER.*;OK;')
self.assert_log_match(3, 'SERVICE NOTIFICATION.*;OK;')
# evt reap 2 loops
self.assertEqual(0, svc.current_notification_number)
self.assertEqual(0, len(svc.notifications_in_progress))
self.assertEqual(0, len(svc.notified_contacts))
#self.assertEqual(2, self.count_actions())
self.show_and_clear_logs()
self.show_and_clear_actions()
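    # Timing rule used above: the first notification fires
    # first_notification_delay * interval_length seconds after the service
    # last left an OK/UP state. With this test's values (and the usual
    # interval_length of 60, an assumption): 0.1 * 60 = 6 seconds.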
def test_continuous_notifications_delayed_recovers_fast(self):
self.print_header()
# retry_interval 2
# critical notification
# run loop -> another notification
now = time.time()
host = self.sched.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
svc.first_notification_delay = 5
svc.checks_in_progress = []
svc.act_depend_of = [] # no hostchecks on critical checkresults
self.scheduler_loop(1, [[host, 0, 'UP']], do_sleep=True, sleep_time=0.1)
#-----------------------------------------------------------------
# initialize with a good check. there must be no pending notification
#-----------------------------------------------------------------
self.scheduler_loop(1, [[svc, 0, 'OK']], do_sleep=True, sleep_time=0.1)
self.show_and_clear_logs()
self.show_and_clear_actions()
self.assertEqual(0, svc.current_notification_number)
#-----------------------------------------------------------------
# check fails and enters soft state.
# there must be no notification, only the event handler
#-----------------------------------------------------------------
self.scheduler_loop(1, [[svc, 1, 'BAD']], do_sleep=True, sleep_time=0.1)
self.show_and_clear_logs()
self.show_actions()
self.assertEqual(1, self.count_actions())
#-----------------------------------------------------------------
# check fails again and enters hard state.
# now there is a (scheduled for later) notification and an event handler
# current_notification_number is still 0 (will be raised when
# a notification is actually sent)
#-----------------------------------------------------------------
self.scheduler_loop(1, [[svc, 2, 'BAD']], do_sleep=True, sleep_time=0.1)
self.show_and_clear_logs()
self.show_actions()
self.assertEqual(2, self.count_actions())
self.assertEqual(0, svc.current_notification_number)
#-----------------------------------------------------------------
# repeat bad checks during the delay time
# but only one time. we don't want to reach the deadline
# there is one action: the pending notification
#-----------------------------------------------------------------
self.scheduler_loop(1, [[svc, 2, 'BAD']], do_sleep=True, sleep_time=0.1)
self.show_and_clear_logs()
self.show_actions()
self.assertEqual(1, self.count_actions())
#-----------------------------------------------------------------
# relax with a successful check
# there is 1 action, the eventhandler.
#-----------------------------------------------------------------
self.scheduler_loop(1, [[svc, 0, 'GOOD']], do_sleep=True, sleep_time=0.1)
self.assert_log_match(1, 'SERVICE ALERT.*;OK;')
self.assert_log_match(2, 'SERVICE EVENT HANDLER.*;OK;')
self.assert_log_match(3, 'SERVICE NOTIFICATION.*;OK;',
no_match=True)
self.show_actions()
self.assertEqual(0, len(svc.notifications_in_progress))
self.assertEqual(0, len(svc.notified_contacts))
self.assertEqual(1, self.count_actions())
self.show_and_clear_logs()
self.show_and_clear_actions()
def test_host_in_downtime_or_down_service_critical(self):
self.print_header()
# retry_interval 2
# critical notification
# run loop -> another notification
now = time.time()
host = self.sched.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
        # To make tests quicker we send notifications very quickly
svc.notification_interval = 0.001
svc.checks_in_progress = []
svc.act_depend_of = [] # no hostchecks on critical checkresults
#--------------------------------------------------------------
# initialize host/service state
#--------------------------------------------------------------
self.scheduler_loop(1, [[host, 0, 'UP'], [svc, 0, 'OK']], do_sleep=True, sleep_time=0.1)
self.assertEqual(0, svc.current_notification_number)
#--------------------------------------------------------------
# service reaches hard;2
# a notification must have been created
# notification number must be 1
#--------------------------------------------------------------
self.scheduler_loop(2, [[host, 0, 'UP'], [svc, 2, 'BAD']], do_sleep=True, sleep_time=0.1)
self.show_logs()
self.show_actions()
self.assert_log_match(1, 'SERVICE ALERT.*;CRITICAL;SOFT')
self.assert_log_match(2, 'SERVICE EVENT HANDLER.*;CRITICAL;SOFT')
self.assert_log_match(3, 'SERVICE ALERT.*;CRITICAL;HARD')
self.assert_log_match(4, 'SERVICE EVENT HANDLER.*;CRITICAL;HARD')
self.assert_log_match(5, 'SERVICE NOTIFICATION.*;CRITICAL;')
self.assertEqual(1, svc.current_notification_number)
self.clear_logs()
self.clear_actions()
#--------------------------------------------------------------
# reset host/service state
#--------------------------------------------------------------
self.scheduler_loop(1, [[host, 0, 'UP'], [svc, 0, 'OK']], do_sleep=True, sleep_time=0.1)
self.assertEqual(0, svc.current_notification_number)
duration = 300
now = time.time()
# fixed downtime valid for the next 5 minutes
cmd = "[%lu] SCHEDULE_HOST_DOWNTIME;test_host_0;%d;%d;1;0;%d;lausser;blablub" % (now, now, now + duration, duration)
self.sched.run_external_command(cmd)
#--------------------------------------------------------------
# service reaches hard;2
        # no notification
#--------------------------------------------------------------
self.scheduler_loop(2, [[host, 0, 'UP'], [svc, 2, 'BAD']], do_sleep=True, sleep_time=0.1)
self.assert_any_log_match('HOST NOTIFICATION.*;DOWNTIMESTART')
self.assert_no_log_match('SERVICE NOTIFICATION.*;CRITICAL;')
self.show_and_clear_logs()
self.show_and_clear_actions()
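    # SCHEDULE_HOST_DOWNTIME argument order used above follows the classic
    # external-command layout: host;start;end;fixed;trigger_id;duration;
    # author;comment. An illustrative builder for a fixed downtime
    # (hypothetical helper, not part of Shinken):
    def _build_host_downtime_sketch(self, host, start, duration, author, comment):
        return "[%d] SCHEDULE_HOST_DOWNTIME;%s;%d;%d;1;0;%d;%s;%s" % (
            int(start), host, int(start), int(start + duration),
            int(duration), author, comment)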
def test_only_notified_contacts_notifications(self):
self.print_header()
# retry_interval 2
# critical notification
# run loop -> another notification
now = time.time()
host = self.sched.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
        # To make tests quicker we send notifications very quickly
svc.notification_interval = 0.001
svc.checks_in_progress = []
svc.act_depend_of = [] # no hostchecks on critical checkresults
        # We do not want the contact to get a mail for UNKNOWN, so we remove the 'u'
test_contact = self.sched.contacts.find_by_name('test_contact')
for nw in test_contact.notificationways:
nw.service_notification_options.remove('u')
#--------------------------------------------------------------
# initialize host/service state
#--------------------------------------------------------------
self.scheduler_loop(1, [[host, 0, 'UP']], do_sleep=True, sleep_time=0.1)
print("- 1 x OK -------------------------------------")
self.scheduler_loop(1, [[svc, 0, 'OK']], do_sleep=True, sleep_time=0.1)
self.assertEqual(0, svc.current_notification_number)
#--------------------------------------------------------------
# service reaches soft;1
# there must not be any notification
#--------------------------------------------------------------
print("- 1 x BAD get soft -------------------------------------")
self.scheduler_loop(1, [[svc, 3, 'UNKNOWN']], do_sleep=True, sleep_time=0.1)
# check_notification: not (soft)
print("---current_notification_number", svc.current_notification_number)
print("Contact we notified", svc.notified_contacts)
#--------------------------------------------------------------
# service reaches hard;2
# a notification must have been created
# notification number must be 1
#--------------------------------------------------------------
print("- 1 x BAD get hard -------------------------------------")
self.scheduler_loop(1, [[svc, 3, 'UNKNOWN']], do_sleep=True, sleep_time=0.1)
self.show_and_clear_logs()
#self.show_and_clear_actions()
print("TOTO2")
self.show_actions()
print("notif in progress", svc.notifications_in_progress)
for n in svc.notifications_in_progress.values():
print("TOTO", n.__dict__)
# check_notification: yes (hard)
print("---current_notification_number", svc.current_notification_number)
        # The contact refuses our notification, so we are still at 0
self.assertEqual(0, svc.current_notification_number)
print("---------------------------------1st round with a hard")
print("find a way to get the number of the last reaction")
cnn = svc.current_notification_number
print("- 5 x BAD repeat -------------------------------------")
self.scheduler_loop(1, [[svc, 3, 'BAD']], do_sleep=True, sleep_time=0.1)
self.show_and_clear_logs()
self.show_actions()
print("cnn and cur", cnn, svc.current_notification_number)
cnn = svc.current_notification_number
self.scheduler_loop(1, [[svc, 3, 'BAD']], do_sleep=True, sleep_time=0.1)
self.show_and_clear_logs()
self.show_actions()
print("svc.current_notification_number, cnn", svc.current_notification_number, cnn)
#--------------------------------------------------------------
# 2 cycles = 2 minutes = 2 new notifications
#--------------------------------------------------------------
cnn = svc.current_notification_number
self.scheduler_loop(2, [[svc, 3, 'BAD']], do_sleep=True, sleep_time=0.1)
self.show_and_clear_logs()
self.show_actions()
print("svc.current_notification_number, cnn", svc.current_notification_number, cnn)
#--------------------------------------------------------------
# 2 cycles = 2 minutes = 2 new notifications (theoretically)
# BUT: test_contact filters notifications
# we do not raise current_notification_number if no mail was sent
#--------------------------------------------------------------
now = time.time()
cmd = "[%lu] DISABLE_CONTACT_SVC_NOTIFICATIONS;test_contact" % now
self.sched.run_external_command(cmd)
cnn = svc.current_notification_number
self.scheduler_loop(1, [[svc, 3, 'BAD']], do_sleep=True, sleep_time=0.1)
self.show_and_clear_logs()
self.show_actions()
self.assertEqual(cnn, svc.current_notification_number)
#--------------------------------------------------------------
# again a normal cycle
# test_contact receives his mail
#--------------------------------------------------------------
now = time.time()
cmd = "[%lu] ENABLE_CONTACT_SVC_NOTIFICATIONS;test_contact" % now
self.sched.run_external_command(cmd)
#cnn = svc.current_notification_number
self.scheduler_loop(1, [[svc, 3, 'BAD']], do_sleep=True, sleep_time=0.1)
self.show_and_clear_logs()
self.show_actions()
print("svc.current_notification_number, cnn", svc.current_notification_number, cnn)
#self.assertEqual(cnn + 1, svc.current_notification_number)
#--------------------------------------------------------------
# now recover. there must be no scheduled/inpoller notification
#--------------------------------------------------------------
self.scheduler_loop(1, [[svc, 0, 'GOOD']], do_sleep=True, sleep_time=0.1)
        # We do not want a recovery notification because
        # the user never received the problem notification!
self.assert_no_log_match('notify-service')
self.show_and_clear_logs()
self.show_and_clear_actions()
self.assertEqual(0, svc.current_notification_number)
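    # Filtering rule exercised above: a service notification is only sent if
    # the contact's notificationway lists the matching option letter.
    # Sketch of that check, using the standard Nagios letters (assumed
    # mapping, not the real NotificationWay code):
    def _want_service_notif_sketch(self, nw_options, state_id):
        letter = {0: 'r', 1: 'w', 2: 'c', 3: 'u'}.get(state_id)
        return letter in nw_options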
def test_svc_in_dt_and_crit_and_notif_interval_0(self):
self.print_header()
# retry_interval 2
# critical notification
# run loop -> another notification
now = time.time()
host = self.sched.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
svc.notification_interval = 0
host.notification_options = 'c'
svc.notification_options = 'c'
svc.checks_in_progress = []
svc.act_depend_of = [] # no hostchecks on critical checkresults
#--------------------------------------------------------------
# initialize host/service state
#--------------------------------------------------------------
self.scheduler_loop(1, [[host, 0, 'UP'], [svc, 0, 'OK']], do_sleep=True, sleep_time=0.1)
self.assertEqual(0, svc.current_notification_number)
#--------------------------------------------------------------
# service reaches hard;2
# a notification must have been created
# notification number must be 1
#--------------------------------------------------------------
self.scheduler_loop(2, [[host, 0, 'UP'], [svc, 2, 'BAD']], do_sleep=True, sleep_time=0.1)
self.show_logs()
self.show_actions()
self.assert_log_match(1, 'SERVICE ALERT.*;CRITICAL;SOFT')
self.assert_log_match(2, 'SERVICE EVENT HANDLER.*;CRITICAL;SOFT')
self.assert_log_match(3, 'SERVICE ALERT.*;CRITICAL;HARD')
self.assert_log_match(4, 'SERVICE EVENT HANDLER.*;CRITICAL;HARD')
self.assert_log_match(5, 'SERVICE NOTIFICATION.*;CRITICAL;')
self.assertEqual(1, svc.current_notification_number)
self.clear_logs()
self.clear_actions()
#--------------------------------------------------------------
# reset host/service state
#--------------------------------------------------------------
#self.scheduler_loop(1, [[host, 0, 'UP'], [svc, 0, 'OK']], do_sleep=True, sleep_time=0.1)
#self.assertEqual(0, svc.current_notification_number)
duration = 2
now = time.time()
        # fixed downtime valid for the next 2 seconds
cmd = "[%lu] SCHEDULE_SVC_DOWNTIME;test_host_0;test_ok_0;%d;%d;1;0;%d;lausser;blablub" % (now, now, now + duration, duration)
self.sched.run_external_command(cmd)
#--------------------------------------------------------------
# service reaches hard;2
        # no notification
#--------------------------------------------------------------
self.scheduler_loop(2, [[host, 0, 'UP'], [svc, 2, 'BAD']], do_sleep=True, sleep_time=0.1)
self.assert_any_log_match('SERVICE DOWNTIME ALERT.*;STARTED')
self.assert_no_log_match('SERVICE NOTIFICATION.*;CRITICAL;')
        # To get out of the downtime.
self.scheduler_loop(2, [[host, 0, 'UP'], [svc, 2, 'BAD']], do_sleep=True, sleep_time=2)
self.assert_no_log_match('SERVICE NOTIFICATION.*;CRITICAL;')
self.assertEqual(1, svc.current_notification_number)
self.show_and_clear_logs()
self.show_and_clear_actions()
if __name__ == '__main__':
unittest.main()
| 27,919
|
Python
|
.py
| 502
| 46.986056
| 133
| 0.528063
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,271
|
test_bad_sat_realm_conf.py
|
shinken-solutions_shinken/test/test_bad_sat_realm_conf.py
|
#!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken_test import *
class TestBadSatRealmConf(ShinkenTest):
def setUp(self):
self.setup_with_file('etc/shinken_bad_sat_realm_conf.cfg')
def test_badconf(self):
self.assertFalse(self.conf.conf_is_correct)
if __name__ == '__main__':
unittest.main()
| 1,254
|
Python
|
.py
| 31
| 38.258065
| 82
| 0.755354
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,272
|
test_notification_master.py
|
shinken-solutions_shinken/test/test_notification_master.py
|
#!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Sebastien Coavoux, s.coavoux@free.fr
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
from __future__ import absolute_import, division, print_function, unicode_literals
import time
from shinken_test import ShinkenTest, unittest
from shinken.notification import Notification
class TestMasterNotif(ShinkenTest):
    # For a service, we generate a notification and an event handler.
    # Each one gets a specific reactionner_tag that we will look for.
def test_master_notif(self):
router = self.sched.hosts.find_by_name("test_router_0")
router.checks_in_progress = []
router.act_depend_of = [] # ignore the router
host = self.sched.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
svc.checks_in_progress = []
svc.act_depend_of = [] # no hostchecks on critical checkresults
self.scheduler_loop(2, [[host, 0, 'UP | value1=1 value2=2'], [router, 0, 'UP | rtt=10'], [svc, 0, 'BAD | value1=0 value2=0']])
### hack Notification.__init__ to save the newly created instances :
_new_notifs = []
_old_notif_init = Notification.__init__
def _mock_notif_init(self, *a, **kw):
_old_notif_init(self, *a, **kw)
_new_notifs.append(self) # save it!
Notification.__init__ = _mock_notif_init
try:
# this scheduler_loop will create a new notification:
self.scheduler_loop(2, [[svc, 2, 'BAD | value1=0 value2=0']])
finally: # be courteous and always undo what we've mocked once we don't need it anymore:
Notification.__init__ = _old_notif_init
self.assertNotEqual(0, len(_new_notifs),
"A Notification should have been created !")
        guessed_notif = _new_notifs[0]  # and we hope that it's the right one..
self.assertIs(guessed_notif, self.sched.actions.get(guessed_notif.id, None),
"Our guessed notification does not match what's in scheduler actions dict !\n"
"guessed_notif=[%s] sched.actions=%r" % (guessed_notif, self.sched.actions))
guessed_notif.t_to_go = time.time() # Hack to set t_to_go now, so that the notification is processed
# Try to set master notif status to inpoller
actions = self.sched.get_to_run_checks(False, True)
# But no, still scheduled
self.assertEqual('scheduled', guessed_notif.status)
# And still no action for our receivers
self.assertEqual([], actions)
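# The patch-and-restore dance above is plain monkeypatching; a reusable
# equivalent with contextlib (illustrative sketch, not used by the test):
import contextlib

@contextlib.contextmanager
def patched_sketch(obj, name, replacement):
    original = getattr(obj, name)
    setattr(obj, name, replacement)
    try:
        yield original
    finally:
        setattr(obj, name, original)  # always restore, even on error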
if __name__ == '__main__':
unittest.main()
| 3,549
|
Python
|
.py
| 67
| 46.328358
| 134
| 0.669839
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,273
|
test_python_crash_with_recursive_bp_rules.py
|
shinken-solutions_shinken/test/test_python_crash_with_recursive_bp_rules.py
|
#!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken_test import *
class TestConfig(ShinkenTest):
def setUp(self):
self.setup_with_file('etc/shinken_python_crash_with_recursive_bp_rules.cfg')
def test_dummy(self):
#
# Config is not correct because of a wrong relative path
# in the main config file
#
print("Get the hosts and services")
now = time.time()
host1 = self.sched.hosts.find_by_name("ht34-peret-2-dif0")
host2 = self.sched.hosts.find_by_name("ht34-peret-2-dif1")
self.scheduler_loop(5, [[host1, 2, 'DOWN | value1=1 value2=2'], [host2, 2, 'DOWN | rtt=10']])
if __name__ == '__main__':
unittest.main()
| 1,634
|
Python
|
.py
| 39
| 38.307692
| 101
| 0.713115
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,274
|
test_satellites.py
|
shinken-solutions_shinken/test/test_satellites.py
|
#!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken_test import *
class TestConfig(ShinkenTest):
# setUp is inherited from ShinkenTest
def test_satellite_failed_check(self):
print("Create a Scheduler dummy")
r = self.conf.realms.find_by_name('Default')
creation_tab = {'scheduler_name': 'scheduler-1', 'address': '0.0.0.0', 'spare': '0',
'port': '9999', 'check_interval': '1', 'realm': 'Default', 'use_ssl': '0', 'hard_ssl_name_check': '0'}
s = SchedulerLink(creation_tab)
s.last_check = time.time() - 100
s.timeout = 3
s.check_interval = 1
s.data_timeout = 120
s.port = 9999
s.max_check_attempts = 4
s.realm = r
        # Lie: we start as alive here
s.alive = True
print(s.__dict__)
# Should be attempt = 0
self.assertEqual(0, s.attempt)
        # Now make a bad ping: should be unreachable (but not dead yet)
s.ping()
self.assertEqual(1, s.attempt)
self.assertEqual(True, s.alive)
self.assertEqual(False, s.reachable)
        # Now make a bad ping: should be unreachable (but not dead yet)
s.last_check = time.time() - 100
s.ping()
self.assertEqual(2, s.attempt)
self.assertEqual(True, s.alive)
self.assertEqual(False, s.reachable)
        # Now make a bad ping: should be unreachable (but not dead yet)
s.last_check = time.time() - 100
s.ping()
self.assertEqual(3, s.attempt)
self.assertEqual(True, s.alive)
self.assertEqual(False, s.reachable)
# Ok, this time we go DEAD!
s.last_check = time.time() - 100
s.ping()
self.assertEqual(4, s.attempt)
self.assertEqual(False, s.alive)
self.assertEqual(False, s.reachable)
        # Now fake an OK ping (we won't really open the port here...)
s.last_check = time.time() - 100
s.set_alive()
self.assertEqual(0, s.attempt)
self.assertEqual(True, s.alive)
self.assertEqual(True, s.reachable)
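    # Summary of the liveness progression asserted above, with
    # max_check_attempts = 4:
    #   failed ping #1..#3 -> alive=True,  reachable=False
    #   failed ping #4     -> alive=False, reachable=False
    #   set_alive()        -> alive=True,  reachable=True, attempt=0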
if __name__ == '__main__':
unittest.main()
| 3,068
|
Python
|
.py
| 75
| 34.426667
| 126
| 0.648758
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,275
|
test_regenerator.py
|
shinken-solutions_shinken/test/test_regenerator.py
|
#!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from __future__ import absolute_import, division, print_function, unicode_literals
import time
from shinken_test import ShinkenTest, unittest
from shinken.objects import Service
from shinken.misc.regenerator import Regenerator
class TestRegenerator(ShinkenTest):
def setUp(self):
self.setup_with_file('etc/shinken_regenerator.cfg')
def look_for_same_values(self):
# Look at Regenerator values
print("Hosts:", self.rg.hosts.__dict__)
for h in self.rg.hosts:
orig_h = self.sched.hosts.find_by_name(h.host_name)
print(h.state, orig_h.state)
# Look for same states
self.assertEqual(orig_h.state, h.state)
self.assertEqual(orig_h.state_type, h.state_type)
# Look for same impacts
for i in h.impacts:
print("Got impact", i.get_name())
same_impacts = i.get_name() in [j.get_name() for j in orig_h.impacts]
self.assertTrue(same_impacts)
# And look for same source problems
for i in h.source_problems:
print("Got source pb", i.get_name())
same_pbs = i.get_name() in [j.get_name() for j in orig_h.source_problems]
self.assertTrue(same_pbs)
print("Services:", self.rg.services.__dict__)
for s in self.rg.services:
orig_s = self.sched.services.find_srv_by_name_and_hostname(s.host.host_name, s.service_description)
print(s.state, orig_s.state)
self.assertEqual(orig_s.state, s.state)
self.assertEqual(orig_s.state_type, s.state_type)
# Look for same impacts too
for i in s.impacts:
print("Got impact", i.get_name())
same_impacts = i.get_name() in [j.get_name() for j in orig_s.impacts]
self.assertTrue(same_impacts)
# And look for same source problems
for i in s.source_problems:
print("Got source pb", i.get_name())
same_pbs = i.get_name() in [j.get_name() for j in orig_s.source_problems]
self.assertTrue(same_pbs)
# Look for same host
self.assertEqual(orig_s.host.get_name(), s.host.get_name())
def test_regenerator(self):
#
# Config is not correct because of a wrong relative path
# in the main config file
#
# for h in self.sched.hosts:
# h.realm = h.realm.get_name()
self.sched.conf.skip_initial_broks = False
self.sched.brokers['Default-Broker'] = {'broks' : [], 'has_full_broks' : False}
self.sched.fill_initial_broks('Default-Broker')
self.rg = Regenerator()
# Got the initial creation ones
t0 = time.time()
for b in self.sched.broks:
print("Manage b", b.type)
#b.prepare()
self.rg.manage_brok(b)
t1 = time.time()
print('First inc', t1 - t0, len(self.sched.broks))
del self.sched.broks[:]
self.look_for_same_values()
print("Get the hosts and services")
host = self.sched.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
router = self.sched.hosts.find_by_name("test_router_0")
router.checks_in_progress = []
router.act_depend_of = [] # ignore the router
svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
svc.checks_in_progress = []
svc.act_depend_of = [] # no hostchecks on critical checkresults
self.scheduler_loop(3, [[host, 2, 'DOWN | value1=1 value2=2'], [router, 0, 'UP | rtt=10'], [svc, 2, 'BAD | value1=0 value2=0']])
self.assertEqual('DOWN', host.state)
self.assertEqual('HARD', host.state_type)
t0 = time.time()
for b in self.sched.broks:
print("Manage b", b.type)
#b.prepare()
self.rg.manage_brok(b)
t1 = time.time()
print('Time', t1 - t0)
del self.sched.broks[:]
self.look_for_same_values()
print('Time', t1 - t0)
b = svc.get_initial_status_brok()
#b.prepare()
print("GO BENCH!")
t0 = time.time()
for i in range(1, 1000):
b = svc.get_initial_status_brok()
#b.prepare()
s = Service({})
for (prop, value) in b.data.items():
setattr(s, prop, value)
t1 = time.time()
print("Bench end:", t1 - t0)
times = {}
sizes = {}
import pickle
data = {}
cls = svc.__class__
start = time.time()
for i in range(1, 10000):
for prop, entry in svc.__class__.properties.items():
# Is this property intended for brokking?
if 'full_status' in entry.fill_brok:
data[prop] = svc.get_property_value_for_brok(prop, cls.properties)
                    if prop not in times:
times[prop] = 0
sizes[prop] = 0
t0 = time.time()
tmp = pickle.dumps(data[prop], 0)
sizes[prop] += len(tmp)
times[prop] += time.time() - t0
print("Times")
for (k, v) in times.items():
print("\t%s: %s" % (k, v))
print("\n\n")
print("Sizes")
for (k, v) in sizes.items():
print("\t%s: %s" % (k, v))
print("\n")
print("total time", time.time() - start)
def test_regenerator_load_from_scheduler(self):
#
# Config is not correct because of a wrong relative path
# in the main config file
#
# for h in self.sched.hosts:
# h.realm = h.realm.get_name()
self.rg = Regenerator()
self.rg.load_from_scheduler(self.sched)
self.sched.conf.skip_initial_broks = False
self.sched.brokers['Default-Broker'] = {'broks' : [], 'has_full_broks' : False}
self.sched.fill_initial_broks('Default-Broker')
# Got the initial creation ones
t0 = time.time()
for b in self.sched.broks:
print("Manage b", b.type)
#b.prepare()
self.rg.manage_brok(b)
t1 = time.time()
print('First inc', t1 - t0, len(self.sched.broks))
del self.sched.broks[:]
self.look_for_same_values()
if __name__ == '__main__':
unittest.main()
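# A minimal sketch (an assumption, not the actual Regenerator code) of the
# rebuild step benchmarked above: an object is recreated from a brok's data
# dict by setting every property on a fresh instance.
def object_from_brok_data(cls, data):
    # cls is an object class whose constructor takes a params dict, e.g. Service({})
    obj = cls({})
    for prop, value in data.items():
        setattr(obj, prop, value)
    return obj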
| 7,423
|
Python
|
.py
| 174
| 32.862069
| 136
| 0.578649
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,276
|
test_antivirg.py
|
shinken-solutions_shinken/test/test_antivirg.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken_test import *
class TestConfig(ShinkenTest):
def setUp(self):
# load the configuration from file
self.setup_with_file('etc/shinken_antivirg.cfg')
def test_hostname_antivirg(self):
"""Check that it is allowed to have a host with the "__ANTI-VIRG__" substring in its hostname"""
# the global configuration must be valid
self.assertTrue(self.conf.conf_is_correct)
# try to get the host
# if it is not possible to get the host, it is probably because
# "__ANTI-VIRG__" has been replaced by ";"
hst = self.conf.hosts.find_by_name('test__ANTI-VIRG___0')
self.assertIsNotNone(hst, "host 'test__ANTI-VIRG___0' not found")
# Check that the host has a valid configuration
        self.assertTrue(hst.is_correct(), "config of host '%s' is not correct" % hst.get_name())
def test_parsing_comment(self):
"""Check that the semicolon is a comment delimiter"""
# the global configuration must be valid
self.assertTrue(self.conf.conf_is_correct, "config is not correct")
# try to get the host
hst = self.conf.hosts.find_by_name('test_host_1')
self.assertIsNotNone(hst, "host 'test_host_1' not found")
# Check that the host has a valid configuration
        self.assertTrue(hst.is_correct(), "config of host '%s' is not correct" % (hst.get_name()))
def test_escaped_semicolon(self):
"""Check that it is possible to have a host with a semicolon in its hostname
        The consequences of this aren't fully tested. We just try to send a
        command; other programs that send commands probably don't think to
        escape the semicolon.
"""
# the global configuration must be valid
self.assertTrue(self.conf.conf_is_correct)
# try to get the host
hst = self.conf.hosts.find_by_name('test_host_2;with_semicolon')
self.assertIsNotNone(hst, "host 'test_host_2;with_semicolon' not found")
# Check that the host has a valid configuration
        self.assertTrue(hst.is_correct(), "config of host '%s' is not correct" % hst.get_name())
# We can send a command by escaping the semicolon.
command = '[%lu] PROCESS_HOST_CHECK_RESULT;test_host_2\;with_semicolon;2;down' % (time.time())
self.sched.run_external_command(command)
        # may need 2 runs for the result to be consumed (reason unknown)
self.scheduler_loop(1, [])
self.scheduler_loop(1, [])
if '__main__' == __name__:
unittest.main()
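# A minimal sketch (a hypothetical helper, not part of Shinken) of the
# escaping convention exercised above: a literal ';' inside an external
# command argument is written as '\;' so it is not taken as a field separator.
def escape_external_command_arg(arg):
    return arg.replace(';', '\\;')

# escape_external_command_arg('test_host_2;with_semicolon') returns the
# 'test_host_2\;with_semicolon' form used in PROCESS_HOST_CHECK_RESULT above.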
| 2,699
|
Python
|
.py
| 49
| 46.795918
| 104
| 0.655369
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,277
|
test_bad_notification_character.py
|
shinken-solutions_shinken/test/test_bad_notification_character.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken_test import *
class TestConfig(ShinkenTest):
def setUp(self):
self.setup_with_file('etc/shinken_bad_notification_character.cfg')
def test_dummy(self):
        #
        # Send a notification whose output contains non-ASCII characters
        # and check that the scheduler accepts the result
        #
print("Get the hosts and services")
now = time.time()
host = self.sched.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
router = self.sched.hosts.find_by_name("test_router_0")
router.checks_in_progress = []
router.act_depend_of = [] # ignore the router
svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
svc.checks_in_progress = []
svc.act_depend_of = [] # no hostchecks on critical checkresults
self.scheduler_loop(2, [[host, 0, 'UP | value1=1 value2=2'], [router, 0, 'UP | rtt=10'], [svc, 2, 'BAD | value1=0 value2=0']])
self.assertEqual('UP', host.state)
self.assertEqual('HARD', host.state_type)
for n in list(svc.notifications_in_progress.values()):
print("HEHE")
print(n.__dict__)
n.execute()
print(n.exit_status)
n.output = u'I love myself $£¤'
self.sched.put_results(n)
if __name__ == '__main__':
unittest.main()
| 2,409
|
Python
|
.py
| 56
| 37.464286
| 134
| 0.664389
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,278
|
test_contactgroup_nomembers.py
|
shinken-solutions_shinken/test/test_contactgroup_nomembers.py
|
#!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken_test import *
class TestContactgroupWitoutMembers(ShinkenTest):
def setUp(self):
self.setup_with_file('etc/shinken_contactgroup_nomembers.cfg')
    # It seems that a contact group with no members causes a crash in the arbiter.
    # We should fix it :)
def test_contactgroup_nomember(self):
# Look for the members of the test_contact_nomember
cg = self.sched.conf.contactgroups.find_by_name('test_contact_nomember')
self.assertIsNot(cg, None)
print(cg.members)
self.assertEqual([], cg.members)
if __name__ == '__main__':
unittest.main()
| 1,581
|
Python
|
.py
| 37
| 39.648649
| 84
| 0.743322
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,279
|
test_service_description_inheritance.py
|
shinken-solutions_shinken/test/test_service_description_inheritance.py
|
#!/usr/bin/env python
# Copyright (C) 2009-2014:
# Sebastien Coavoux, s.coavoux@free.fr
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken_test import *
class TestServiceDescriptionInheritance(ShinkenTest):
def setUp(self):
self.setup_with_file('etc/shinken_service_description_inheritance.cfg')
def test_service_description_inheritance(self):
self.print_header()
svc = self.sched.services.find_srv_by_name_and_hostname("MYHOST", "SSH")
self.assertIsNotNone(svc)
def test_service_description_inheritance_multihosts(self):
self.print_header()
for hname in ["MYHOST2", "MYHOST3"]:
svc = self.sched.services.find_srv_by_name_and_hostname(hname, "SSH")
self.assertIsNotNone(svc)
def test_service_description_inheritance_with_defined_value(self):
for hname in ["MYHOST4"]:
svc = self.sched.services.find_srv_by_name_and_hostname(hname, "SUPER_SSH")
self.assertIsNotNone(svc)
for hname in ["MYHOST5"]:
svc = self.sched.services.find_srv_by_name_and_hostname(hname, "GOOD_SSH")
self.assertIsNotNone(svc)
def test_service_description_inheritance_with_duplicate(self):
for hname in ["MYHOST6"]:
svc = self.sched.services.find_srv_by_name_and_hostname(hname, "sys: cpu1")
self.assertIsNotNone(svc)
svc1 = self.sched.services.find_srv_by_name_and_hostname(hname, "sys: /tmp")
self.assertIsNotNone(svc1)
if __name__ == '__main__':
unittest.main()
| 2,357
|
Python
|
.py
| 50
| 41.6
| 88
| 0.708624
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,280
|
test_bad_template.py
|
shinken-solutions_shinken/test/test_bad_template.py
|
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken_test import ShinkenTest
class TestConfig(ShinkenTest):
def setUp(self):
pass # force no setUp for this class.
def test_bad_template_use_itself(self):
self.setup_with_file('etc/bad_template_use_itself.cfg')
self.assertIn(u"Host u'bla' use/inherits from itself ! Imported from: etc/bad_template_use_itself.cfg:1",
self.conf.hosts.configuration_errors)
def test_bad_host_use_undefined_template(self):
self.setup_with_file('etc/bad_host_use_undefined_template.cfg')
self.assertIn(u"Host u'bla' use/inherit from an unknown template (u'undefined') ! Imported from: etc/bad_host_use_undefined_template.cfg:2",
self.conf.hosts.configuration_warnings)
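# A minimal sketch (an assumption, not the real Shinken parser) of the check
# behind the first error above: walk the 'use' chains and flag any template
# that ends up inheriting from itself.
def find_self_inheritance(templates):
    # templates: dict mapping a name to the list of names it uses/inherits from
    errors = []
    for name in templates:
        seen = set()
        stack = list(templates.get(name, []))
        while stack:
            parent = stack.pop()
            if parent == name:
                errors.append("Host %r use/inherits from itself !" % name)
                break
            if parent in seen or parent not in templates:
                continue
            seen.add(parent)
            stack.extend(templates[parent])
    return errors

# find_self_inheritance({'bla': ['bla']}) yields one error for host 'bla'.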
| 842
|
Python
|
.py
| 13
| 55.846154
| 148
| 0.706813
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,281
|
test_resultmodulation.py
|
shinken-solutions_shinken/test/test_resultmodulation.py
|
#!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken_test import *
class TestConfig(ShinkenTest):
def setUp(self):
self.setup_with_file('etc/shinken_resultmodulation.cfg')
def get_svc(self):
return self.sched.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
def get_host(self):
return self.sched.hosts.find_by_name("test_host_0")
def get_router(self):
return self.sched.hosts.find_by_name("test_router_0")
def test_service_resultmodulation(self):
svc = self.get_svc()
host = self.get_host()
router = self.get_router()
self.scheduler_loop(2, [[host, 0, 'UP | value1=1 value2=2'], [svc, 2, 'BAD | value1=0 value2=0'],])
self.assertEqual('UP', host.state)
self.assertEqual('HARD', host.state_type)
        # This service has a result modulation, so CRITICALs are in fact
        # WARNINGs. So even with a CRITICAL (2) result, it must be WARNING
        self.assertEqual('WARNING', svc.state)
        # If we remove the resultmodulations, we should get the classic behavior
svc.resultmodulations = []
self.scheduler_loop(2, [[host, 0, 'UP | value1=1 value2=2'], [svc, 2, 'BAD | value1=0 value2=0']])
self.assertEqual('CRITICAL', svc.state)
        # Now look for the inherited thing:
        # resultmodulation is an implicitly inherited parameter, and the router
        # defines it but test_router_0/test_ok_0 does not, so this service
        # should also be impacted
svc2 = self.sched.services.find_srv_by_name_and_hostname("test_router_0", "test_ok_0")
self.assertEqual(router.resultmodulations, svc2.resultmodulations)
self.scheduler_loop(2, [[svc2, 2, 'BAD | value1=0 value2=0']])
self.assertEqual('WARNING', svc2.state)
if __name__ == '__main__':
unittest.main()
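# A minimal sketch (simplified; hypothetical names, not Shinken's exact API)
# of what a result modulation does: remap a check's exit status before it is
# consumed, here CRITICAL (2) -> WARNING (1) as asserted above.
def modulate_exit_status(exit_status, code_map=None):
    # code_map: {from_code: to_code}, or None when no modulation applies
    if code_map and exit_status in code_map:
        return code_map[exit_status]
    return exit_status

# modulate_exit_status(2, {2: 1}) -> 1: a CRITICAL result becomes WARNING.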
| 2,760
|
Python
|
.py
| 56
| 44.142857
| 107
| 0.698661
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,282
|
test_service_withhost_exclude.py
|
shinken-solutions_shinken/test/test_service_withhost_exclude.py
|
#!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken_test import *
class Testservice_withhost_exclude(ShinkenTest):
def setUp(self):
self.setup_with_file('etc/shinken_service_withhost_exclude.cfg')
def test_service_withhost_exclude(self):
        #
        # The service uses a host exclusion: it must exist on test_host_0
        # but not on the excluded test_router_0
        #
svc_exist = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "NotEverywhere")
self.assertIsNot(svc_exist, None)
svc_not_exist = self.sched.services.find_srv_by_name_and_hostname("test_router_0", "NotEverywhere")
self.assertIs(None, svc_not_exist)
self.assertTrue(self.sched.conf.is_correct)
if __name__ == '__main__':
unittest.main()
| 1,699
|
Python
|
.py
| 39
| 40
| 107
| 0.733051
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,283
|
test_complex_hostgroups.py
|
shinken-solutions_shinken/test/test_complex_hostgroups.py
|
#!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken_test import *
class TestComplexHostgroups(ShinkenTest):
def setUp(self):
self.setup_with_file('etc/shinken_complex_hostgroups.cfg')
def get_svc(self):
return self.sched.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
def find_service(self, name, desc):
return self.sched.services.find_srv_by_name_and_hostname(name, desc)
def find_host(self, name):
return self.sched.hosts.find_by_name(name)
def find_hostgroup(self, name):
return self.sched.hostgroups.find_by_name(name)
def dump_hosts(self, svc):
for h in svc.host_name:
print(h)
    # check that the service exists on the given hosts, but NOT on the others
def srv_define_only_on(self, desc, hsts):
r = True
# first hsts
for h in hsts:
svc = self.find_service(h.host_name, desc)
if svc is None:
print("Error: the host %s is missing service %s!!" % (h.host_name, desc))
r = False
for h in self.sched.hosts:
if h not in hsts:
svc = self.find_service(h.host_name, desc)
if svc is not None:
print("Error: the host %s got the service %s!!" % (h.host_name, desc))
r = False
return r
def test_complex_hostgroups(self):
print(self.sched.services.items)
svc = self.get_svc()
print("Service", svc)
#print(self.conf.hostgroups)
# All our hosts
test_linux_web_prod_0 = self.find_host('test_linux_web_prod_0')
test_linux_web_qual_0 = self.find_host('test_linux_web_qual_0')
test_win_web_prod_0 = self.find_host('test_win_web_prod_0')
test_win_web_qual_0 = self.find_host('test_win_web_qual_0')
test_linux_file_prod_0 = self.find_host('test_linux_file_prod_0')
hg_linux = self.find_hostgroup('linux')
hg_web = self.find_hostgroup('web')
hg_win = self.find_hostgroup('win')
hg_file = self.find_hostgroup('file')
print("HG Linux", hg_linux)
for h in hg_linux:
print("H", h.get_name())
self.assertIn(test_linux_web_prod_0, hg_linux.members)
self.assertNotIn(test_linux_web_prod_0, hg_file.members)
# First the service define for linux only
svc = self.find_service('test_linux_web_prod_0', 'linux_0')
print("Service Linux only", svc.get_dbg_name())
r = self.srv_define_only_on('linux_0', [test_linux_web_prod_0, test_linux_web_qual_0, test_linux_file_prod_0])
self.assertEqual(True, r)
print("Service Linux,web")
r = self.srv_define_only_on('linux_web_0', [test_linux_web_prod_0, test_linux_web_qual_0, test_linux_file_prod_0, test_win_web_prod_0, test_win_web_qual_0])
self.assertEqual(True, r)
### Now the real complex things :)
print("Service Linux&web")
r = self.srv_define_only_on('linux_AND_web_0', [test_linux_web_prod_0, test_linux_web_qual_0])
self.assertEqual(True, r)
print("Service Linux|web")
r = self.srv_define_only_on('linux_OR_web_0', [test_linux_web_prod_0, test_linux_web_qual_0, test_win_web_prod_0, test_win_web_qual_0, test_linux_file_prod_0])
self.assertEqual(True, r)
print("(linux|web),file")
r = self.srv_define_only_on('linux_OR_web_PAR_file0', [test_linux_web_prod_0, test_linux_web_qual_0, test_win_web_prod_0, test_win_web_qual_0, test_linux_file_prod_0, test_linux_file_prod_0])
self.assertEqual(True, r)
print("(linux|web)&prod")
r = self.srv_define_only_on('linux_OR_web_PAR_AND_prod0', [test_linux_web_prod_0, test_win_web_prod_0, test_linux_file_prod_0])
self.assertEqual(True, r)
print("(linux|web)&(*&!prod)")
r = self.srv_define_only_on('linux_OR_web_PAR_AND_NOT_prod0', [test_linux_web_qual_0, test_win_web_qual_0])
self.assertEqual(True, r)
print("Special minus problem")
r = self.srv_define_only_on('name-with-minus-in-it', [test_linux_web_prod_0])
self.assertEqual(True, r)
print("(linux|web)&prod AND not test_linux_file_prod_0")
r = self.srv_define_only_on('linux_OR_web_PAR_AND_prod0_AND_NOT_test_linux_file_prod_0', [test_linux_web_prod_0, test_win_web_prod_0])
self.assertEqual(True, r)
print("win&((linux|web)&prod) AND not test_linux_file_prod_0")
r = self.srv_define_only_on('WINDOWS_AND_linux_OR_web_PAR_AND_prod0_AND_NOT_test_linux_file_prod_0', [test_win_web_prod_0])
self.assertEqual(True, r)
if __name__ == '__main__':
unittest.main()
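# A minimal sketch (not Shinken's parser) of the set algebra the hostgroup
# expressions above denote: '&' is intersection, '|' is union, and '!' is the
# complement against the set of all hosts.
def eval_linux_or_web_and_not_prod(all_hosts, linux, web, prod):
    # (linux|web)&(*&!prod) from the test above, with Python sets
    return (linux | web) & (all_hosts - prod)

# With strings standing in for hosts:
# all_hosts={'lwp','lwq','wwp','wwq','lfp'}, linux={'lwp','lwq','lfp'},
# web={'lwp','lwq','wwp','wwq'}, prod={'lwp','wwp','lfp'}
# -> {'lwq', 'wwq'}, matching the qual-only hosts asserted above.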
| 5,622
|
Python
|
.py
| 109
| 43.889908
| 199
| 0.64752
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,284
|
test_missing_object_value.py
|
shinken-solutions_shinken/test/test_missing_object_value.py
|
#!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken_test import *
class TestMissingObjectValue(ShinkenTest):
def setUp(self):
self.setup_with_file('etc/shinken_missing_object_value.cfg')
def test_missing_object_value(self):
        #
        # The config is not correct because the service is missing a value,
        # as asserted below
        #
print("Get the hosts and services")
now = time.time()
host = self.conf.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
router = self.conf.hosts.find_by_name("test_router_0")
router.checks_in_progress = []
router.act_depend_of = [] # ignore the router
svc = self.conf.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
svc.checks_in_progress = []
svc.act_depend_of = [] # no hostchecks on critical checkresults
        # The service is missing a value for active_check_enabled; it's an error.
self.assertEqual(False, svc.is_correct())
if __name__ == '__main__':
unittest.main()
| 2,047
|
Python
|
.py
| 47
| 39.021277
| 90
| 0.7
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,285
|
test_strange_characters_commands.py
|
shinken-solutions_shinken/test/test_strange_characters_commands.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken_test import *
class TestStrangeCaracterInCommands(ShinkenTest):
def setUp(self):
self.setup_with_file('etc/shinken_strange_characters_commands.cfg')
time_hacker.set_real_time()
    # Try to call a dummy check with very strange characters and the like,
    # and see whether it runs or fails badly
def test_strange_characters_commands(self):
if os.name == 'nt':
return
        #
        # Launch a check whose command and output contain unusual characters
        # and make sure it runs and returns them intact
        #
print("Get the hosts and services")
now = time.time()
host = self.sched.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
router = self.sched.hosts.find_by_name("test_router_0")
router.checks_in_progress = []
router.act_depend_of = [] # ignore the router
svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
svc.checks_in_progress = []
svc.act_depend_of = [] # no hostchecks on critical checkresults
#self.scheduler_loop(2, [[host, 0, 'UP | value1=1 value2=2'], [router, 0, 'UP | rtt=10'], [svc, 2, 'BAD | value1=0 value2=0']])
#self.assertEqual('UP', host.state)
#self.assertEqual('HARD', host.state_type)
print(svc.check_command.command.command_line)
self.assertEqual(0, len(svc.get_checks_in_progress()))
svc.launch_check(time.time())
print(svc.checks_in_progress)
self.assertEqual(1, len(svc.get_checks_in_progress()))
c = svc.get_checks_in_progress().pop()
#print(c)
c.execute()
time.sleep(0.5)
c.check_finished(8000)
print(c.status)
self.assertEqual('done', c.status)
print(c.output)
self.assertEqual('£°é§', c.output)
print("Done with good output, that's great")
svc.consume_result(c)
self.assertEqual('£°é§', svc.output)
if __name__ == '__main__':
unittest.main()
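# A minimal sketch (an assumption about the decoding step, not Shinken's
# exact code) of what the test above relies on: raw check output bytes are
# decoded as UTF-8 with replacement rather than crashing on odd characters.
def decode_check_output(raw_bytes):
    return raw_bytes.decode('utf-8', 'replace').strip()

# decode_check_output(u'£°é§'.encode('utf-8')) -> u'£°é§'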
| 3,046
|
Python
|
.py
| 71
| 36.915493
| 135
| 0.663848
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,286
|
test_commands_perfdata.py
|
shinken-solutions_shinken/test/test_commands_perfdata.py
|
#!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test acknowledge of problems
#
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken_test import *
class TestConfig(ShinkenTest):
def setUp(self):
self.setup_with_file('etc/shinken_commands_perfdata.cfg')
def test_service_perfdata_command(self):
self.print_header()
        # We want an event handler (the perfdata command) to be put in the
        # actions dict after we get a service check result
now = time.time()
host = self.sched.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
svc.checks_in_progress = []
svc.act_depend_of = [] # no hostchecks on critical checkresults
#--------------------------------------------------------------
# initialize host/service state
#--------------------------------------------------------------
print("Service perfdata command", svc.__class__.perfdata_command, type(svc.__class__.perfdata_command))
        # We do not want it to be just a string but a real command object
self.assertNotIsInstance(svc.__class__.perfdata_command, str)
print(svc.__class__.perfdata_command.__class__.my_type)
self.assertEqual('CommandCall', svc.__class__.perfdata_command.__class__.my_type)
self.scheduler_loop(1, [[svc, 0, 'OK | bibi=99%']])
print("Actions", self.sched.actions)
self.assertEqual(1, self.count_actions())
# Ok now I disable the perfdata
now = time.time()
cmd = "[%lu] DISABLE_PERFORMANCE_DATA" % now
self.sched.run_external_command(cmd)
self.scheduler_loop(1, [[svc, 0, 'OK | bibi=99%']])
print("Actions", self.sched.actions)
self.assertEqual(0, self.count_actions())
def test_host_perfdata_command(self):
        # We want an event handler (the perfdata command) to be put in the
        # actions dict after we get a service check result
now = time.time()
host = self.sched.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
svc.checks_in_progress = []
svc.act_depend_of = [] # no hostchecks on critical checkresults
#--------------------------------------------------------------
# initialize host/service state
#--------------------------------------------------------------
print("Host perfdata command", host.__class__.perfdata_command, type(host.__class__.perfdata_command))
        # We do not want it to be just a string but a real command object
self.assertNotIsInstance(host.__class__.perfdata_command, str)
print(host.__class__.perfdata_command.__class__.my_type)
self.assertEqual('CommandCall', host.__class__.perfdata_command.__class__.my_type)
self.scheduler_loop(1, [[host, 0, 'UP | bibi=99%']])
print("Actions", self.sched.actions)
self.assertEqual(1, self.count_actions())
# Ok now I disable the perfdata
now = time.time()
cmd = "[%lu] DISABLE_PERFORMANCE_DATA" % now
self.sched.run_external_command(cmd)
self.scheduler_loop(1, [[host, 0, 'UP | bibi=99%']])
print("Actions", self.sched.actions)
self.assertEqual(0, self.count_actions())
def test_multiline_perfdata(self):
self.print_header()
        # We want an event handler (the perfdata command) to be put in the
        # actions dict after we get a service check result
now = time.time()
host = self.sched.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
svc.checks_in_progress = []
svc.act_depend_of = [] # no hostchecks on critical checkresults
#--------------------------------------------------------------
# initialize host/service state
#--------------------------------------------------------------
print("Service perfdata command", svc.__class__.perfdata_command, type(svc.__class__.perfdata_command))
        # We do not want it to be just a string but a real command object
self.assertNotIsInstance(svc.__class__.perfdata_command, str)
print(svc.__class__.perfdata_command.__class__.my_type)
self.assertEqual('CommandCall', svc.__class__.perfdata_command.__class__.my_type)
output = """DISK OK - free space: / 3326 MB (56%); | /=2643MB;5948;5958;0;5968
/ 15272 MB (77%);
/boot 68 MB (69%);
/home 69357 MB (27%);
/var/log 819 MB (84%); | /boot=68MB;88;93;0;98
/home=69357MB;253404;253409;0;253414
/var/log=818MB;970;975;0;980
"""
self.scheduler_loop(1, [[svc, 0, output]])
print("Actions", self.sched.actions)
print('Output', svc.output)
print('long', svc.long_output)
print('perf', svc.perf_data)
self.assertEqual('DISK OK - free space: / 3326 MB (56%);', svc.output.strip())
self.assertEqual(u'/=2643MB;5948;5958;0;5968 /boot=68MB;88;93;0;98 /home=69357MB;253404;253409;0;253414 /var/log=818MB;970;975;0;980', svc.perf_data.strip())
print(svc.long_output.split('\n'))
self.assertEqual(u"""/ 15272 MB (77%);
/boot 68 MB (69%);
/home 69357 MB (27%);
/var/log 819 MB (84%);""", svc.long_output)
if __name__ == '__main__':
unittest.main()
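# A minimal sketch (simplified, not Shinken's exact parser) of how the
# multi-line plugin output above splits into output / long_output / perf_data:
# the first line is "output | perfdata", following lines are long output, and
# a later '|' starts additional perfdata that is joined onto perf_data.
def split_plugin_output(raw):
    lines = raw.strip().splitlines()
    first, _, perf = lines[0].partition('|')
    long_output, extra_perf = [], []
    in_perf = False
    for line in lines[1:]:
        if not in_perf and '|' in line:
            before, _, after = line.partition('|')
            long_output.append(before.rstrip())
            extra_perf.append(after.strip())
            in_perf = True
        elif in_perf:
            extra_perf.append(line.strip())
        else:
            long_output.append(line.rstrip())
    perf_data = ' '.join([perf.strip()] + extra_perf).strip()
    return first.strip(), '\n'.join(long_output), perf_data

# Applied to the DISK OK output above, it returns the same output,
# long_output and perf_data values the assertions check.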
| 6,459
|
Python
|
.py
| 125
| 44.936
| 165
| 0.611841
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,287
|
test_inheritance_and_plus.py
|
shinken-solutions_shinken/test/test_inheritance_and_plus.py
|
#!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken_test import *
class TestInheritanceAndPlus(ShinkenTest):
def setUp(self):
self.setup_with_file('etc/shinken_inheritance_and_plus.cfg')
def test_inheritance_and_plus(self):
        #
        # Check hostgroup and servicegroup inheritance with the '+' operator
        #
print("Get the hosts and services")
now = time.time()
linux = self.sched.hostgroups.find_by_name('linux')
self.assertIsNot(linux, None)
dmz = self.sched.hostgroups.find_by_name('DMZ')
self.assertIsNot(dmz, None)
mysql = self.sched.hostgroups.find_by_name('mysql')
self.assertIsNot(mysql, None)
host1 = self.sched.hosts.find_by_name("test-server1")
host2 = self.sched.hosts.find_by_name("test-server2")
# HOST 1 is lin-servers,dmz, so should be in linux AND DMZ group
for hg in host1.hostgroups:
print(hg.get_name())
self.assertIn(linux.get_name(), [hg.get_name() for hg in host1.hostgroups])
self.assertIn(dmz.get_name(), [hg.get_name() for hg in host1.hostgroups])
# HOST2 is in lin-servers,dmz and +mysql, so all three of them
for hg in host2.hostgroups:
print(hg.get_name())
self.assertIn(linux.get_name(), [hg.get_name() for hg in host2.hostgroups])
self.assertIn(dmz.get_name(), [hg.get_name() for hg in host2.hostgroups])
self.assertIn(mysql.get_name(), [hg.get_name() for hg in host2.hostgroups])
service = self.sched.services.find_srv_by_name_and_hostname("pack-host", 'CHILDSERV')
sgs = [sg.get_name() for sg in service.servicegroups]
self.assertIn("generic-sg", sgs)
self.assertIn("another-sg", sgs)
def test_pack_like_inheritance(self):
# get our pack service
host = self.sched.hosts.find_by_name('pack-host')
service = host.find_service_by_name('CHECK-123')
# it should exist
self.assertIsNotNone(service)
# it should contain the custom variable `_CUSTOM_123` because custom
# variables are always stored in upper case
customs = service.customs
self.assertIn('_CUSTOM_123', customs)
if __name__ == '__main__':
unittest.main()
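# A minimal sketch (an assumption, simplified from Shinken's inheritance
# rules) of the '+' semantics exercised above: a value starting with '+'
# appends to the inherited list instead of replacing it.
def merge_inherited(inherited, own):
    # values are comma-separated strings as written in the config files
    if own.startswith('+'):
        return inherited + ',' + own[1:]
    return own

# merge_inherited('lin-servers,dmz', '+mysql') -> 'lin-servers,dmz,mysql'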
| 3,152
|
Python
|
.py
| 66
| 41.454545
| 93
| 0.681966
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,288
|
test_no_notification_period.py
|
shinken-solutions_shinken/test/test_no_notification_period.py
|
#!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken_test import *
class TestNoNotificationPeriod(ShinkenTest):
def setUp(self):
self.setup_with_file('etc/shinken_no_notification_period.cfg')
    # No notification period should behave like a 24x7 period:
    # it stays None, but is always considered valid in create_notification
def test_no_notification_period(self):
print("Get the hosts and services")
now = time.time()
host = self.sched.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
router = self.sched.hosts.find_by_name("test_router_0")
router.checks_in_progress = []
router.act_depend_of = [] # ignore the router
svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
svc.checks_in_progress = []
svc.act_depend_of = [] # no hostchecks on critical checkresults
self.scheduler_loop(2, [[host, 0, 'UP | value1=1 value2=2'], [router, 0, 'UP | rtt=10'], [svc, 0, 'OK | value1=0 value2=0']])
self.assertEqual('UP', host.state)
self.assertEqual('HARD', host.state_type)
# Now get bad :)
self.scheduler_loop(2, [[svc, 2, 'BAD | value1=0 value2=0']])
self.assertIs(None, svc.notification_period)
self.assert_any_log_match('SERVICE NOTIFICATION.*;CRITICAL')
# Now for the host :)
self.scheduler_loop(5, [[host, 2, 'BAD | value1=0 value2=0']])
self.assertIs(None, host.notification_period)
self.assert_any_log_match('HOST NOTIFICATION.*;DOWN')
if __name__ == '__main__':
unittest.main()
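# A minimal sketch (hypothetical, matching the behaviour asserted above) of
# the rule under test: a notification_period of None is treated as always
# valid, i.e. like a 24x7 timeperiod.
def period_is_valid(period, t):
    if period is None:
        return True
    return period.is_time_valid(t)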
| 2,584
|
Python
|
.py
| 54
| 42.851852
| 133
| 0.68745
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,289
|
test_host_missing_adress.py
|
shinken-solutions_shinken/test/test_host_missing_adress.py
|
#!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken_test import *
class TestConfig(ShinkenTest):
def setUp(self):
self.setup_with_file('etc/shinken_host_missing_adress.cfg')
def test_host_missing_adress(self):
        # The router has no address. It should be set to the
        # host_name instead, and this should not be an error
now = time.time()
router = self.sched.hosts.find_by_name("test_router_0")
print("router adress:", router.address)
self.assertEqual('test_router_0', router.address)
if __name__ == '__main__':
unittest.main()
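# A minimal sketch (an assumption, not Shinken's actual fill_default code) of
# the fallback checked above: a host without an 'address' gets its host_name
# as address instead of raising a configuration error.
def fill_default_address(host_name, address=None):
    return address if address else host_name

# fill_default_address('test_router_0') -> 'test_router_0'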
| 1,518
|
Python
|
.py
| 36
| 39.055556
| 82
| 0.733379
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,290
|
test_srv_badhost.py
|
shinken-solutions_shinken/test/test_srv_badhost.py
|
#!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken_test import *
class TestServiceWhithBadHost(ShinkenTest):
def setUp(self):
try:
self.setup_with_file('etc/shinken_srv_badhost.cfg')
except AttributeError:
pass
    # Nagios allows services with no host to exist; it will just drop them
def test_ServiceWhitNoHost(self):
self.assertEqual(False, self.conf.conf_is_correct)
if __name__ == '__main__':
unittest.main()
| 1,405
|
Python
|
.py
| 35
| 37.085714
| 82
| 0.742291
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,291
|
test_business_rules_with_bad_realm_conf.py
|
shinken-solutions_shinken/test/test_business_rules_with_bad_realm_conf.py
|
#!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken_test import *
class TestBusinessRulesBadRealmConf(ShinkenTest):
def setUp(self):
self.setup_with_file('etc/shinken_business_rules_bad_realm_conf.cfg')
def test_bad_conf(self):
self.assertFalse(self.conf.conf_is_correct)
if __name__ == '__main__':
unittest.main()
| 1,275
|
Python
|
.py
| 31
| 38.967742
| 82
| 0.758091
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,292
|
test_groups_with_no_alias.py
|
shinken-solutions_shinken/test/test_groups_with_no_alias.py
|
#!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken_test import *
class TestGroupwithNoAlias(ShinkenTest):
def setUp(self):
self.setup_with_file('etc/shinken_groups_with_no_alias.cfg')
def test_look_for_alias(self):
        #
        # Groups defined without an alias should get their name as alias
        #
print("Get the hosts and services")
now = time.time()
hg = self.sched.hostgroups.find_by_name("NOALIAS")
self.assertIsNot(hg, None)
print(hg.__dict__)
self.assertEqual("NOALIAS", hg.alias)
sg = self.sched.servicegroups.find_by_name("NOALIAS")
self.assertIsNot(sg, None)
print(sg.__dict__)
self.assertEqual("NOALIAS", sg.alias)
if __name__ == '__main__':
unittest.main()
| 1,739
|
Python
|
.py
| 44
| 35.295455
| 82
| 0.707418
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,293
|
test_action.py
|
shinken-solutions_shinken/test/test_action.py
|
#!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import sys
import time
from shinken_test import ShinkenTest, unittest, time_hacker
from shinken.action import Action
class TestAction(ShinkenTest):
def setUp(self):
self.setup_with_file('etc/shinken_1r_1h_1s.cfg')
time_hacker.set_real_time()
def wait_finished(self, a, size=8012):
start = time.time()
while True:
# Do the job
if a.status == 'launched':
#print(a.process.poll())
a.check_finished(size)
time.sleep(0.01)
#print(a.status)
if a.status != 'launched':
#print("Finish", a.status)
return
# 20s timeout
if time.time() - start > 20:
print("COMMAND TIMEOUT AT 20s")
return
def test_action(self):
a = Action()
a.timeout = 10
a.env = {}
if os.name == 'nt':
a.command = r'libexec\\dummy_command.cmd'
else:
a.command = "libexec/dummy_command.sh"
self.assertEqual(False, a.got_shell_characters())
a.execute()
self.assertEqual('launched', a.status)
        # Also give the max output size we want for the command
self.wait_finished(a)
self.assertEqual(0, a.exit_status)
self.assertEqual('done', a.status)
print(a.output)
self.assertEqual("Hi, I'm for testing only. Please do not use me directly, really", a.output)
self.assertEqual("Hip=99% Bob=34mm", a.perf_data)
def test_echo_environment_variables(self):
if os.name == 'nt':
return
a = Action()
a.timeout = 10
        a.env = {} # :fixme: this should be pre-set in Action.__init__()
a.command = "echo $TITI"
self.assertNotIn('TITI', a.get_local_environnement())
a.env = {'TITI': 'est en vacance'}
self.assertIn('TITI', a.get_local_environnement())
self.assertEqual(a.get_local_environnement()['TITI'], 'est en vacance')
a.execute()
self.wait_finished(a)
self.assertEqual(a.output, 'est en vacance')
def test_grep_for_environment_variables(self):
if os.name == 'nt':
return
a = Action()
a.timeout = 10
        a.env = {} # :fixme: this should be pre-set in Action.__init__()
a.command = "/usr/bin/env | grep TITI"
self.assertNotIn('TITI', a.get_local_environnement())
a.env = {'TITI': 'est en vacance'}
self.assertIn('TITI', a.get_local_environnement())
self.assertEqual(a.get_local_environnement()['TITI'],
'est en vacance' )
a.execute()
self.wait_finished(a)
self.assertEqual(a.output, 'TITI=est en vacance')
def test_environment_variables(self):
class ActionWithoutPerfData(Action):
def get_outputs(self, out, max_len):
                # do not split the output into perf_data, to avoid problems
                # with environments containing a pipe like in
                # `LESSOPEN=|/usr/bin/lesspipe.sh %s`
self.output = out
if os.name == 'nt':
return
a = ActionWithoutPerfData()
a.timeout = 10
a.command = "/usr/bin/env"
        a.env = {} # :fixme: this should be pre-set in Action.__init__()
self.assertNotIn('TITI', a.get_local_environnement())
a.env = {'TITI': 'est en vacance'}
self.assertEqual(False, a.got_shell_characters())
self.assertIn('TITI', a.get_local_environnement())
self.assertEqual(a.get_local_environnement()['TITI'],
'est en vacance' )
a.execute()
self.assertEqual('launched', a.status)
        # Also give the max output size we want for the command
self.wait_finished(a, size=20*1024)
titi_found = False
for l in a.output.splitlines():
if l == 'TITI=est en vacance':
titi_found = True
self.assertTrue(titi_found)
    # Some commands are shell scripts without shebangs! (like in Centreon...)
    # We may see this at launch, and it should be handled
def test_noshell_bang_command(self):
a = Action()
a.timeout = 10
a.command = "libexec/dummy_command_nobang.sh"
a.env = {}
if os.name == 'nt':
return
self.assertEqual(False, a.got_shell_characters())
a.execute()
self.assertEqual('launched', a.status)
self.wait_finished(a)
self.assertEqual(0, a.exit_status)
self.assertEqual('done', a.status)
def test_got_shell_characters(self):
a = Action()
a.timeout = 10
a.command = "libexec/dummy_command_nobang.sh && echo finished ok"
a.env = {}
if os.name == 'nt':
return
self.assertEqual(True, a.got_shell_characters())
a.execute()
self.assertEqual('launched', a.status)
self.wait_finished(a)
self.assertEqual(0, a.exit_status)
self.assertEqual('done', a.status)
def test_got_pipe_shell_characters(self):
a = Action()
a.timeout = 10
a.command = "libexec/dummy_command_nobang.sh | grep 'Please do not use me directly'"
a.env = {}
if os.name == 'nt':
return
self.assertEqual(True, a.got_shell_characters())
a.execute()
self.assertEqual('launched', a.status)
self.wait_finished(a)
self.assertEqual(0, a.exit_status)
self.assertEqual('done', a.status)
def test_got_unclosed_quote(self):
# https://github.com/naparuba/shinken/issues/155
a = Action()
a.timeout = 10
a.command = "libexec/dummy_command_nobang.sh -a 'wwwwzzzzeeee"
a.env = {}
if os.name == 'nt':
return
a.execute()
self.wait_finished(a)
self.assertEqual('done', a.status)
self.assertEqual('Not a valid shell command: No closing quotation', a.output)
self.assertEqual(3, a.exit_status)
    # We had problems with LARGE outputs, more than 64K in fact.
    # We try to solve it with fcntl and non-blocking reads
    # instead of "communicate" mode. So here we try to get a 100K
    # output. It should NOT hit the timeout
def test_huge_output(self):
a = Action()
a.timeout = 5
a.env = {}
if os.name == 'nt':
a.command = """python3 -c 'print("A"*1000000)'"""
            # FOR NOW IT FAILS ON WINDOWS :(
return
else:
a.command = """python3 -u -c 'print("A"*100000)'"""
print("EXECUTE")
a.execute()
print("EXECUTE FINISE")
self.assertEqual('launched', a.status)
        # Also give the max output size we want for the command
self.wait_finished(a, 10000000000)
print("Status?", a.exit_status)
self.assertEqual(0, a.exit_status)
print("Output", len(a.output))
self.assertEqual(0, a.exit_status)
self.assertEqual('done', a.status)
self.assertEqual("A"*100000, a.output)
self.assertEqual("", a.perf_data)
def test_execve_fail_with_utf8(self):
if os.name == 'nt':
return
a = Action()
a.timeout = 10
        a.env = {} # :fixme: this should be pre-set in Action.__init__()
a.command = "/bin/echo Wiadomo\u015b\u0107"
a.execute()
self.wait_finished(a)
self.assertEqual(a.output, "Wiadomo\u015b\u0107")
def test_non_zero_exit_status_empty_output_but_non_empty_stderr(self):
a = Action()
a.command = "echo hooo >&2 ; exit 1"
a.timeout = 10
        a.env = {} # :fixme: this should be pre-set in Action.__init__()
a.execute()
self.wait_finished(a)
self.assertEqual(a.output, "hooo")
if __name__ == '__main__':
unittest.main()
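# A minimal sketch (an assumption, not the actual Action internals) of the
# fcntl + non-blocking read mentioned before test_huge_output: it avoids the
# pipe-buffer deadlock (~64K) that blocking reads hit on large outputs.
# POSIX only, which is why that test skips Windows.
import fcntl

def set_nonblocking(fd):
    flags = fcntl.fcntl(fd, fcntl.F_GETFL)
    fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)

def read_available(fd, chunks):
    # Append whatever is currently available on fd; return False on EOF.
    try:
        data = os.read(fd, 4096)
    except OSError:
        return True  # EAGAIN: nothing to read yet, the process is still running
    if not data:
        return False
    chunks.append(data)
    return True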
| 8,877
|
Python
|
.py
| 224
| 30.875
| 101
| 0.596909
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,294
|
test_safe_pickle.py
|
shinken-solutions_shinken/test/test_safe_pickle.py
|
#!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from __future__ import absolute_import, division, print_function, unicode_literals
import six
from shinken_test import *
from shinken.safepickle import SafeUnpickler
import pickle
import sys
import io
should_not_change = False
def fff(b):
global should_not_change
should_not_change = b
class SadPanda(object):
def __reduce__(self):
return (fff, (True,))
class TestSafePickle(ShinkenTest):
def setUp(self):
pass
def launch_safe_pickle(self, buf):
SafeUnpickler(io.BytesIO(buf)).load()
def test_safe_pickle(self):
global should_not_change
print("Creating payload")
buf = pickle.dumps(SadPanda(), 0)
should_not_change = False
print("Payload", buf)
print("Now loading payload")
pickle.loads(buf)
print(should_not_change)
self.assertTrue(should_not_change)
# reset and try our fix
should_not_change = False
def launch_safe_pickle():
SafeUnpickler(io.BytesIO(buf)).load()
self.assertRaises(ValueError, launch_safe_pickle)
        print(should_not_change)
self.assertFalse(should_not_change)
    # Thanks to the security team @Dailymotion: we did have an RCE that looked
    # like a return-into-libc exploit. It used the bottle._load code (which
    # blindly calls __import__), so the injected code was finally executed. And
    # as it's shinken.webui.bottle, it passed the safe_pickle filter. Smart ^^
def test_safe_pickle_exploit_rce(self):
        # Bottle should not be embedded anymore
if not six.PY2:
return
try:
import shinken.webui.bottlewebui
except ImportError:
return
###### Phase 1: can be exploited
# Arrange
rce_path = '/rce_exploited'
if rce_path in sys.path:
sys.path.remove(rce_path)
payload = """cshinken.webui.bottlewebui
_load
(S'sys:path.append("%s")'
tR.""" % rce_path
payload = payload.encode("utf-8")
# Act
print("Now loading payload")
pickle.loads(payload)
# Assert
self.assertTrue(rce_path in sys.path)
        ##### Phase 2: no longer exploitable when calling the safe one
# Arrange
sys.path.remove(rce_path)
def launch_safe_pickle():
SafeUnpickler(io.BytesIO(payload)).load()
# Act
self.assertRaises(ValueError, launch_safe_pickle)
# Assert
self.assertTrue(rce_path not in sys.path)
    # Thanks to the security team @Dailymotion: we did have an RCE that looked
    # like a return-into-libc exploit. It used the bottle._load code (which
    # blindly calls __import__), so the injected code was finally executed. And
    # as it's shinken.webui.bottle, it passed the safe_pickle filter. Smart ^^
def test_safe_pickle_exploit_rce_can_load(self):
        ###### Legitimate payloads must still load fine
# Arrange
payload = pickle.dumps(Brok('void', {}))
# Act
b = SafeUnpickler(io.BytesIO(payload)).load()
# Assert
self.assertTrue(isinstance(b, Brok))
if __name__ == '__main__':
unittest.main()
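# A minimal sketch (the same idea as SafeUnpickler, but an assumption rather
# than its actual code): a restricted unpickler whose find_class() resolves
# only whitelisted globals and raises ValueError otherwise, which is what the
# assertRaises(ValueError, ...) calls above exercise.
class RestrictedUnpickler(pickle.Unpickler):
    ALLOWED = {('shinken.brok', 'Brok')}  # hypothetical whitelist

    def find_class(self, module, name):
        if (module, name) not in self.ALLOWED:
            raise ValueError('Forbidden global during unpickle: %s.%s' % (module, name))
        return pickle.Unpickler.find_class(self, module, name)

# RestrictedUnpickler(io.BytesIO(payload)).load() fails with ValueError for
# payloads that resolve things like os.system or bottle._load.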
| 4,071
|
Python
|
.py
| 108
| 31.518519
| 94
| 0.669552
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,295
|
test_service_without_host.py
|
shinken-solutions_shinken/test/test_service_without_host.py
|
#!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken_test import *
class Testservice_without_host(ShinkenTest):
def setUp(self):
self.setup_with_file('etc/shinken_service_without_host.cfg')
def test_service_without_host_do_not_break(self):
self.assertIs(False, self.conf.conf_is_correct)
#[b.prepare() for b in self.broks]
logs = [b.data['log'] for b in self.broks if b.type == 'log']
self.assertLess(
0,
len([ log
for log in logs
if re.search("The service 'WillError' got an unknown host_name 'NOEXIST'",
log)
]))
if __name__ == '__main__':
unittest.main()
| 1,647
|
Python
|
.py
| 40
| 36.025
| 94
| 0.687539
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,296
|
test_checkmodulations.py
|
shinken-solutions_shinken/test/test_checkmodulations.py
|
#!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken_test import *
class TestCheckModulations(ShinkenTest):
def setUp(self):
self.setup_with_file('etc/shinken_checkmodulations.cfg')
def test_dummy(self):
        #
        # Check that the modulation is linked to the host and that the
        # modulated check command is the one actually used
        #
print("Get the hosts and services")
now = time.time()
host = self.sched.hosts.find_by_name("host_modulated")
self.assertIsNot(host, None)
print(host.checkmodulations)
mod = self.sched.checkmodulations.find_by_name("MODULATION")
self.assertIsNot(mod, None)
self.assertIn(mod, host.checkmodulations)
c = None
for c in host.get_checks_in_progress():
print(c.command)
self.assertEqual('plugins/nothing VALUE', c.command)
if __name__ == '__main__':
unittest.main()
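The test above only checks that the modulated command replaces the default one. A minimal sketch of what a check modulation conceptually does (the attribute names and time-period API are assumptions for illustration, not Shinken's internals):
import time
def modulated_command(default_cmd, modulations, now=None):
    # Return the first modulation's command whose time period is active,
    # otherwise fall back to the default check command.
    now = now if now is not None else time.time()
    for mod in modulations:
        period = mod.get('check_period')
        if period is None or period(now):
            return mod['check_command']
    return default_cmd
always = lambda t: True
print(modulated_command('plugins/default',
                        [{'check_period': always,
                          'check_command': 'plugins/nothing VALUE'}]))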
| 1,842 | Python | .py | 46 | 35.391304 | 82 | 0.709641 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,297 | test_escalations.py | shinken-solutions_shinken/test/test_escalations.py |
#!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test host and service escalations.
#
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken_test import *
from shinken.objects.serviceescalation import Serviceescalation
class TestEscalations(ShinkenTest):
def setUp(self):
self.setup_with_file('etc/shinken_escalations.cfg')
time_hacker.set_real_time()
    def test_wildcard_in_service_description(self):
self.print_header()
generated = None
for es in self.sched.conf.escalations:
if re.match("Generated-Serviceescalation-.*", es.get_name()) is not None:
generated = es
break
for svc in self.sched.services.find_srvs_by_hostname("test_host_0"):
self.assertIn(generated, svc.escalations)
def test_simple_escalation(self):
self.print_header()
# retry_interval 2
# critical notification
# run loop -> another notification
now = time.time()
host = self.sched.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
# To make tests quicker we make notifications send very quickly
svc.notification_interval = 0.001
svc.checks_in_progress = []
svc.act_depend_of = [] # no hostchecks on critical checkresults
#--------------------------------------------------------------
# initialize host/service state
#--------------------------------------------------------------
self.scheduler_loop(1, [[host, 0, 'UP']], do_sleep=True, sleep_time=0.1)
print("- 1 x OK -------------------------------------")
self.scheduler_loop(1, [[svc, 0, 'OK']], do_sleep=True, sleep_time=0.1)
self.assertEqual(0, svc.current_notification_number)
tolevel2 = self.sched.conf.escalations.find_by_name('ToLevel2')
self.assertIsNot(tolevel2, None)
self.assertIn(tolevel2, svc.escalations)
tolevel3 = self.sched.conf.escalations.find_by_name('ToLevel3')
self.assertIsNot(tolevel3, None)
self.assertIn(tolevel3, svc.escalations)
for es in svc.escalations:
print(es.__dict__)
#--------------------------------------------------------------
# service reaches soft;1
# there must not be any notification
#--------------------------------------------------------------
print("- 1 x BAD get soft -------------------------------------")
self.scheduler_loop(1, [[svc, 2, 'BAD']], do_sleep=True, sleep_time=0.1)
# check_notification: not (soft)
print("---current_notification_number", svc.current_notification_number)
#--------------------------------------------------------------
# service reaches hard;2
# a notification must have been created
# notification number must be 1
#--------------------------------------------------------------
print("- 1 x BAD get hard -------------------------------------")
self.scheduler_loop(1, [[svc, 2, 'BAD']], do_sleep=True, sleep_time=0.1)
# We check if we really notify the level1
self.assert_any_log_match('SERVICE NOTIFICATION: level1.*;CRITICAL;')
self.show_and_clear_logs()
#self.show_and_clear_actions()
self.show_actions()
print(svc.notifications_in_progress)
for n in svc.notifications_in_progress.values():
print(n)
# check_notification: yes (hard)
print("---current_notification_number", svc.current_notification_number)
        # notification number 1 has just been sent; the next one is scheduled
        # and is waiting for notification_interval to pass, so the current
        # number is 1
self.assertEqual(1, svc.current_notification_number)
print("OK, level1 is notified, notif nb = 1")
print("---------------------------------1st round with a hard")
print("find a way to get the number of the last reaction")
cnn = svc.current_notification_number
print("- 1 x BAD repeat -------------------------------------")
self.scheduler_loop(1, [[svc, 2, 'BAD']], do_sleep=True, sleep_time=0.1)
self.assertIn(True, [n.escalated for n in self.sched.actions.values()])
        # Now the notif number reaches 2, so we can escalate
self.assert_any_log_match('SERVICE NOTIFICATION: level2.*;CRITICAL;')
self.show_and_clear_logs()
self.show_actions()
print("cnn and cur", cnn, svc.current_notification_number)
self.assertGreater(svc.current_notification_number, cnn)
cnn = svc.current_notification_number
        # One more bad check, we go to 3
self.scheduler_loop(1, [[svc, 2, 'BAD']], do_sleep=True, sleep_time=0.1)
self.assertIn(True, [n.escalated for n in self.sched.actions.values()])
self.assert_any_log_match('SERVICE NOTIFICATION: level2.*;CRITICAL;')
self.show_and_clear_logs()
        # We go to 4, still level2
self.scheduler_loop(1, [[svc, 2, 'BAD']], do_sleep=True, sleep_time=0.1)
self.assertIn(True, [n.escalated for n in self.sched.actions.values()])
self.assert_any_log_match('SERVICE NOTIFICATION: level2.*;CRITICAL;')
self.show_and_clear_logs()
        # We go to 5! We escalate to level3
self.scheduler_loop(1, [[svc, 2, 'BAD']], do_sleep=True, sleep_time=0.1)
self.assertIn(True, [n.escalated for n in self.sched.actions.values()])
self.assert_any_log_match('SERVICE NOTIFICATION: level3.*;CRITICAL;')
self.show_and_clear_logs()
        # Now we send 10 more notifs; we must still be at level3
for i in range(10):
self.scheduler_loop(1, [[svc, 2, 'BAD']], do_sleep=True, sleep_time=0.1)
self.assert_any_log_match('SERVICE NOTIFICATION: level3.*;CRITICAL;')
self.show_and_clear_logs()
        # Now we recover; it will be fun because each of level{1,2,3} must
        # be sent a notif
self.scheduler_loop(2, [[svc, 0, 'OK']], do_sleep=True, sleep_time=0.1)
self.show_actions()
self.assert_any_log_match('SERVICE NOTIFICATION: level1.*;OK;')
self.assert_any_log_match('SERVICE NOTIFICATION: level2.*;OK;')
self.assert_any_log_match('SERVICE NOTIFICATION: level3.*;OK;')
self.show_and_clear_logs()
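    # Sketch (an assumption about the semantics, not the real API): a
    # count-based escalation like ToLevel2 is active when the current
    # notification number falls in its window, roughly:
    #   def is_active(esc, notif_number):
    #       return esc.first_notification <= notif_number <= esc.last_notification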
def test_time_based_escalation(self):
self.print_header()
# retry_interval 2
# critical notification
# run loop -> another notification
now = time.time()
host = self.sched.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0_time")
# To make tests quicker we make notifications send very quickly
svc.notification_interval = 0.001
svc.checks_in_progress = []
svc.act_depend_of = [] # no hostchecks on critical checkresults
#--------------------------------------------------------------
# initialize host/service state
#--------------------------------------------------------------
self.scheduler_loop(1, [[host, 0, 'UP']], do_sleep=True, sleep_time=0.1)
print("- 1 x OK -------------------------------------")
self.scheduler_loop(1, [[svc, 0, 'OK']], do_sleep=True, sleep_time=0.1)
self.assertEqual(0, svc.current_notification_number)
        # We check that we correctly linked our escalations
tolevel2_time = self.sched.conf.escalations.find_by_name('ToLevel2-time')
self.assertIsNot(tolevel2_time, None)
self.assertIn(tolevel2_time, svc.escalations)
tolevel3_time = self.sched.conf.escalations.find_by_name('ToLevel3-time')
self.assertIsNot(tolevel3_time, None)
self.assertIn(tolevel3_time, svc.escalations)
# Go for the running part!
#--------------------------------------------------------------
# service reaches soft;1
# there must not be any notification
#--------------------------------------------------------------
print("- 1 x BAD get soft -------------------------------------")
self.scheduler_loop(1, [[svc, 2, 'BAD']], do_sleep=True, sleep_time=0.1)
# check_notification: not (soft)
print("---current_notification_number", svc.current_notification_number)
#--------------------------------------------------------------
# service reaches hard;2
# a notification must have been created
# notification number must be 1
#--------------------------------------------------------------
print("- 1 x BAD get hard -------------------------------------")
self.scheduler_loop(1, [[svc, 2, 'BAD']], do_sleep=True, sleep_time=0.1)
# We check if we really notify the level1
self.assert_any_log_match('SERVICE NOTIFICATION: level1.*;CRITICAL;')
self.show_and_clear_logs()
self.show_actions()
# check_notification: yes (hard)
print("---current_notification_number", svc.current_notification_number)
        # notification number 1 has just been sent; the next one is scheduled
        # and is waiting for notification_interval to pass, so the current
        # number is 1
self.assertEqual(1, svc.current_notification_number)
print("OK, level1 is notified, notif nb = 1")
print("---------------------------------1st round with a hard")
print("find a way to get the number of the last reaction")
cnn = svc.current_notification_number
print("- 1 x BAD repeat -------------------------------------")
        # For the test, we hack the notif value because we do not want to wait 1 hour!
for n in svc.notifications_in_progress.values():
            # HOP, we say: it's already 3600 seconds since the last notif,
            svc.notification_interval = 3600
            # and we pretend the notification was created 1 hour ago, so its
            # age looks huge and the time-based escalation will trigger
n.creation_time = n.creation_time - 3600
self.scheduler_loop(1, [[svc, 2, 'BAD']], do_sleep=True, sleep_time=0.001)
        # Now the notification age reaches 1 hour, so we escalate to level2
self.assert_any_log_match('SERVICE NOTIFICATION: level2.*;CRITICAL;')
self.show_and_clear_logs()
self.show_actions()
print("cnn and cur", cnn, svc.current_notification_number)
# We check that we really raise the notif number too
self.assertGreater(svc.current_notification_number, cnn)
cnn = svc.current_notification_number
for n in svc.notifications_in_progress.values():
            # HOP, we say: it's already 3600 seconds since the last notif
n.t_to_go = time.time()
        # One more bad check: hey, it's still near 1 hour, so still level2
self.scheduler_loop(1, [[svc, 2, 'BAD']], do_sleep=True, sleep_time=0.1)
self.assert_any_log_match('SERVICE NOTIFICATION: level2.*;CRITICAL;')
self.show_and_clear_logs()
        # Now we go for level3: again we say hey, in fact we started one hour earlier,
        # so the total notification duration is near 2 hours and level3 will be raised
for n in svc.notifications_in_progress.values():
            # HOP, we say: it's already 3600 seconds since the last notif,
n.t_to_go = time.time()
n.creation_time = n.creation_time - 3600
# One more, we bypass 7200, so now it's level3
self.scheduler_loop(1, [[svc, 2, 'BAD']], do_sleep=True, sleep_time=0.1)
self.assert_any_log_match('SERVICE NOTIFICATION: level3.*;CRITICAL;')
self.show_and_clear_logs()
        # Now we send 10 more notifs; we must still be at level3
for i in range(10):
for n in svc.notifications_in_progress.values():
                # HOP, we say: it's already 3600 seconds since the last notif,
n.t_to_go = time.time()
self.scheduler_loop(1, [[svc, 2, 'BAD']], do_sleep=True, sleep_time=0.1)
self.assert_any_log_match('SERVICE NOTIFICATION: level3.*;CRITICAL;')
self.show_and_clear_logs()
        # Now we recover; it will be fun because each of level{1,2,3} must
        # be sent a recovery notif
self.scheduler_loop(2, [[svc, 0, 'OK']], do_sleep=True, sleep_time=0.1)
self.show_actions()
self.assert_any_log_match('SERVICE NOTIFICATION: level1.*;OK;')
self.assert_any_log_match('SERVICE NOTIFICATION: level2.*;OK;')
self.assert_any_log_match('SERVICE NOTIFICATION: level3.*;OK;')
self.show_and_clear_logs()
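    # The clock-faking pattern used above repeats in the tests below; a
    # hypothetical helper (not part of the original suite) would be:
    #   def _age_notifications(self, svc, seconds=3600):
    #       for n in svc.notifications_in_progress.values():
    #           n.t_to_go = time.time()     # make the notif due right now
    #           n.creation_time -= seconds  # pretend it was created earlier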
    # Here we check whether an escalation really shortens the notification
    # interval when the escalation fires BEFORE the next notification. For
    # example, say we notify once a day: if the escalation is at 4 hours, we
    # need to notify at t=0 and get the next notification at 4h, not 1 day later.
def test_time_based_escalation_with_shorting_interval(self):
self.print_header()
# retry_interval 2
# critical notification
# run loop -> another notification
now = time.time()
host = self.sched.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0_time")
        # Unlike the other tests, use a ~1 day notification interval
        # (1400 minutes) so that only the escalation can shorten it
        svc.notification_interval = 1400
svc.checks_in_progress = []
svc.act_depend_of = [] # no hostchecks on critical checkresults
#--------------------------------------------------------------
# initialize host/service state
#--------------------------------------------------------------
self.scheduler_loop(1, [[host, 0, 'UP']], do_sleep=True, sleep_time=0.1)
print("- 1 x OK -------------------------------------")
self.scheduler_loop(1, [[svc, 0, 'OK']], do_sleep=True, sleep_time=0.1)
self.assertEqual(0, svc.current_notification_number)
# We check that we really linked our escalations :)
tolevel2_time = self.sched.conf.escalations.find_by_name('ToLevel2-time')
self.assertIsNot(tolevel2_time, None)
self.assertIn(tolevel2_time, svc.escalations)
tolevel3_time = self.sched.conf.escalations.find_by_name('ToLevel3-time')
self.assertIsNot(tolevel3_time, None)
self.assertIn(tolevel3_time, svc.escalations)
#--------------------------------------------------------------
# service reaches soft;1
# there must not be any notification
#--------------------------------------------------------------
print("- 1 x BAD get soft -------------------------------------")
self.scheduler_loop(1, [[svc, 2, 'BAD']], do_sleep=True, sleep_time=0.1)
# check_notification: not (soft)
print("---current_notification_number", svc.current_notification_number)
#--------------------------------------------------------------
# service reaches hard;2
# a notification must have been created
# notification number must be 1
#--------------------------------------------------------------
print("- 1 x BAD get hard -------------------------------------")
self.scheduler_loop(1, [[svc, 2, 'BAD']], do_sleep=True, sleep_time=0.1)
print(" ** LEVEL1 ** " * 20)
# We check if we really notify the level1
self.assert_any_log_match('SERVICE NOTIFICATION: level1.*;CRITICAL;')
self.show_and_clear_logs()
self.show_actions()
# check_notification: yes (hard)
print("---current_notification_number", svc.current_notification_number)
        # notification number 1 has just been sent; the next one is scheduled
        # and is waiting for notification_interval to pass, so the current
        # number is 1
self.assertEqual(1, svc.current_notification_number)
print("OK, level1 is notified, notif nb = 1")
print("---------------------------------1st round with a hard")
print("find a way to get the number of the last reaction")
cnn = svc.current_notification_number
print("- 1 x BAD repeat -------------------------------------")
        # Now we go for the level2 escalation, so we need to say: hey, it's 1 hour since the beginning :p
print("*************Next", svc.notification_interval * svc.__class__.interval_length)
        # first, we check that the next notification will really be near 1 hour away, because
        # the escalation to level2 asks for it. If it weren't, the standard interval would be 1 day!
for n in svc.notifications_in_progress.values():
            next_time = svc.get_next_notification_time(n)
            print(abs(next_time - now))
            # Check that we find the next notification within the next hour,
            # and not the next day as the notification_interval would imply
            self.assertLess(abs(next_time - now - 3600), 10)
# And we hack the notification so we can raise really the level2 escalation
for n in svc.notifications_in_progress.values():
n.t_to_go = time.time()
n.creation_time -= 3600
print(" ** LEVEL2 ** " * 20)
        # We put the service in trouble again
self.scheduler_loop(1, [[svc, 2, 'BAD']], do_sleep=True, sleep_time=0.001)
        # Now the time since the beginning reaches 1 hour, so we can escalate
self.assert_any_log_match('SERVICE NOTIFICATION: level2.*;CRITICAL;')
self.show_and_clear_logs()
self.show_actions()
print("Level 2 got warn, now we search for level3")
print("cnn and cur", cnn, svc.current_notification_number)
self.assertGreater(svc.current_notification_number, cnn)
cnn = svc.current_notification_number
# Now the same thing, but for level3, so one more hour
for n in svc.notifications_in_progress.values():
            # HOP, we say: it's already 3600 seconds since the last notif,
n.t_to_go = time.time()
n.creation_time -= 3600
        # One more bad check: hey, it's 7200s of notif now, so we reach level3
self.scheduler_loop(1, [[svc, 2, 'BAD']], do_sleep=True, sleep_time=0.1)
self.assert_any_log_match('SERVICE NOTIFICATION: level3.*;CRITICAL;')
self.show_and_clear_logs()
for n in svc.notifications_in_progress.values():
# we say that the next notif will be right now
# so we can raise a notif now
n.t_to_go = time.time()
        # One more: we are past 7200s, so it's still level3
self.scheduler_loop(1, [[svc, 2, 'BAD']], do_sleep=True, sleep_time=0.1)
self.assert_any_log_match('SERVICE NOTIFICATION: level3.*;CRITICAL;')
self.show_and_clear_logs()
# Now we send 10 more notif, we must be still level3
for i in range(10):
for n in svc.notifications_in_progress.values():
# HOP, we say: it's already 3600 second since the last notif,
n.t_to_go = time.time()
self.scheduler_loop(1, [[svc, 2, 'BAD']], do_sleep=True, sleep_time=0.1)
self.assert_any_log_match('SERVICE NOTIFICATION: level3.*;CRITICAL;')
self.show_and_clear_logs()
        # OK, now back to the normal behaviour: we do NOT want to raise a
        # notification again so soon.
self.scheduler_loop(2, [[svc, 2, 'BAD']], do_sleep=True, sleep_time=0.1)
self.show_actions()
print(svc.notifications_in_progress)
# Should be far away
for n in svc.notifications_in_progress.values():
print(n, n.t_to_go, time.time(), n.t_to_go - time.time())
            # Should be "near" one day now, so about 84000s
            self.assertLess(83000, abs(n.t_to_go - time.time()))
            self.assertLess(abs(n.t_to_go - time.time()), 85000)
# And so no notification
self.assert_no_log_match('SERVICE NOTIFICATION: level3.*;CRITICAL;')
        # Now we recover; it will be fun because each of level{1,2,3} must
        # be sent a recovery notif
self.scheduler_loop(2, [[svc, 0, 'OK']], do_sleep=True, sleep_time=0.1)
self.show_actions()
self.assert_any_log_match('SERVICE NOTIFICATION: level1.*;OK;')
self.assert_any_log_match('SERVICE NOTIFICATION: level2.*;OK;')
self.assert_any_log_match('SERVICE NOTIFICATION: level3.*;OK;')
self.show_and_clear_logs()
def test_time_based_escalation_with_short_notif_interval(self):
self.print_header()
# retry_interval 2
# critical notification
# run loop -> another notification
now = time.time()
host = self.sched.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0_time_long_notif_interval")
# For this specific test, notif interval will be something like 10s
#svc.notification_interval = 0.1
svc.checks_in_progress = []
svc.act_depend_of = [] # no hostchecks on critical checkresults
#--------------------------------------------------------------
# initialize host/service state
#--------------------------------------------------------------
self.scheduler_loop(1, [[host, 0, 'UP']], do_sleep=True, sleep_time=0.1)
print("- 1 x OK -------------------------------------")
self.scheduler_loop(1, [[svc, 0, 'OK']], do_sleep=True, sleep_time=0.1)
self.assertEqual(0, svc.current_notification_number)
        # We hack the interval_length to a short time: 5s
svc.__class__.interval_length = 5
        # We check that we correctly linked our escalations
tolevel2_time = self.sched.conf.escalations.find_by_name('ToLevel2-shortinterval')
self.assertIsNot(tolevel2_time, None)
self.assertIn(tolevel2_time, svc.escalations)
#tolevel3_time = self.sched.conf.escalations.find_by_name('ToLevel3-time')
#self.assertIsNot(tolevel3_time, None)
#self.assertIn(tolevel3_time, svc.escalations)
# Go for the running part!
#--------------------------------------------------------------
# service reaches soft;1
# there must not be any notification
#--------------------------------------------------------------
print("- 1 x BAD get soft -------------------------------------")
self.scheduler_loop(1, [[svc, 2, 'BAD']], do_sleep=True, sleep_time=0.1)
# check_notification: not (soft)
print("---current_notification_number", svc.current_notification_number)
#--------------------------------------------------------------
# service reaches hard;2
# a notification must have been created
# notification number must be 1
#--------------------------------------------------------------
print("- 1 x BAD get hard -------------------------------------")
self.scheduler_loop(1, [[svc, 2, 'BAD']], do_sleep=True, sleep_time=0.1)
# We check if we really notify the level1
self.assert_any_log_match('SERVICE NOTIFICATION: level1.*;CRITICAL;')
self.show_and_clear_logs()
self.show_actions()
# check_notification: yes (hard)
print("---current_notification_number", svc.current_notification_number)
        # notification number 1 has just been sent; the next one is scheduled
        # and is waiting for notification_interval to pass, so the current
        # number is 1
self.assertEqual(1, svc.current_notification_number)
print("OK, level1 is notified, notif nb = 1")
print("---------------------------------1st round with a hard")
print("find a way to get the number of the last reaction")
cnn = svc.current_notification_number
print("- 1 x BAD repeat -------------------------------------")
        # For the test, we hack the notif value because we do not want to wait 1 hour!
#for n in svc.notifications_in_progress.values():
# HOP, we say: it's already 3600 second since the last notif,
# svc.notification_interval = 3600
# and we say that there is still 1hour since the notification creation
# so it will say the notification time is huge, and so it will escalade
# n.creation_time = n.creation_time - 3600
        # Wait a little and watch the notification: we sleep only 2s, below the
        # escalation delay of 5s (interval_length = 5s, 1 interval for the escalation)
print("---" * 200)
print("We wait a bit, but not enough to go in escalation level2")
time.sleep(2)
self.scheduler_loop(1, [[svc, 2, 'BAD']], do_sleep=True, sleep_time=0.001)
        # We have not yet reached the escalation delay, so no level2 notification yet
self.assert_no_log_match('SERVICE NOTIFICATION: level2.*;CRITICAL;')
self.show_and_clear_logs()
self.show_actions()
print("---" * 200)
print("OK NOW we will have an escalation!")
time.sleep(5)
self.scheduler_loop(1, [[svc, 2, 'BAD']], do_sleep=True, sleep_time=0.001)
        # Now we are past the escalation delay, so we escalate to level2
self.assert_any_log_match('SERVICE NOTIFICATION: level2.*;CRITICAL;')
self.show_and_clear_logs()
self.show_actions()
print("cnn and cur", cnn, svc.current_notification_number)
# We check that we really raise the notif number too
self.assertGreater(svc.current_notification_number, cnn)
cnn = svc.current_notification_number
# Ok we should have one notification
next_notifications = list(svc.notifications_in_progress.values())
print("LEN", len(next_notifications))
for n in next_notifications:
print(n)
self.assertEqual(1, len(next_notifications))
n = next_notifications.pop()
print("Current NOTIFICATION", n.__dict__, n.t_to_go, time.time(), n.t_to_go - time.time(), n.already_start_escalations)
# Should be in the escalation ToLevel2-shortinterval
self.assertIn('ToLevel2-shortinterval', n.already_start_escalations)
        # Ok, we want to be sure we are using the escalation's own interval (1 interval = 5s)
# So here we should have a new notification for level2
print("*--*--" * 20)
print("Ok now another notification during the escalation 2")
time.sleep(10)
        # One more bad check: we are still inside the level2 window, so still level2
self.scheduler_loop(1, [[svc, 2, 'BAD']], do_sleep=True, sleep_time=0.1)
self.assert_any_log_match('SERVICE NOTIFICATION: level2.*;CRITICAL;')
self.show_and_clear_logs()
# Ok now go in the Level3 thing
print("*--*--" * 20)
print("Ok now goes in level3 too")
time.sleep(10)
        # One more: we pass the level3 delay, so now it's level3
self.scheduler_loop(1, [[svc, 2, 'BAD']], do_sleep=True, sleep_time=0.1)
self.assert_any_log_match('SERVICE NOTIFICATION: level2.*;CRITICAL;')
self.assert_any_log_match('SERVICE NOTIFICATION: level3.*;CRITICAL;')
self.show_and_clear_logs()
# Ok we should have one notification
next_notifications = list(svc.notifications_in_progress.values())
self.assertEqual(1, len(next_notifications))
n = next_notifications.pop()
print("Current NOTIFICATION", n.__dict__, n.t_to_go, time.time(), n.t_to_go - time.time(), n.already_start_escalations)
        # Should now be in both short-interval escalations
self.assertIn('ToLevel2-shortinterval', n.already_start_escalations)
self.assertIn('ToLevel3-shortinterval', n.already_start_escalations)
        # Loop once more to let the next notification pass
time.sleep(5)
self.scheduler_loop(1, [[svc, 2, 'BAD']], do_sleep=True, sleep_time=0.1)
self.assert_any_log_match('SERVICE NOTIFICATION: level3.*;CRITICAL;')
self.show_and_clear_logs()
print("Current NOTIFICATION", n.__dict__, n.t_to_go, time.time(), n.t_to_go - time.time(), n.already_start_escalations)
        # Now wait a little bit; with such low values, the escalation3 timing must be ok for this test to pass
time.sleep(5)
self.scheduler_loop(1, [[svc, 2, 'BAD']], do_sleep=True, sleep_time=0.1)
self.assert_any_log_match('SERVICE NOTIFICATION: level3.*;CRITICAL;')
self.show_and_clear_logs()
        # Now we recover; it will be fun because each of level{1,2,3} must
        # be sent a recovery notif
self.scheduler_loop(2, [[svc, 0, 'OK']], do_sleep=True, sleep_time=0.1)
self.show_actions()
self.assert_any_log_match('SERVICE NOTIFICATION: level1.*;OK;')
self.assert_any_log_match('SERVICE NOTIFICATION: level2.*;OK;')
self.assert_any_log_match('SERVICE NOTIFICATION: level3.*;OK;')
self.show_and_clear_logs()
if __name__ == '__main__':
unittest.main()
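The interval-shortening behaviour checked in test_time_based_escalation_with_shorting_interval can be summarised in a few lines; this is a simplified model with assumed names, not Shinken's actual get_next_notification_time:
import time
def next_notification_time(last_notif_time, notification_interval_s,
                           escalation_start_times):
    # The next notification is due either after the standard interval or at
    # the earliest pending escalation start, whichever comes first.
    standard = last_notif_time + notification_interval_s
    pending = [t for t in escalation_start_times if t > last_notif_time]
    return min([standard] + pending)
now = time.time()
# 1-day interval, but an escalation starting in 1 hour wins:
print(next_notification_time(now, 86400, [now + 3600]) - now)  # ~3600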
| 30,301 | Python | .py | 527 | 48.383302 | 127 | 0.590725 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,298 | test_contactgroups_plus_inheritance.py | shinken-solutions_shinken/test/test_contactgroups_plus_inheritance.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test attribute inheritance and the right order
#
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken_test import *
class TestPlusInInheritance(ShinkenTest):
def setUp(self):
self.setup_with_file('etc/shinken_contactgroups_plus_inheritance.cfg')
def _dump(self, h):
print("Dumping host", h.get_name())
print(h.contact_groups)
for c in h.contacts:
            print("->", c.get_name())
    def _dump_svc(self, s):
print("Dumping Service", s.get_name())
print(" contact_groups : %s " % s.contact_groups)
for c in s.contacts:
            print("->", c.get_name())
def test_contactgroups_plus_inheritance(self):
host0 = self.sched.hosts.find_by_name("test_host_0")
        # HOST 0 should have 2 groups of contacts
        # WARNING, it's a string, not the real objects!
        self._dump(host0)
        self.assertIn("test_contact_1", [c.get_name() for c in host0.contacts])
        self.assertIn("test_contact_2", [c.get_name() for c in host0.contacts])
        host2 = self.sched.hosts.find_by_name("test_host_2")
        self._dump(host2)
        self.assertIn("test_contact_1", [c.get_name() for c in host2.contacts])
        host3 = self.sched.hosts.find_by_name("test_host_3")
        self._dump(host3)
        self.assertIn("test_contact_1", [c.get_name() for c in host3.contacts])
        self.assertIn("test_contact_2", [c.get_name() for c in host3.contacts])
        host4 = self.sched.hosts.find_by_name("test_host_4")
        self._dump(host4)
        self.assertIn("test_contact_1", [c.get_name() for c in host4.contacts])
        host5 = self.sched.hosts.find_by_name("test_host_5")
        self._dump(host5)
        self.assertIn("test_contact_1", [c.get_name() for c in host5.contacts])
        self.assertIn("test_contact_2", [c.get_name() for c in host5.contacts])
        host6 = self.sched.hosts.find_by_name("test_host_6")
        self._dump(host6)
        self.assertIn("test_contact_1", [c.get_name() for c in host6.contacts])
        self.assertIn("test_contact_2", [c.get_name() for c in host6.contacts])
        # Now let's check service inheritance
        svc1 = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "svc_tmplA")
        self._dump_svc(svc1)
        self.assertIn("test_contact_1", [c.get_name() for c in svc1.contacts])
        svc2 = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "svc_tmplB")
        self._dump_svc(svc2)
        self.assertIn("test_contact_2", [c.get_name() for c in svc2.contacts])
        svc3 = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "svc_tmplA_tmplB")
        self.assertIn("test_contact_1", [c.get_name() for c in svc3.contacts])
        self.assertIn("test_contact_2", [c.get_name() for c in svc3.contacts])
        self._dump_svc(svc3)
        # Now let's check multi-level service inheritance
        svc4 = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "TEST-DESC4")
        self.assertIn("test_contact_1", [c.get_name() for c in svc4.contacts])
        self.assertIn("test_contact_2", [c.get_name() for c in svc4.contacts])
        self._dump_svc(svc4)
        svc5 = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "TEST-DESC4b")
        self.assertIn("test_contact_2", [c.get_name() for c in svc5.contacts])
self._dump_svc(svc5)
if __name__ == '__main__':
unittest.main()
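The '+' these fixtures rely on is the Nagios-style additive inheritance operator: a child value starting with '+' is appended to the inherited value instead of replacing it. A toy, string-based version of that merge (a simplification, not Shinken's real inheritance code):
def merge_inherited(child_value, parent_value):
    # '+foo' means: keep the inherited value and add 'foo' to it.
    if child_value is None:
        return parent_value
    if child_value.startswith('+') and parent_value:
        return parent_value + ',' + child_value[1:]
    return child_value.lstrip('+')
print(merge_inherited('+test_contact_2', 'test_contact_1'))  # both groups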
| 4,371 | Python | .py | 84 | 45.452381 | 98 | 0.659315 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,299 | test_groups_pickle.py | shinken-solutions_shinken/test/test_groups_pickle.py |
#!/usr/bin/env python
# Copyright (C) 2009-2010:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken_test import *
class TestConfig(ShinkenTest):
def setUp(self):
self.setup_with_file('etc/shinken_groups_pickle.cfg')
def test_dispatch(self):
sub_confs = self.conf.confs
print("NB SUB CONFS", len(sub_confs))
        vcfg = None
        # Find where HR1 is
        for cfg in sub_confs.values():
            if 'HR1' in [h.get_name() for h in cfg.hosts]:
                print('FOUND', len(cfg.hosts))
                vcfg = cfg
        self.assertIsNot(vcfg, None)
        # Look if the hg in this conf is valid
        vhg = vcfg.hostgroups.find_by_name('everyone')
        self.assertEqual(1, len(vhg.members))
        hr1 = [h for h in vcfg.hosts if h.get_name() == "HR1"][0]
        print(hr1.hostgroups)
        hg1 = None
        for hg in hr1.hostgroups:
            if hg.get_name() == 'everyone':
                hg1 = hg
        print("Found hostgroup", hg1)
        print('There should be only one host there')
        self.assertEqual(1, len(hg1.members))
        print('and it should be the same as the vcfg one!')
        self.assertEqual(hg1, vhg)
if __name__ == '__main__':
unittest.main()
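What this test guards against is group membership being lost or duplicated when a sub-configuration is pickled and shipped to a satellite. A toy round-trip illustrating the invariant (plain classes, not Shinken's objects):
import pickle
class Group(object):
    def __init__(self, name, members):
        self.name = name
        self.members = members
conf = {'hosts': ['HR1'], 'hostgroups': [Group('everyone', ['HR1'])]}
restored = pickle.loads(pickle.dumps(conf))
# Membership must survive the round-trip unchanged
assert restored['hostgroups'][0].members == ['HR1']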
| 2,093 | Python | .py | 52 | 34.519231 | 82 | 0.663866 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |